   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/kernel.h>
   6#include <linux/types.h>
   7#include <linux/slab.h>
   8#include <linux/bpf.h>
   9#include <linux/bpf_verifier.h>
  10#include <linux/bpf_perf_event.h>
  11#include <linux/btf.h>
  12#include <linux/filter.h>
  13#include <linux/uaccess.h>
  14#include <linux/ctype.h>
  15#include <linux/kprobes.h>
  16#include <linux/spinlock.h>
  17#include <linux/syscalls.h>
  18#include <linux/error-injection.h>
  19#include <linux/btf_ids.h>
  20#include <linux/bpf_lsm.h>
  21#include <linux/fprobe.h>
  22#include <linux/bsearch.h>
  23#include <linux/sort.h>
  24#include <linux/key.h>
  25#include <linux/verification.h>
  26#include <linux/namei.h>
  27#include <linux/fileattr.h>
  28
  29#include <net/bpf_sk_storage.h>
  30
  31#include <uapi/linux/bpf.h>
  32#include <uapi/linux/btf.h>
  33
  34#include <asm/tlb.h>
  35
  36#include "trace_probe.h"
  37#include "trace.h"
  38
  39#define CREATE_TRACE_POINTS
  40#include "bpf_trace.h"
  41
  42#define bpf_event_rcu_dereference(p)					\
  43	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
  44
  45#define MAX_UPROBE_MULTI_CNT (1U << 20)
  46#define MAX_KPROBE_MULTI_CNT (1U << 20)
  47
  48#ifdef CONFIG_MODULES
  49struct bpf_trace_module {
  50	struct module *module;
  51	struct list_head list;
  52};
  53
  54static LIST_HEAD(bpf_trace_modules);
  55static DEFINE_MUTEX(bpf_module_mutex);
  56
  57static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  58{
  59	struct bpf_raw_event_map *btp, *ret = NULL;
  60	struct bpf_trace_module *btm;
  61	unsigned int i;
  62
  63	mutex_lock(&bpf_module_mutex);
  64	list_for_each_entry(btm, &bpf_trace_modules, list) {
  65		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
  66			btp = &btm->module->bpf_raw_events[i];
  67			if (!strcmp(btp->tp->name, name)) {
  68				if (try_module_get(btm->module))
  69					ret = btp;
  70				goto out;
  71			}
  72		}
  73	}
  74out:
  75	mutex_unlock(&bpf_module_mutex);
  76	return ret;
  77}
  78#else
  79static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  80{
  81	return NULL;
  82}
  83#endif /* CONFIG_MODULES */
  84
  85u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  86u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  87
  88static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
  89				  u64 flags, const struct btf **btf,
  90				  s32 *btf_id);
  91static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
  92static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  93
  94static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
  95static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  96
  97/**
  98 * trace_call_bpf - invoke BPF program
  99 * @call: tracepoint event
 100 * @ctx: opaque context pointer
 101 *
 102 * kprobe handlers execute BPF programs via this helper.
 103 * Can be used from static tracepoints in the future.
 104 *
 105 * Return: BPF programs always return an integer which is interpreted by
  106 * the kprobe handler as:
 107 * 0 - return from kprobe (event is filtered out)
 108 * 1 - store kprobe event into ring buffer
 109 * Other values are reserved and currently alias to 1
 110 */
 111unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 112{
 113	unsigned int ret;
 114
 115	cant_sleep();
 116
 117	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 118		/*
 119		 * since some bpf program is already running on this cpu,
 120		 * don't call into another bpf program (same or different)
 121		 * and don't send kprobe event into ring-buffer,
 122		 * so return zero here
 123		 */
 124		rcu_read_lock();
 125		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
 126		rcu_read_unlock();
 127		ret = 0;
 128		goto out;
 129	}
 130
  131	/*
  132	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
  133	 * to all call sites, we do a bpf_prog_array_valid() check there to
  134	 * see whether call->prog_array is empty or not, which is
  135	 * a heuristic to speed up execution.
  136	 *
  137	 * If the prog_array fetched by bpf_prog_array_valid() was
  138	 * non-NULL, we go into trace_call_bpf() and do the actual
  139	 * proper rcu_dereference() under RCU lock.
  140	 * If it turns out that prog_array is NULL, we bail out.
  141	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
  142	 * was NULL, the prog_array is skipped, with the accepted risk of
  143	 * missing events that were added in between that check and the
  144	 * rcu_dereference().
  145	 */
 146	rcu_read_lock();
 147	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
 148				 ctx, bpf_prog_run);
 149	rcu_read_unlock();
 150
 151 out:
 152	__this_cpu_dec(bpf_prog_active);
 153
 154	return ret;
 155}
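
/*
 * Illustrative sketch (not part of this file): a minimal kprobe program whose
 * return value is consumed as described in the kernel-doc above. Assumes
 * libbpf's SEC()/BPF_KPROBE() conventions and vmlinux.h; the attach point and
 * the PID filter value are arbitrary examples.
 *
 *   SEC("kprobe/do_sys_openat2")
 *   int BPF_KPROBE(filter_openat)
 *   {
 *           __u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 *           if (tgid != 1234)
 *                   return 0;       // filtered out, event is dropped
 *           return 1;               // store the kprobe event in the ring buffer
 *   }
 */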
 156
 157#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 158BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 159{
 160	regs_set_return_value(regs, rc);
 161	override_function_with_return(regs);
 162	return 0;
 163}
 164
 165static const struct bpf_func_proto bpf_override_return_proto = {
 166	.func		= bpf_override_return,
 167	.gpl_only	= true,
 168	.ret_type	= RET_INTEGER,
 169	.arg1_type	= ARG_PTR_TO_CTX,
 170	.arg2_type	= ARG_ANYTHING,
 171};
 172#endif
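
/*
 * Illustrative sketch: error injection with bpf_override_return(), which is
 * only available under CONFIG_BPF_KPROBE_OVERRIDE and only on functions
 * marked with ALLOW_ERROR_INJECTION(). Assumes libbpf conventions and that
 * ENOMEM is available (e.g. via errno.h); should_failslab is one such
 * error-injectable function, used here as an example.
 *
 *   SEC("kprobe/should_failslab")
 *   int BPF_KPROBE(fail_slab_alloc)
 *   {
 *           // make the probed function return -ENOMEM to its caller
 *           bpf_override_return(ctx, -ENOMEM);
 *           return 0;
 *   }
 */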
 173
 174static __always_inline int
 175bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
 176{
 177	int ret;
 178
 179	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
 180	if (unlikely(ret < 0))
 181		memset(dst, 0, size);
 182	return ret;
 183}
 184
 185BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
 186	   const void __user *, unsafe_ptr)
 187{
 188	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
 189}
 190
 191const struct bpf_func_proto bpf_probe_read_user_proto = {
 192	.func		= bpf_probe_read_user,
 193	.gpl_only	= true,
 194	.ret_type	= RET_INTEGER,
 195	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 196	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 197	.arg3_type	= ARG_ANYTHING,
 198};
 199
 200static __always_inline int
 201bpf_probe_read_user_str_common(void *dst, u32 size,
 202			       const void __user *unsafe_ptr)
 203{
 204	int ret;
 205
 206	/*
 207	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
 208	 * terminator into `dst`.
 209	 *
 210	 * strncpy_from_user() does long-sized strides in the fast path. If the
 211	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
  212	 * then there could be junk after the NUL in `dst`. If the user takes `dst`
 213	 * and keys a hash map with it, then semantically identical strings can
 214	 * occupy multiple entries in the map.
 215	 */
 216	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
 217	if (unlikely(ret < 0))
 218		memset(dst, 0, size);
 219	return ret;
 220}
 221
 222BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
 223	   const void __user *, unsafe_ptr)
 224{
 225	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
 226}
 227
 228const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 229	.func		= bpf_probe_read_user_str,
 230	.gpl_only	= true,
 231	.ret_type	= RET_INTEGER,
 232	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 233	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 234	.arg3_type	= ARG_ANYTHING,
 235};
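
/*
 * Illustrative sketch: reading a NUL-terminated user string from a kprobe.
 * On success the helper returns the string length including the trailing NUL.
 * Assumes libbpf conventions and vmlinux.h; the attach point and buffer size
 * are arbitrary examples.
 *
 *   SEC("kprobe/do_sys_openat2")
 *   int BPF_KPROBE(trace_openat, int dfd, const char *filename)
 *   {
 *           char buf[64] = {};
 *           long n;
 *
 *           n = bpf_probe_read_user_str(buf, sizeof(buf), filename);
 *           if (n <= 0)
 *                   return 0;
 *           bpf_printk("openat: %s (%ld bytes)", buf, n);
 *           return 0;
 *   }
 */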
 236
 237BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
 238	   const void *, unsafe_ptr)
 239{
 240	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 241}
 242
 243const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 244	.func		= bpf_probe_read_kernel,
 245	.gpl_only	= true,
 246	.ret_type	= RET_INTEGER,
 247	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 248	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 249	.arg3_type	= ARG_ANYTHING,
 250};
 251
 252static __always_inline int
 253bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 254{
 255	int ret;
 256
 257	/*
 258	 * The strncpy_from_kernel_nofault() call will likely not fill the
 259	 * entire buffer, but that's okay in this circumstance as we're probing
  260	 * arbitrary memory anyway, similar to bpf_probe_read_*(), and might
  261	 * as well probe the stack. Thus, memory is explicitly cleared
  262	 * only in the error case, so that improper users ignoring the return
  263	 * code altogether don't copy garbage; otherwise the length of the
  264	 * string is returned and can be used for bpf_perf_event_output() et al.
 265	 */
 266	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
 267	if (unlikely(ret < 0))
 268		memset(dst, 0, size);
 269	return ret;
 270}
 271
 272BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
 273	   const void *, unsafe_ptr)
 274{
 275	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 276}
 277
 278const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
 279	.func		= bpf_probe_read_kernel_str,
 280	.gpl_only	= true,
 281	.ret_type	= RET_INTEGER,
 282	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 283	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 284	.arg3_type	= ARG_ANYTHING,
 285};
 286
 287#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 288BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
 289	   const void *, unsafe_ptr)
 290{
 291	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 292		return bpf_probe_read_user_common(dst, size,
 293				(__force void __user *)unsafe_ptr);
 294	}
 295	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 296}
 297
 298static const struct bpf_func_proto bpf_probe_read_compat_proto = {
 299	.func		= bpf_probe_read_compat,
 300	.gpl_only	= true,
 301	.ret_type	= RET_INTEGER,
 302	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 303	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 304	.arg3_type	= ARG_ANYTHING,
 305};
 306
 307BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
 308	   const void *, unsafe_ptr)
 309{
 310	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 311		return bpf_probe_read_user_str_common(dst, size,
 312				(__force void __user *)unsafe_ptr);
 313	}
 314	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 315}
 316
 317static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
 318	.func		= bpf_probe_read_compat_str,
 319	.gpl_only	= true,
 320	.ret_type	= RET_INTEGER,
 321	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 322	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 323	.arg3_type	= ARG_ANYTHING,
 324};
 325#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 326
 327BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
 328	   u32, size)
 329{
 330	/*
 331	 * Ensure we're in user context which is safe for the helper to
 332	 * run. This helper has no business in a kthread.
 333	 *
 334	 * access_ok() should prevent writing to non-user memory, but in
 335	 * some situations (nommu, temporary switch, etc) access_ok() does
 336	 * not provide enough validation, hence the check on KERNEL_DS.
 337	 *
 338	 * nmi_uaccess_okay() ensures the probe is not run in an interim
 339	 * state, when the task or mm are switched. This is specifically
 340	 * required to prevent the use of temporary mm.
 341	 */
 342
 343	if (unlikely(in_interrupt() ||
 344		     current->flags & (PF_KTHREAD | PF_EXITING)))
 345		return -EPERM;
 346	if (unlikely(!nmi_uaccess_okay()))
 347		return -EPERM;
 348
 349	return copy_to_user_nofault(unsafe_ptr, src, size);
 350}
 351
 352static const struct bpf_func_proto bpf_probe_write_user_proto = {
 353	.func		= bpf_probe_write_user,
 354	.gpl_only	= true,
 355	.ret_type	= RET_INTEGER,
 356	.arg1_type	= ARG_ANYTHING,
 357	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 358	.arg3_type	= ARG_CONST_SIZE,
 359};
 360
 361static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 362{
 363	if (!capable(CAP_SYS_ADMIN))
 364		return NULL;
 365
 366	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
 367			    current->comm, task_pid_nr(current));
 368
 369	return &bpf_probe_write_user_proto;
 370}
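
/*
 * Illustrative sketch: bpf_probe_write_user() is only handed out to
 * CAP_SYS_ADMIN callers and logs the ratelimited warning above; it is meant
 * for experiments and debugging, not production. Assumes libbpf's
 * SEC("ksyscall")/BPF_KSYSCALL convenience macros; the probed syscall and the
 * replacement string are arbitrary examples.
 *
 *   SEC("ksyscall/write")
 *   int BPF_KSYSCALL(patch_write, int fd, char *ubuf, size_t count)
 *   {
 *           const char msg[] = "patched";
 *
 *           if (count >= sizeof(msg))
 *                   bpf_probe_write_user(ubuf, msg, sizeof(msg));
 *           return 0;
 *   }
 */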
 371
 372#define MAX_TRACE_PRINTK_VARARGS	3
 373#define BPF_TRACE_PRINTK_SIZE		1024
 374
 375BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 376	   u64, arg2, u64, arg3)
 377{
 378	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
 379	struct bpf_bprintf_data data = {
 380		.get_bin_args	= true,
 381		.get_buf	= true,
 382	};
 383	int ret;
 384
 385	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
 386				  MAX_TRACE_PRINTK_VARARGS, &data);
 387	if (ret < 0)
 388		return ret;
 389
 390	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 391
 392	trace_bpf_trace_printk(data.buf);
 393
 394	bpf_bprintf_cleanup(&data);
 395
 396	return ret;
 397}
 398
 399static const struct bpf_func_proto bpf_trace_printk_proto = {
 400	.func		= bpf_trace_printk,
 401	.gpl_only	= true,
 402	.ret_type	= RET_INTEGER,
 403	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 404	.arg2_type	= ARG_CONST_SIZE,
 405};
 406
 407static void __set_printk_clr_event(void)
 408{
 409	/*
 410	 * This program might be calling bpf_trace_printk,
 411	 * so enable the associated bpf_trace/bpf_trace_printk event.
  412	 * Repeat this each time, as it is possible a user has
  413	 * disabled bpf_trace_printk events.  By loading a program
  414	 * that calls bpf_trace_printk(), however, the user has expressed
 415	 * the intent to see such events.
 416	 */
 417	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
 418		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 419}
 420
 421const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 422{
 423	__set_printk_clr_event();
 424	return &bpf_trace_printk_proto;
 425}
 426
 427BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
 428	   u32, data_len)
 429{
 430	struct bpf_bprintf_data data = {
 431		.get_bin_args	= true,
 432		.get_buf	= true,
 433	};
 434	int ret, num_args;
 435
 436	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 437	    (data_len && !args))
 438		return -EINVAL;
 439	num_args = data_len / 8;
 440
 441	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 442	if (ret < 0)
 443		return ret;
 444
 445	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 446
 447	trace_bpf_trace_printk(data.buf);
 448
 449	bpf_bprintf_cleanup(&data);
 450
 451	return ret;
 452}
 453
 454static const struct bpf_func_proto bpf_trace_vprintk_proto = {
 455	.func		= bpf_trace_vprintk,
 456	.gpl_only	= true,
 457	.ret_type	= RET_INTEGER,
 458	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 459	.arg2_type	= ARG_CONST_SIZE,
 460	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 461	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
 462};
 463
 464const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
 465{
 466	__set_printk_clr_event();
 467	return &bpf_trace_vprintk_proto;
 468}
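
/*
 * Illustrative sketch: libbpf's bpf_printk() macro expands to
 * bpf_trace_printk() for up to three arguments and, on kernels that have it,
 * to bpf_trace_vprintk() for more. Either way the output goes through the
 * bpf_trace/bpf_trace_printk event enabled by __set_printk_clr_event() and is
 * readable from the tracefs trace_pipe. The attach point is an arbitrary
 * example.
 *
 *   SEC("kprobe/do_sys_openat2")
 *   int BPF_KPROBE(printk_demo, int dfd, const char *filename)
 *   {
 *           __u64 id = bpf_get_current_pid_tgid();
 *
 *           // four arguments, so libbpf routes this via bpf_trace_vprintk()
 *           bpf_printk("tid %u tgid %u dfd %d name %lx",
 *                      (__u32)id, (__u32)(id >> 32), dfd,
 *                      (unsigned long)filename);
 *           return 0;
 *   }
 */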
 469
 470BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 471	   const void *, args, u32, data_len)
 472{
 473	struct bpf_bprintf_data data = {
 474		.get_bin_args	= true,
 475	};
 476	int err, num_args;
 477
 478	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 479	    (data_len && !args))
 480		return -EINVAL;
 481	num_args = data_len / 8;
 482
 483	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 484	if (err < 0)
 485		return err;
 486
 487	seq_bprintf(m, fmt, data.bin_args);
 488
 489	bpf_bprintf_cleanup(&data);
 490
 491	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 492}
 493
 494BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
 495
 496static const struct bpf_func_proto bpf_seq_printf_proto = {
 497	.func		= bpf_seq_printf,
 498	.gpl_only	= true,
 499	.ret_type	= RET_INTEGER,
 500	.arg1_type	= ARG_PTR_TO_BTF_ID,
 501	.arg1_btf_id	= &btf_seq_file_ids[0],
 502	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 503	.arg3_type	= ARG_CONST_SIZE,
 504	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 505	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 506};
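
/*
 * Illustrative sketch: bpf_seq_printf()/bpf_seq_write() are used from BPF
 * iterator programs (BPF_TRACE_ITER), where the seq_file comes from the
 * iterator context and user space reads the text through a bpf_iter fd.
 * Assumes vmlinux.h and libbpf's BPF_SEQ_PRINTF wrapper, which packs the
 * vararg array expected by bpf_seq_printf().
 *
 *   SEC("iter/task")
 *   int dump_tasks(struct bpf_iter__task *ctx)
 *   {
 *           struct seq_file *seq = ctx->meta->seq;
 *           struct task_struct *task = ctx->task;
 *
 *           if (!task)
 *                   return 0;
 *           BPF_SEQ_PRINTF(seq, "%8d %16s\n", task->pid, task->comm);
 *           return 0;
 *   }
 */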
 507
 508BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
 509{
 510	return seq_write(m, data, len) ? -EOVERFLOW : 0;
 511}
 512
 513static const struct bpf_func_proto bpf_seq_write_proto = {
 514	.func		= bpf_seq_write,
 515	.gpl_only	= true,
 516	.ret_type	= RET_INTEGER,
 517	.arg1_type	= ARG_PTR_TO_BTF_ID,
 518	.arg1_btf_id	= &btf_seq_file_ids[0],
 519	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 520	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 521};
 522
 523BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
 524	   u32, btf_ptr_size, u64, flags)
 525{
 526	const struct btf *btf;
 527	s32 btf_id;
 528	int ret;
 529
 530	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
 531	if (ret)
 532		return ret;
 533
 534	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
 535}
 536
 537static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
 538	.func		= bpf_seq_printf_btf,
 539	.gpl_only	= true,
 540	.ret_type	= RET_INTEGER,
 541	.arg1_type	= ARG_PTR_TO_BTF_ID,
 542	.arg1_btf_id	= &btf_seq_file_ids[0],
 543	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 544	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 545	.arg4_type	= ARG_ANYTHING,
 546};
 547
 548static __always_inline int
 549get_map_perf_counter(struct bpf_map *map, u64 flags,
 550		     u64 *value, u64 *enabled, u64 *running)
 551{
 552	struct bpf_array *array = container_of(map, struct bpf_array, map);
 553	unsigned int cpu = smp_processor_id();
 554	u64 index = flags & BPF_F_INDEX_MASK;
 555	struct bpf_event_entry *ee;
 556
 557	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 558		return -EINVAL;
 559	if (index == BPF_F_CURRENT_CPU)
 560		index = cpu;
 561	if (unlikely(index >= array->map.max_entries))
 562		return -E2BIG;
 563
 564	ee = READ_ONCE(array->ptrs[index]);
 565	if (!ee)
 566		return -ENOENT;
 567
 568	return perf_event_read_local(ee->event, value, enabled, running);
 569}
 570
 571BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 572{
 573	u64 value = 0;
 574	int err;
 575
 576	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
 577	/*
 578	 * this api is ugly since we miss [-22..-2] range of valid
 579	 * counter values, but that's uapi
 580	 */
 581	if (err)
 582		return err;
 583	return value;
 584}
 585
 586static const struct bpf_func_proto bpf_perf_event_read_proto = {
 587	.func		= bpf_perf_event_read,
 588	.gpl_only	= true,
 589	.ret_type	= RET_INTEGER,
 590	.arg1_type	= ARG_CONST_MAP_PTR,
 591	.arg2_type	= ARG_ANYTHING,
 592};
 593
 594BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
 595	   struct bpf_perf_event_value *, buf, u32, size)
 596{
 597	int err = -EINVAL;
 598
 599	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 600		goto clear;
 601	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
 602				   &buf->running);
 603	if (unlikely(err))
 604		goto clear;
 605	return 0;
 606clear:
 607	memset(buf, 0, size);
 608	return err;
 609}
 610
 611static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 612	.func		= bpf_perf_event_read_value,
 613	.gpl_only	= true,
 614	.ret_type	= RET_INTEGER,
 615	.arg1_type	= ARG_CONST_MAP_PTR,
 616	.arg2_type	= ARG_ANYTHING,
 617	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
 618	.arg4_type	= ARG_CONST_SIZE,
 619};
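
/*
 * Illustrative sketch: reading a counter that user space has opened with
 * perf_event_open() and stored into a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot per
 * CPU. Assumes libbpf conventions; map name, sizing and attach point are
 * arbitrary examples.
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *           __uint(key_size, sizeof(int));
 *           __uint(value_size, sizeof(int));
 *           __uint(max_entries, 128);       // >= number of possible CPUs
 *   } counters SEC(".maps");
 *
 *   SEC("kprobe/do_sys_openat2")
 *   int BPF_KPROBE(read_counter)
 *   {
 *           struct bpf_perf_event_value v = {};
 *
 *           if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *                                          &v, sizeof(v)))
 *                   bpf_printk("counter %llu enabled %llu running %llu",
 *                              v.counter, v.enabled, v.running);
 *           return 0;
 *   }
 */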
 620
 621static __always_inline u64
 622__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 623			u64 flags, struct perf_sample_data *sd)
 624{
 625	struct bpf_array *array = container_of(map, struct bpf_array, map);
 626	unsigned int cpu = smp_processor_id();
 627	u64 index = flags & BPF_F_INDEX_MASK;
 628	struct bpf_event_entry *ee;
 629	struct perf_event *event;
 630
 631	if (index == BPF_F_CURRENT_CPU)
 632		index = cpu;
 633	if (unlikely(index >= array->map.max_entries))
 634		return -E2BIG;
 635
 636	ee = READ_ONCE(array->ptrs[index]);
 637	if (!ee)
 638		return -ENOENT;
 639
 640	event = ee->event;
 641	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 642		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 643		return -EINVAL;
 644
 645	if (unlikely(event->oncpu != cpu))
 646		return -EOPNOTSUPP;
 647
 648	return perf_event_output(event, sd, regs);
 649}
 650
 651/*
 652 * Support executing tracepoints in normal, irq, and nmi context that each call
 653 * bpf_perf_event_output
 654 */
 655struct bpf_trace_sample_data {
 656	struct perf_sample_data sds[3];
 657};
 658
 659static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
 660static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 661BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 662	   u64, flags, void *, data, u64, size)
 663{
 664	struct bpf_trace_sample_data *sds;
 665	struct perf_raw_record raw = {
 666		.frag = {
 667			.size = size,
 668			.data = data,
 669		},
 670	};
 671	struct perf_sample_data *sd;
 672	int nest_level, err;
 673
 674	preempt_disable();
 675	sds = this_cpu_ptr(&bpf_trace_sds);
 676	nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 677
 678	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
 679		err = -EBUSY;
 680		goto out;
 681	}
 682
 683	sd = &sds->sds[nest_level - 1];
 684
 685	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
 686		err = -EINVAL;
 687		goto out;
 688	}
 689
 690	perf_sample_data_init(sd, 0, 0);
 691	perf_sample_save_raw_data(sd, &raw);
 692
 693	err = __bpf_perf_event_output(regs, map, flags, sd);
 694out:
 695	this_cpu_dec(bpf_trace_nest_level);
 696	preempt_enable();
 697	return err;
 698}
 699
 700static const struct bpf_func_proto bpf_perf_event_output_proto = {
 701	.func		= bpf_perf_event_output,
 702	.gpl_only	= true,
 703	.ret_type	= RET_INTEGER,
 704	.arg1_type	= ARG_PTR_TO_CTX,
 705	.arg2_type	= ARG_CONST_MAP_PTR,
 706	.arg3_type	= ARG_ANYTHING,
 707	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 708	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 709};
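
/*
 * Illustrative sketch: streaming an event record to user space through a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY, to be consumed with libbpf's perf_buffer
 * API. The event layout, map name and attach point are arbitrary examples.
 *
 *   struct event {
 *           __u32 tgid;
 *           char comm[16];
 *   };
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *           __uint(key_size, sizeof(int));
 *           __uint(value_size, sizeof(int));
 *   } events SEC(".maps");
 *
 *   SEC("kprobe/do_sys_openat2")
 *   int BPF_KPROBE(emit_event)
 *   {
 *           struct event e = {};
 *
 *           e.tgid = bpf_get_current_pid_tgid() >> 32;
 *           bpf_get_current_comm(e.comm, sizeof(e.comm));
 *           bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                                 &e, sizeof(e));
 *           return 0;
 *   }
 */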
 710
 711static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
 712struct bpf_nested_pt_regs {
 713	struct pt_regs regs[3];
 714};
 715static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
 716static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 717
 718u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 719		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 720{
 721	struct perf_raw_frag frag = {
 722		.copy		= ctx_copy,
 723		.size		= ctx_size,
 724		.data		= ctx,
 725	};
 726	struct perf_raw_record raw = {
 727		.frag = {
 728			{
 729				.next	= ctx_size ? &frag : NULL,
 730			},
 731			.size	= meta_size,
 732			.data	= meta,
 733		},
 734	};
 735	struct perf_sample_data *sd;
 736	struct pt_regs *regs;
 737	int nest_level;
 738	u64 ret;
 739
 740	preempt_disable();
 741	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 742
 743	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
 744		ret = -EBUSY;
 745		goto out;
 746	}
 747	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
 748	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 749
 750	perf_fetch_caller_regs(regs);
 751	perf_sample_data_init(sd, 0, 0);
 752	perf_sample_save_raw_data(sd, &raw);
 753
 754	ret = __bpf_perf_event_output(regs, map, flags, sd);
 755out:
 756	this_cpu_dec(bpf_event_output_nest_level);
 757	preempt_enable();
 758	return ret;
 759}
 760
 761BPF_CALL_0(bpf_get_current_task)
 762{
 763	return (long) current;
 764}
 765
 766const struct bpf_func_proto bpf_get_current_task_proto = {
 767	.func		= bpf_get_current_task,
 768	.gpl_only	= true,
 769	.ret_type	= RET_INTEGER,
 770};
 771
 772BPF_CALL_0(bpf_get_current_task_btf)
 773{
 774	return (unsigned long) current;
 775}
 776
 777const struct bpf_func_proto bpf_get_current_task_btf_proto = {
 778	.func		= bpf_get_current_task_btf,
 779	.gpl_only	= true,
 780	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
 781	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 782};
 783
 784BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
 785{
 786	return (unsigned long) task_pt_regs(task);
 787}
 788
 789BTF_ID_LIST(bpf_task_pt_regs_ids)
 790BTF_ID(struct, pt_regs)
 791
 792const struct bpf_func_proto bpf_task_pt_regs_proto = {
 793	.func		= bpf_task_pt_regs,
 794	.gpl_only	= true,
 795	.arg1_type	= ARG_PTR_TO_BTF_ID,
 796	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 797	.ret_type	= RET_PTR_TO_BTF_ID,
 798	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
 799};
 800
 801BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
 802{
 803	struct bpf_array *array = container_of(map, struct bpf_array, map);
 804	struct cgroup *cgrp;
 805
 806	if (unlikely(idx >= array->map.max_entries))
 807		return -E2BIG;
 808
 809	cgrp = READ_ONCE(array->ptrs[idx]);
 810	if (unlikely(!cgrp))
 811		return -EAGAIN;
 812
 813	return task_under_cgroup_hierarchy(current, cgrp);
 814}
 815
 816static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
 817	.func           = bpf_current_task_under_cgroup,
 818	.gpl_only       = false,
 819	.ret_type       = RET_INTEGER,
 820	.arg1_type      = ARG_CONST_MAP_PTR,
 821	.arg2_type      = ARG_ANYTHING,
 822};
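
/*
 * Illustrative sketch: bpf_current_task_under_cgroup() tests the current task
 * against a cgroup that user space stored in a BPF_MAP_TYPE_CGROUP_ARRAY slot
 * (by updating the map with a cgroup directory fd). Names and the attach
 * point are arbitrary examples.
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
 *           __uint(key_size, sizeof(__u32));
 *           __uint(value_size, sizeof(__u32));
 *           __uint(max_entries, 1);
 *   } cgroup_map SEC(".maps");
 *
 *   SEC("kprobe/do_sys_openat2")
 *   int BPF_KPROBE(only_my_cgroup)
 *   {
 *           // 1: task is in the hierarchy, 0: it is not, <0: error
 *           if (bpf_current_task_under_cgroup(&cgroup_map, 0) != 1)
 *                   return 0;
 *           bpf_printk("open() from the watched cgroup");
 *           return 0;
 *   }
 */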
 823
 824struct send_signal_irq_work {
 825	struct irq_work irq_work;
 826	struct task_struct *task;
 827	u32 sig;
 828	enum pid_type type;
 829};
 830
 831static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
 832
 833static void do_bpf_send_signal(struct irq_work *entry)
 834{
 835	struct send_signal_irq_work *work;
 836
 837	work = container_of(entry, struct send_signal_irq_work, irq_work);
 838	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
 839	put_task_struct(work->task);
 840}
 841
 842static int bpf_send_signal_common(u32 sig, enum pid_type type)
 843{
 844	struct send_signal_irq_work *work = NULL;
 845
  846	/* Similar to bpf_probe_write_user(), the task needs to be
  847	 * in a sound condition and kernel memory access must be
  848	 * permitted in order to send a signal to the current
  849	 * task.
 850	 */
 851	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
 852		return -EPERM;
 853	if (unlikely(!nmi_uaccess_okay()))
 854		return -EPERM;
 855	/* Task should not be pid=1 to avoid kernel panic. */
 856	if (unlikely(is_global_init(current)))
 857		return -EPERM;
 858
 859	if (irqs_disabled()) {
 860		/* Do an early check on signal validity. Otherwise,
 861		 * the error is lost in deferred irq_work.
 862		 */
 863		if (unlikely(!valid_signal(sig)))
 864			return -EINVAL;
 865
 866		work = this_cpu_ptr(&send_signal_work);
 867		if (irq_work_is_busy(&work->irq_work))
 868			return -EBUSY;
 869
  870		/* Add the current task, which is the target of the signal,
  871		 * to the irq_work. The current task may have changed by the
  872		 * time queued irq works get executed.
 873		 */
 874		work->task = get_task_struct(current);
 875		work->sig = sig;
 876		work->type = type;
 877		irq_work_queue(&work->irq_work);
 878		return 0;
 879	}
 880
 881	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
 882}
 883
 884BPF_CALL_1(bpf_send_signal, u32, sig)
 885{
 886	return bpf_send_signal_common(sig, PIDTYPE_TGID);
 887}
 888
 889static const struct bpf_func_proto bpf_send_signal_proto = {
 890	.func		= bpf_send_signal,
 891	.gpl_only	= false,
 892	.ret_type	= RET_INTEGER,
 893	.arg1_type	= ARG_ANYTHING,
 894};
 895
 896BPF_CALL_1(bpf_send_signal_thread, u32, sig)
 897{
 898	return bpf_send_signal_common(sig, PIDTYPE_PID);
 899}
 900
 901static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 902	.func		= bpf_send_signal_thread,
 903	.gpl_only	= false,
 904	.ret_type	= RET_INTEGER,
 905	.arg1_type	= ARG_ANYTHING,
 906};
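
/*
 * Illustrative sketch: sending a signal to the current process from a tracing
 * program. bpf_send_signal() targets the whole thread group (PIDTYPE_TGID),
 * bpf_send_signal_thread() only the calling thread (PIDTYPE_PID). The attach
 * point, PID value and signal number are arbitrary examples.
 *
 *   SEC("kprobe/do_sys_openat2")
 *   int BPF_KPROBE(nudge_task)
 *   {
 *           if (bpf_get_current_pid_tgid() >> 32 == 1234)
 *                   bpf_send_signal(10);    // SIGUSR1 on x86
 *           return 0;
 *   }
 */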
 907
 908BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 909{
 910	struct path copy;
 911	long len;
 912	char *p;
 913
 914	if (!sz)
 915		return 0;
 916
 917	/*
 918	 * The path pointer is verified as trusted and safe to use,
  919	 * but let's double check it's valid anyway to work around
  920	 * a potentially broken verifier.
 921	 */
 922	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
 923	if (len < 0)
 924		return len;
 925
 926	p = d_path(&copy, buf, sz);
 927	if (IS_ERR(p)) {
 928		len = PTR_ERR(p);
 929	} else {
 930		len = buf + sz - p;
 931		memmove(buf, p, len);
 932	}
 933
 934	return len;
 935}
 936
 937BTF_SET_START(btf_allowlist_d_path)
 938#ifdef CONFIG_SECURITY
 939BTF_ID(func, security_file_permission)
 940BTF_ID(func, security_inode_getattr)
 941BTF_ID(func, security_file_open)
 942#endif
 943#ifdef CONFIG_SECURITY_PATH
 944BTF_ID(func, security_path_truncate)
 945#endif
 946BTF_ID(func, vfs_truncate)
 947BTF_ID(func, vfs_fallocate)
 948BTF_ID(func, dentry_open)
 949BTF_ID(func, vfs_getattr)
 950BTF_ID(func, filp_close)
 951BTF_SET_END(btf_allowlist_d_path)
 952
 953static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 954{
 955	if (prog->type == BPF_PROG_TYPE_TRACING &&
 956	    prog->expected_attach_type == BPF_TRACE_ITER)
 957		return true;
 958
 959	if (prog->type == BPF_PROG_TYPE_LSM)
 960		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
 961
 962	return btf_id_set_contains(&btf_allowlist_d_path,
 963				   prog->aux->attach_btf_id);
 964}
 965
 966BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
 967
 968static const struct bpf_func_proto bpf_d_path_proto = {
 969	.func		= bpf_d_path,
 970	.gpl_only	= false,
 971	.ret_type	= RET_INTEGER,
 972	.arg1_type	= ARG_PTR_TO_BTF_ID,
 973	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
 974	.arg2_type	= ARG_PTR_TO_MEM,
 975	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 976	.allowed	= bpf_d_path_allowed,
 977};
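
/*
 * Illustrative sketch: bpf_d_path() is only usable from the program types
 * accepted by bpf_d_path_allowed() above, e.g. an fentry program attached to
 * one of the allowlisted functions or an LSM hook. Assumes libbpf conventions
 * and vmlinux.h; the buffer size is an arbitrary example.
 *
 *   SEC("fentry/vfs_truncate")
 *   int BPF_PROG(trace_truncate, const struct path *path)
 *   {
 *           char buf[256];
 *           long n;
 *
 *           n = bpf_d_path((struct path *)path, buf, sizeof(buf));
 *           if (n > 0)
 *                   bpf_printk("truncate: %s", buf);
 *           return 0;
 *   }
 */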
 978
 979#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
 980			 BTF_F_PTR_RAW | BTF_F_ZERO)
 981
 982static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
 983				  u64 flags, const struct btf **btf,
 984				  s32 *btf_id)
 985{
 986	const struct btf_type *t;
 987
 988	if (unlikely(flags & ~(BTF_F_ALL)))
 989		return -EINVAL;
 990
 991	if (btf_ptr_size != sizeof(struct btf_ptr))
 992		return -EINVAL;
 993
 994	*btf = bpf_get_btf_vmlinux();
 995
 996	if (IS_ERR_OR_NULL(*btf))
 997		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
 998
 999	if (ptr->type_id > 0)
1000		*btf_id = ptr->type_id;
1001	else
1002		return -EINVAL;
1003
1004	if (*btf_id > 0)
1005		t = btf_type_by_id(*btf, *btf_id);
1006	if (*btf_id <= 0 || !t)
1007		return -ENOENT;
1008
1009	return 0;
1010}
1011
1012BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1013	   u32, btf_ptr_size, u64, flags)
1014{
1015	const struct btf *btf;
1016	s32 btf_id;
1017	int ret;
1018
1019	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1020	if (ret)
1021		return ret;
1022
1023	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1024				      flags);
1025}
1026
1027const struct bpf_func_proto bpf_snprintf_btf_proto = {
1028	.func		= bpf_snprintf_btf,
1029	.gpl_only	= false,
1030	.ret_type	= RET_INTEGER,
1031	.arg1_type	= ARG_PTR_TO_MEM,
1032	.arg2_type	= ARG_CONST_SIZE,
1033	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1034	.arg4_type	= ARG_CONST_SIZE,
1035	.arg5_type	= ARG_ANYTHING,
1036};
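
/*
 * Illustrative sketch: dumping a kernel object as text using its BTF type.
 * bpf_core_type_id_kernel() (libbpf CO-RE) supplies the vmlinux BTF type id
 * that bpf_btf_printf_prepare() expects in btf_ptr::type_id. Shown here from
 * a task iterator, which is an arbitrary example context.
 *
 *   SEC("iter/task")
 *   int dump_task_btf(struct bpf_iter__task *ctx)
 *   {
 *           static char out[4096];
 *           struct btf_ptr p = {};
 *
 *           if (!ctx->task)
 *                   return 0;
 *           p.ptr = ctx->task;
 *           p.type_id = bpf_core_type_id_kernel(struct task_struct);
 *           if (bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0) > 0)
 *                   BPF_SEQ_PRINTF(ctx->meta->seq, "%s\n", out);
 *           return 0;
 *   }
 */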
1037
1038BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1039{
1040	/* This helper call is inlined by verifier. */
1041	return ((u64 *)ctx)[-2];
1042}
1043
1044static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1045	.func		= bpf_get_func_ip_tracing,
1046	.gpl_only	= true,
1047	.ret_type	= RET_INTEGER,
1048	.arg1_type	= ARG_PTR_TO_CTX,
1049};
1050
1051#ifdef CONFIG_X86_KERNEL_IBT
1052static unsigned long get_entry_ip(unsigned long fentry_ip)
1053{
1054	u32 instr;
1055
1056	/* Being extra safe in here in case entry ip is on the page-edge. */
1057	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
1058		return fentry_ip;
1059	if (is_endbr(instr))
1060		fentry_ip -= ENDBR_INSN_SIZE;
1061	return fentry_ip;
1062}
1063#else
1064#define get_entry_ip(fentry_ip) fentry_ip
1065#endif
1066
1067BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1068{
1069	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1070	struct kprobe *kp;
1071
1072#ifdef CONFIG_UPROBES
1073	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1074	if (run_ctx->is_uprobe)
1075		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1076#endif
1077
1078	kp = kprobe_running();
1079
1080	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1081		return 0;
1082
1083	return get_entry_ip((uintptr_t)kp->addr);
1084}
1085
1086static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1087	.func		= bpf_get_func_ip_kprobe,
1088	.gpl_only	= true,
1089	.ret_type	= RET_INTEGER,
1090	.arg1_type	= ARG_PTR_TO_CTX,
1091};
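
/*
 * Illustrative sketch: bpf_get_func_ip() in a kprobe program returns the
 * traced function's entry address (IBT-adjusted by get_entry_ip() above), or
 * 0 if the kprobe is not on a function entry. The attach point is an
 * arbitrary example.
 *
 *   SEC("kprobe/do_sys_openat2")
 *   int BPF_KPROBE(report_ip)
 *   {
 *           bpf_printk("entered function at %llx", bpf_get_func_ip(ctx));
 *           return 0;
 *   }
 */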
1092
1093BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1094{
1095	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1096}
1097
1098static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1099	.func		= bpf_get_func_ip_kprobe_multi,
1100	.gpl_only	= false,
1101	.ret_type	= RET_INTEGER,
1102	.arg1_type	= ARG_PTR_TO_CTX,
1103};
1104
1105BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1106{
1107	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1108}
1109
1110static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1111	.func		= bpf_get_attach_cookie_kprobe_multi,
1112	.gpl_only	= false,
1113	.ret_type	= RET_INTEGER,
1114	.arg1_type	= ARG_PTR_TO_CTX,
1115};
1116
1117BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1118{
1119	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1120}
1121
1122static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1123	.func		= bpf_get_func_ip_uprobe_multi,
1124	.gpl_only	= false,
1125	.ret_type	= RET_INTEGER,
1126	.arg1_type	= ARG_PTR_TO_CTX,
1127};
1128
1129BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1130{
1131	return bpf_uprobe_multi_cookie(current->bpf_ctx);
1132}
1133
1134static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1135	.func		= bpf_get_attach_cookie_uprobe_multi,
1136	.gpl_only	= false,
1137	.ret_type	= RET_INTEGER,
1138	.arg1_type	= ARG_PTR_TO_CTX,
1139};
1140
1141BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1142{
1143	struct bpf_trace_run_ctx *run_ctx;
1144
1145	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1146	return run_ctx->bpf_cookie;
1147}
1148
1149static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1150	.func		= bpf_get_attach_cookie_trace,
1151	.gpl_only	= false,
1152	.ret_type	= RET_INTEGER,
1153	.arg1_type	= ARG_PTR_TO_CTX,
1154};
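
/*
 * Illustrative sketch: bpf_get_attach_cookie() returns the u64 value that
 * user space supplied at attach time (for example via the bpf_cookie field of
 * libbpf's attach options), letting one program tell its attach points apart.
 * The program below is assumed to be attached manually from user space.
 *
 *   SEC("kprobe")
 *   int BPF_KPROBE(cookie_demo)
 *   {
 *           __u64 cookie = bpf_get_attach_cookie(ctx);
 *
 *           bpf_printk("hit attach point tagged %llu", cookie);
 *           return 0;
 *   }
 */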
1155
1156BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1157{
1158	return ctx->event->bpf_cookie;
1159}
1160
1161static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1162	.func		= bpf_get_attach_cookie_pe,
1163	.gpl_only	= false,
1164	.ret_type	= RET_INTEGER,
1165	.arg1_type	= ARG_PTR_TO_CTX,
1166};
1167
1168BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1169{
1170	struct bpf_trace_run_ctx *run_ctx;
1171
1172	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1173	return run_ctx->bpf_cookie;
1174}
1175
1176static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1177	.func		= bpf_get_attach_cookie_tracing,
1178	.gpl_only	= false,
1179	.ret_type	= RET_INTEGER,
1180	.arg1_type	= ARG_PTR_TO_CTX,
1181};
1182
1183BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1184{
1185#ifndef CONFIG_X86
1186	return -ENOENT;
1187#else
1188	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1189	u32 entry_cnt = size / br_entry_size;
1190
1191	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1192
1193	if (unlikely(flags))
1194		return -EINVAL;
1195
1196	if (!entry_cnt)
1197		return -ENOENT;
1198
1199	return entry_cnt * br_entry_size;
1200#endif
1201}
1202
1203static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1204	.func		= bpf_get_branch_snapshot,
1205	.gpl_only	= true,
1206	.ret_type	= RET_INTEGER,
1207	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1208	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1209};
1210
1211BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1212{
1213	/* This helper call is inlined by verifier. */
1214	u64 nr_args = ((u64 *)ctx)[-1];
1215
1216	if ((u64) n >= nr_args)
1217		return -EINVAL;
1218	*value = ((u64 *)ctx)[n];
1219	return 0;
1220}
1221
1222static const struct bpf_func_proto bpf_get_func_arg_proto = {
1223	.func		= get_func_arg,
1224	.ret_type	= RET_INTEGER,
1225	.arg1_type	= ARG_PTR_TO_CTX,
1226	.arg2_type	= ARG_ANYTHING,
1227	.arg3_type	= ARG_PTR_TO_LONG,
1228};
1229
1230BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1231{
1232	/* This helper call is inlined by verifier. */
1233	u64 nr_args = ((u64 *)ctx)[-1];
1234
1235	*value = ((u64 *)ctx)[nr_args];
1236	return 0;
1237}
1238
1239static const struct bpf_func_proto bpf_get_func_ret_proto = {
1240	.func		= get_func_ret,
1241	.ret_type	= RET_INTEGER,
1242	.arg1_type	= ARG_PTR_TO_CTX,
1243	.arg2_type	= ARG_PTR_TO_LONG,
1244};
1245
1246BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1247{
1248	/* This helper call is inlined by verifier. */
1249	return ((u64 *)ctx)[-1];
1250}
1251
1252static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1253	.func		= get_func_arg_cnt,
1254	.ret_type	= RET_INTEGER,
1255	.arg1_type	= ARG_PTR_TO_CTX,
1256};
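
/*
 * Illustrative sketch: the three helpers above are intended for tracing
 * (fentry/fexit) programs, where ctx is the trampoline-built argument array
 * they index into. Assumes libbpf conventions; the attach point is an
 * arbitrary example.
 *
 *   SEC("fexit/do_sys_openat2")
 *   int BPF_PROG(inspect_call)
 *   {
 *           __u64 nr_args = bpf_get_func_arg_cnt(ctx);
 *           __u64 arg0 = 0, ret = 0;
 *
 *           bpf_get_func_arg(ctx, 0, &arg0);
 *           bpf_get_func_ret(ctx, &ret);
 *           bpf_printk("%llu args, arg0=%llu, ret=%llu", nr_args, arg0, ret);
 *           return 0;
 *   }
 */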
1257
1258#ifdef CONFIG_KEYS
1259__bpf_kfunc_start_defs();
1260
1261/**
1262 * bpf_lookup_user_key - lookup a key by its serial
1263 * @serial: key handle serial number
1264 * @flags: lookup-specific flags
1265 *
1266 * Search a key with a given *serial* and the provided *flags*.
1267 * If found, increment the reference count of the key by one, and
1268 * return it in the bpf_key structure.
1269 *
1270 * The bpf_key structure must be passed to bpf_key_put() when done
1271 * with it, so that the key reference count is decremented and the
1272 * bpf_key structure is freed.
1273 *
1274 * Permission checks are deferred to the time the key is used by
1275 * one of the available key-specific kfuncs.
1276 *
1277 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1278 * special keyring (e.g. session keyring), if it doesn't yet exist.
1279 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1280 * for the key construction, and to retrieve uninstantiated keys (keys
1281 * without data attached to them).
1282 *
1283 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1284 *         NULL pointer otherwise.
1285 */
1286__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1287{
1288	key_ref_t key_ref;
1289	struct bpf_key *bkey;
1290
1291	if (flags & ~KEY_LOOKUP_ALL)
1292		return NULL;
1293
1294	/*
1295	 * Permission check is deferred until the key is used, as the
1296	 * intent of the caller is unknown here.
1297	 */
1298	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1299	if (IS_ERR(key_ref))
1300		return NULL;
1301
1302	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1303	if (!bkey) {
1304		key_put(key_ref_to_ptr(key_ref));
1305		return NULL;
1306	}
1307
1308	bkey->key = key_ref_to_ptr(key_ref);
1309	bkey->has_ref = true;
1310
1311	return bkey;
1312}
1313
1314/**
1315 * bpf_lookup_system_key - lookup a key by a system-defined ID
1316 * @id: key ID
1317 *
1318 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1319 * The key pointer is marked as invalid, to prevent bpf_key_put() from
1320 * attempting to decrement the key reference count on that pointer. The key
 1321 * pointer set in such a way is currently understood only by
1322 * verify_pkcs7_signature().
1323 *
1324 * Set *id* to one of the values defined in include/linux/verification.h:
1325 * 0 for the primary keyring (immutable keyring of system keys);
1326 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1327 * (where keys can be added only if they are vouched for by existing keys
1328 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1329 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 1330 * kernel image and, possibly, the initramfs signature).
1331 *
1332 * Return: a bpf_key pointer with an invalid key pointer set from the
1333 *         pre-determined ID on success, a NULL pointer otherwise
1334 */
1335__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1336{
1337	struct bpf_key *bkey;
1338
1339	if (system_keyring_id_check(id) < 0)
1340		return NULL;
1341
1342	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1343	if (!bkey)
1344		return NULL;
1345
1346	bkey->key = (struct key *)(unsigned long)id;
1347	bkey->has_ref = false;
1348
1349	return bkey;
1350}
1351
1352/**
1353 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1354 * @bkey: bpf_key structure
1355 *
1356 * Decrement the reference count of the key inside *bkey*, if the pointer
1357 * is valid, and free *bkey*.
1358 */
1359__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1360{
1361	if (bkey->has_ref)
1362		key_put(bkey->key);
1363
1364	kfree(bkey);
1365}
1366
1367#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1368/**
1369 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1370 * @data_ptr: data to verify
1371 * @sig_ptr: signature of the data
1372 * @trusted_keyring: keyring with keys trusted for signature verification
1373 *
1374 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1375 * with keys in a keyring referenced by *trusted_keyring*.
1376 *
1377 * Return: 0 on success, a negative value on error.
1378 */
1379__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
1380			       struct bpf_dynptr_kern *sig_ptr,
1381			       struct bpf_key *trusted_keyring)
1382{
1383	const void *data, *sig;
1384	u32 data_len, sig_len;
1385	int ret;
1386
1387	if (trusted_keyring->has_ref) {
1388		/*
1389		 * Do the permission check deferred in bpf_lookup_user_key().
1390		 * See bpf_lookup_user_key() for more details.
1391		 *
1392		 * A call to key_task_permission() here would be redundant, as
1393		 * it is already done by keyring_search() called by
1394		 * find_asymmetric_key().
1395		 */
1396		ret = key_validate(trusted_keyring->key);
1397		if (ret < 0)
1398			return ret;
1399	}
1400
1401	data_len = __bpf_dynptr_size(data_ptr);
1402	data = __bpf_dynptr_data(data_ptr, data_len);
1403	sig_len = __bpf_dynptr_size(sig_ptr);
1404	sig = __bpf_dynptr_data(sig_ptr, sig_len);
1405
1406	return verify_pkcs7_signature(data, data_len, sig, sig_len,
1407				      trusted_keyring->key,
1408				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1409				      NULL);
1410}
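
/*
 * Illustrative sketch (loosely modeled on the BPF selftests): a sleepable LSM
 * program that verifies a PKCS#7 signature against a user keyring. The data
 * and signature buffers, their lengths and the key serial are assumed to be
 * filled in by user space; the kfuncs are declared with __ksym.
 *
 *   extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *   extern void bpf_key_put(struct bpf_key *bkey) __ksym;
 *   extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
 *                                         struct bpf_dynptr *sig_p,
 *                                         struct bpf_key *trusted_keyring) __ksym;
 *
 *   char data[4096], sig[1024];
 *   __u32 data_len, sig_len, key_serial;
 *
 *   SEC("lsm.s/bpf")
 *   int BPF_PROG(check_sig)
 *   {
 *           struct bpf_dynptr data_ptr, sig_ptr;
 *           struct bpf_key *key;
 *           int ret;
 *
 *           if (data_len > sizeof(data) || sig_len > sizeof(sig))
 *                   return 0;
 *           bpf_dynptr_from_mem(data, data_len, 0, &data_ptr);
 *           bpf_dynptr_from_mem(sig, sig_len, 0, &sig_ptr);
 *
 *           key = bpf_lookup_user_key(key_serial, 0);
 *           if (!key)
 *                   return 0;
 *           ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, key);
 *           bpf_key_put(key);
 *           return ret;
 *   }
 */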
1411#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1412
1413__bpf_kfunc_end_defs();
1414
1415BTF_SET8_START(key_sig_kfunc_set)
1416BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1417BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1418BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1419#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1420BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1421#endif
1422BTF_SET8_END(key_sig_kfunc_set)
1423
1424static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1425	.owner = THIS_MODULE,
1426	.set = &key_sig_kfunc_set,
1427};
1428
1429static int __init bpf_key_sig_kfuncs_init(void)
1430{
1431	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1432					 &bpf_key_sig_kfunc_set);
1433}
1434
1435late_initcall(bpf_key_sig_kfuncs_init);
1436#endif /* CONFIG_KEYS */
1437
1438/* filesystem kfuncs */
1439__bpf_kfunc_start_defs();
1440
1441/**
1442 * bpf_get_file_xattr - get xattr of a file
1443 * @file: file to get xattr from
1444 * @name__str: name of the xattr
1445 * @value_ptr: output buffer of the xattr value
1446 *
1447 * Get xattr *name__str* of *file* and store the output in *value_ptr*.
1448 *
1449 * For security reasons, only *name__str* with prefix "user." is allowed.
1450 *
1451 * Return: 0 on success, a negative value on error.
1452 */
1453__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
1454				   struct bpf_dynptr_kern *value_ptr)
1455{
1456	struct dentry *dentry;
1457	u32 value_len;
1458	void *value;
1459	int ret;
1460
1461	if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
1462		return -EPERM;
1463
1464	value_len = __bpf_dynptr_size(value_ptr);
1465	value = __bpf_dynptr_data_rw(value_ptr, value_len);
1466	if (!value)
1467		return -EINVAL;
1468
1469	dentry = file_dentry(file);
1470	ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
1471	if (ret)
1472		return ret;
1473	return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
1474}
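
/*
 * Illustrative sketch: per bpf_get_file_xattr_filter() below, this kfunc is
 * only callable from LSM programs, and only "user.*" attribute names are
 * accepted. The destination buffer is wrapped in a dynptr; the hook choice,
 * attribute name and buffer size are arbitrary examples.
 *
 *   extern int bpf_get_file_xattr(struct file *file, const char *name__str,
 *                                 struct bpf_dynptr *value_p) __ksym;
 *
 *   char value[64];
 *
 *   SEC("lsm.s/file_open")
 *   int BPF_PROG(check_xattr, struct file *file)
 *   {
 *           struct bpf_dynptr value_ptr;
 *           int ret;
 *
 *           bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr);
 *           ret = bpf_get_file_xattr(file, "user.myattr", &value_ptr);
 *           if (ret >= 0)
 *                   bpf_printk("user.myattr is %d bytes", ret);
 *           return 0;
 *   }
 */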
1475
1476__bpf_kfunc_end_defs();
1477
1478BTF_SET8_START(fs_kfunc_set_ids)
1479BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
1480BTF_SET8_END(fs_kfunc_set_ids)
1481
1482static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
1483{
1484	if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
1485		return 0;
1486
1487	/* Only allow to attach from LSM hooks, to avoid recursion */
1488	return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
1489}
1490
1491static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
1492	.owner = THIS_MODULE,
1493	.set = &fs_kfunc_set_ids,
1494	.filter = bpf_get_file_xattr_filter,
1495};
1496
1497static int __init bpf_fs_kfuncs_init(void)
1498{
1499	return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
1500}
1501
1502late_initcall(bpf_fs_kfuncs_init);
1503
1504static const struct bpf_func_proto *
1505bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1506{
1507	switch (func_id) {
1508	case BPF_FUNC_map_lookup_elem:
1509		return &bpf_map_lookup_elem_proto;
1510	case BPF_FUNC_map_update_elem:
1511		return &bpf_map_update_elem_proto;
1512	case BPF_FUNC_map_delete_elem:
1513		return &bpf_map_delete_elem_proto;
1514	case BPF_FUNC_map_push_elem:
1515		return &bpf_map_push_elem_proto;
1516	case BPF_FUNC_map_pop_elem:
1517		return &bpf_map_pop_elem_proto;
1518	case BPF_FUNC_map_peek_elem:
1519		return &bpf_map_peek_elem_proto;
1520	case BPF_FUNC_map_lookup_percpu_elem:
1521		return &bpf_map_lookup_percpu_elem_proto;
1522	case BPF_FUNC_ktime_get_ns:
1523		return &bpf_ktime_get_ns_proto;
1524	case BPF_FUNC_ktime_get_boot_ns:
1525		return &bpf_ktime_get_boot_ns_proto;
1526	case BPF_FUNC_tail_call:
1527		return &bpf_tail_call_proto;
1528	case BPF_FUNC_get_current_pid_tgid:
1529		return &bpf_get_current_pid_tgid_proto;
1530	case BPF_FUNC_get_current_task:
1531		return &bpf_get_current_task_proto;
1532	case BPF_FUNC_get_current_task_btf:
1533		return &bpf_get_current_task_btf_proto;
1534	case BPF_FUNC_task_pt_regs:
1535		return &bpf_task_pt_regs_proto;
1536	case BPF_FUNC_get_current_uid_gid:
1537		return &bpf_get_current_uid_gid_proto;
1538	case BPF_FUNC_get_current_comm:
1539		return &bpf_get_current_comm_proto;
1540	case BPF_FUNC_trace_printk:
1541		return bpf_get_trace_printk_proto();
1542	case BPF_FUNC_get_smp_processor_id:
1543		return &bpf_get_smp_processor_id_proto;
1544	case BPF_FUNC_get_numa_node_id:
1545		return &bpf_get_numa_node_id_proto;
1546	case BPF_FUNC_perf_event_read:
1547		return &bpf_perf_event_read_proto;
1548	case BPF_FUNC_current_task_under_cgroup:
1549		return &bpf_current_task_under_cgroup_proto;
1550	case BPF_FUNC_get_prandom_u32:
1551		return &bpf_get_prandom_u32_proto;
1552	case BPF_FUNC_probe_write_user:
1553		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1554		       NULL : bpf_get_probe_write_proto();
1555	case BPF_FUNC_probe_read_user:
1556		return &bpf_probe_read_user_proto;
1557	case BPF_FUNC_probe_read_kernel:
1558		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1559		       NULL : &bpf_probe_read_kernel_proto;
1560	case BPF_FUNC_probe_read_user_str:
1561		return &bpf_probe_read_user_str_proto;
1562	case BPF_FUNC_probe_read_kernel_str:
1563		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1564		       NULL : &bpf_probe_read_kernel_str_proto;
1565#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1566	case BPF_FUNC_probe_read:
1567		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1568		       NULL : &bpf_probe_read_compat_proto;
1569	case BPF_FUNC_probe_read_str:
1570		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1571		       NULL : &bpf_probe_read_compat_str_proto;
1572#endif
1573#ifdef CONFIG_CGROUPS
1574	case BPF_FUNC_cgrp_storage_get:
1575		return &bpf_cgrp_storage_get_proto;
1576	case BPF_FUNC_cgrp_storage_delete:
1577		return &bpf_cgrp_storage_delete_proto;
1578#endif
1579	case BPF_FUNC_send_signal:
1580		return &bpf_send_signal_proto;
1581	case BPF_FUNC_send_signal_thread:
1582		return &bpf_send_signal_thread_proto;
1583	case BPF_FUNC_perf_event_read_value:
1584		return &bpf_perf_event_read_value_proto;
1585	case BPF_FUNC_get_ns_current_pid_tgid:
1586		return &bpf_get_ns_current_pid_tgid_proto;
1587	case BPF_FUNC_ringbuf_output:
1588		return &bpf_ringbuf_output_proto;
1589	case BPF_FUNC_ringbuf_reserve:
1590		return &bpf_ringbuf_reserve_proto;
1591	case BPF_FUNC_ringbuf_submit:
1592		return &bpf_ringbuf_submit_proto;
1593	case BPF_FUNC_ringbuf_discard:
1594		return &bpf_ringbuf_discard_proto;
1595	case BPF_FUNC_ringbuf_query:
1596		return &bpf_ringbuf_query_proto;
1597	case BPF_FUNC_jiffies64:
1598		return &bpf_jiffies64_proto;
1599	case BPF_FUNC_get_task_stack:
1600		return &bpf_get_task_stack_proto;
1601	case BPF_FUNC_copy_from_user:
1602		return &bpf_copy_from_user_proto;
1603	case BPF_FUNC_copy_from_user_task:
1604		return &bpf_copy_from_user_task_proto;
1605	case BPF_FUNC_snprintf_btf:
1606		return &bpf_snprintf_btf_proto;
1607	case BPF_FUNC_per_cpu_ptr:
1608		return &bpf_per_cpu_ptr_proto;
1609	case BPF_FUNC_this_cpu_ptr:
1610		return &bpf_this_cpu_ptr_proto;
1611	case BPF_FUNC_task_storage_get:
1612		if (bpf_prog_check_recur(prog))
1613			return &bpf_task_storage_get_recur_proto;
1614		return &bpf_task_storage_get_proto;
1615	case BPF_FUNC_task_storage_delete:
1616		if (bpf_prog_check_recur(prog))
1617			return &bpf_task_storage_delete_recur_proto;
1618		return &bpf_task_storage_delete_proto;
1619	case BPF_FUNC_for_each_map_elem:
1620		return &bpf_for_each_map_elem_proto;
1621	case BPF_FUNC_snprintf:
1622		return &bpf_snprintf_proto;
1623	case BPF_FUNC_get_func_ip:
1624		return &bpf_get_func_ip_proto_tracing;
1625	case BPF_FUNC_get_branch_snapshot:
1626		return &bpf_get_branch_snapshot_proto;
1627	case BPF_FUNC_find_vma:
1628		return &bpf_find_vma_proto;
1629	case BPF_FUNC_trace_vprintk:
1630		return bpf_get_trace_vprintk_proto();
1631	default:
1632		return bpf_base_func_proto(func_id);
1633	}
1634}
1635
1636static const struct bpf_func_proto *
1637kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1638{
1639	switch (func_id) {
1640	case BPF_FUNC_perf_event_output:
1641		return &bpf_perf_event_output_proto;
1642	case BPF_FUNC_get_stackid:
1643		return &bpf_get_stackid_proto;
1644	case BPF_FUNC_get_stack:
1645		return &bpf_get_stack_proto;
1646#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1647	case BPF_FUNC_override_return:
1648		return &bpf_override_return_proto;
1649#endif
1650	case BPF_FUNC_get_func_ip:
1651		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1652			return &bpf_get_func_ip_proto_kprobe_multi;
1653		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1654			return &bpf_get_func_ip_proto_uprobe_multi;
1655		return &bpf_get_func_ip_proto_kprobe;
1656	case BPF_FUNC_get_attach_cookie:
1657		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1658			return &bpf_get_attach_cookie_proto_kmulti;
1659		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1660			return &bpf_get_attach_cookie_proto_umulti;
1661		return &bpf_get_attach_cookie_proto_trace;
1662	default:
1663		return bpf_tracing_func_proto(func_id, prog);
1664	}
1665}
1666
1667/* bpf+kprobe programs can access fields of 'struct pt_regs' */
1668static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1669					const struct bpf_prog *prog,
1670					struct bpf_insn_access_aux *info)
1671{
1672	if (off < 0 || off >= sizeof(struct pt_regs))
1673		return false;
1674	if (type != BPF_READ)
1675		return false;
1676	if (off % size != 0)
1677		return false;
1678	/*
1679	 * Assertion for 32 bit to make sure last 8 byte access
1680	 * (BPF_DW) to the last 4 byte member is disallowed.
1681	 */
1682	if (off + size > sizeof(struct pt_regs))
1683		return false;
1684
1685	return true;
1686}
1687
1688const struct bpf_verifier_ops kprobe_verifier_ops = {
1689	.get_func_proto  = kprobe_prog_func_proto,
1690	.is_valid_access = kprobe_prog_is_valid_access,
1691};
1692
1693const struct bpf_prog_ops kprobe_prog_ops = {
1694};
1695
1696BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1697	   u64, flags, void *, data, u64, size)
1698{
1699	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1700
1701	/*
1702	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1703	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1704	 * from there and call the same bpf_perf_event_output() helper inline.
1705	 */
1706	return ____bpf_perf_event_output(regs, map, flags, data, size);
1707}
1708
1709static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1710	.func		= bpf_perf_event_output_tp,
1711	.gpl_only	= true,
1712	.ret_type	= RET_INTEGER,
1713	.arg1_type	= ARG_PTR_TO_CTX,
1714	.arg2_type	= ARG_CONST_MAP_PTR,
1715	.arg3_type	= ARG_ANYTHING,
1716	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1717	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1718};
1719
1720BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1721	   u64, flags)
1722{
1723	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1724
1725	/*
1726	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1727	 * the other helper's function body cannot be inlined due to being
1728	 * external, thus we need to call raw helper function.
1729	 */
1730	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1731			       flags, 0, 0);
1732}
1733
1734static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1735	.func		= bpf_get_stackid_tp,
1736	.gpl_only	= true,
1737	.ret_type	= RET_INTEGER,
1738	.arg1_type	= ARG_PTR_TO_CTX,
1739	.arg2_type	= ARG_CONST_MAP_PTR,
1740	.arg3_type	= ARG_ANYTHING,
1741};
1742
1743BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1744	   u64, flags)
1745{
1746	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1747
1748	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1749			     (unsigned long) size, flags, 0);
1750}
1751
1752static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1753	.func		= bpf_get_stack_tp,
1754	.gpl_only	= true,
1755	.ret_type	= RET_INTEGER,
1756	.arg1_type	= ARG_PTR_TO_CTX,
1757	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1758	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1759	.arg4_type	= ARG_ANYTHING,
1760};
1761
1762static const struct bpf_func_proto *
1763tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1764{
1765	switch (func_id) {
1766	case BPF_FUNC_perf_event_output:
1767		return &bpf_perf_event_output_proto_tp;
1768	case BPF_FUNC_get_stackid:
1769		return &bpf_get_stackid_proto_tp;
1770	case BPF_FUNC_get_stack:
1771		return &bpf_get_stack_proto_tp;
1772	case BPF_FUNC_get_attach_cookie:
1773		return &bpf_get_attach_cookie_proto_trace;
1774	default:
1775		return bpf_tracing_func_proto(func_id, prog);
1776	}
1777}
1778
1779static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1780				    const struct bpf_prog *prog,
1781				    struct bpf_insn_access_aux *info)
1782{
1783	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1784		return false;
1785	if (type != BPF_READ)
1786		return false;
1787	if (off % size != 0)
1788		return false;
1789
1790	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1791	return true;
1792}
1793
1794const struct bpf_verifier_ops tracepoint_verifier_ops = {
1795	.get_func_proto  = tp_prog_func_proto,
1796	.is_valid_access = tp_prog_is_valid_access,
1797};
1798
1799const struct bpf_prog_ops tracepoint_prog_ops = {
1800};
1801
1802BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1803	   struct bpf_perf_event_value *, buf, u32, size)
1804{
1805	int err = -EINVAL;
1806
1807	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1808		goto clear;
1809	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1810				    &buf->running);
1811	if (unlikely(err))
1812		goto clear;
1813	return 0;
1814clear:
1815	memset(buf, 0, size);
1816	return err;
1817}
1818
1819static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1820	.func		= bpf_perf_prog_read_value,
1821	.gpl_only	= true,
1822	.ret_type	= RET_INTEGER,
1823	.arg1_type	= ARG_PTR_TO_CTX,
1824	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1825	.arg3_type	= ARG_CONST_SIZE,
1826};
1827
1828BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1829	   void *, buf, u32, size, u64, flags)
1830{
1831	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1832	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1833	u32 to_copy;
1834
1835	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1836		return -EINVAL;
1837
1838	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1839		return -ENOENT;
1840
1841	if (unlikely(!br_stack))
1842		return -ENOENT;
1843
1844	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1845		return br_stack->nr * br_entry_size;
1846
1847	if (!buf || (size % br_entry_size != 0))
1848		return -EINVAL;
1849
1850	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1851	memcpy(buf, br_stack->entries, to_copy);
1852
1853	return to_copy;
1854}
1855
1856static const struct bpf_func_proto bpf_read_branch_records_proto = {
1857	.func           = bpf_read_branch_records,
1858	.gpl_only       = true,
1859	.ret_type       = RET_INTEGER,
1860	.arg1_type      = ARG_PTR_TO_CTX,
1861	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1862	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1863	.arg4_type      = ARG_ANYTHING,
1864};
1865
1866static const struct bpf_func_proto *
1867pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1868{
1869	switch (func_id) {
1870	case BPF_FUNC_perf_event_output:
1871		return &bpf_perf_event_output_proto_tp;
1872	case BPF_FUNC_get_stackid:
1873		return &bpf_get_stackid_proto_pe;
1874	case BPF_FUNC_get_stack:
1875		return &bpf_get_stack_proto_pe;
1876	case BPF_FUNC_perf_prog_read_value:
1877		return &bpf_perf_prog_read_value_proto;
1878	case BPF_FUNC_read_branch_records:
1879		return &bpf_read_branch_records_proto;
1880	case BPF_FUNC_get_attach_cookie:
1881		return &bpf_get_attach_cookie_proto_pe;
1882	default:
1883		return bpf_tracing_func_proto(func_id, prog);
1884	}
1885}
1886
1887/*
1888 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1889 * to avoid potential recursive reuse issue when/if tracepoints are added
1890 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1891 *
1892 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1893 * in normal, irq, and nmi context.
1894 */
1895struct bpf_raw_tp_regs {
1896	struct pt_regs regs[3];
1897};
1898static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1899static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1900static struct pt_regs *get_bpf_raw_tp_regs(void)
1901{
1902	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1903	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1904
1905	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1906		this_cpu_dec(bpf_raw_tp_nest_level);
1907		return ERR_PTR(-EBUSY);
1908	}
1909
1910	return &tp_regs->regs[nest_level - 1];
1911}
1912
1913static void put_bpf_raw_tp_regs(void)
1914{
1915	this_cpu_dec(bpf_raw_tp_nest_level);
1916}
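/*
 * Typical usage in the raw_tp helpers below (a minimal sketch):
 *
 *	regs = get_bpf_raw_tp_regs();
 *	if (IS_ERR(regs))
 *		return PTR_ERR(regs);
 *	perf_fetch_caller_regs(regs);
 *	... use regs ...
 *	put_bpf_raw_tp_regs();
 *
 * The three per-CPU slots cover nesting from normal, irq and nmi context.
 */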
1917
1918BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1919	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1920{
1921	struct pt_regs *regs = get_bpf_raw_tp_regs();
1922	int ret;
1923
1924	if (IS_ERR(regs))
1925		return PTR_ERR(regs);
1926
1927	perf_fetch_caller_regs(regs);
1928	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1929
1930	put_bpf_raw_tp_regs();
1931	return ret;
1932}
1933
1934static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1935	.func		= bpf_perf_event_output_raw_tp,
1936	.gpl_only	= true,
1937	.ret_type	= RET_INTEGER,
1938	.arg1_type	= ARG_PTR_TO_CTX,
1939	.arg2_type	= ARG_CONST_MAP_PTR,
1940	.arg3_type	= ARG_ANYTHING,
1941	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1942	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1943};
1944
1945extern const struct bpf_func_proto bpf_skb_output_proto;
1946extern const struct bpf_func_proto bpf_xdp_output_proto;
1947extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1948
1949BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1950	   struct bpf_map *, map, u64, flags)
1951{
1952	struct pt_regs *regs = get_bpf_raw_tp_regs();
1953	int ret;
1954
1955	if (IS_ERR(regs))
1956		return PTR_ERR(regs);
1957
1958	perf_fetch_caller_regs(regs);
1959	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1960	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1961			      flags, 0, 0);
1962	put_bpf_raw_tp_regs();
1963	return ret;
1964}
1965
1966static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1967	.func		= bpf_get_stackid_raw_tp,
1968	.gpl_only	= true,
1969	.ret_type	= RET_INTEGER,
1970	.arg1_type	= ARG_PTR_TO_CTX,
1971	.arg2_type	= ARG_CONST_MAP_PTR,
1972	.arg3_type	= ARG_ANYTHING,
1973};
1974
1975BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1976	   void *, buf, u32, size, u64, flags)
1977{
1978	struct pt_regs *regs = get_bpf_raw_tp_regs();
1979	int ret;
1980
1981	if (IS_ERR(regs))
1982		return PTR_ERR(regs);
1983
1984	perf_fetch_caller_regs(regs);
1985	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1986			    (unsigned long) size, flags, 0);
1987	put_bpf_raw_tp_regs();
1988	return ret;
1989}
1990
1991static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1992	.func		= bpf_get_stack_raw_tp,
1993	.gpl_only	= true,
1994	.ret_type	= RET_INTEGER,
1995	.arg1_type	= ARG_PTR_TO_CTX,
1996	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1997	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1998	.arg4_type	= ARG_ANYTHING,
1999};
2000
2001static const struct bpf_func_proto *
2002raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2003{
2004	switch (func_id) {
2005	case BPF_FUNC_perf_event_output:
2006		return &bpf_perf_event_output_proto_raw_tp;
2007	case BPF_FUNC_get_stackid:
2008		return &bpf_get_stackid_proto_raw_tp;
2009	case BPF_FUNC_get_stack:
2010		return &bpf_get_stack_proto_raw_tp;
2011	default:
2012		return bpf_tracing_func_proto(func_id, prog);
2013	}
2014}
2015
2016const struct bpf_func_proto *
2017tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2018{
2019	const struct bpf_func_proto *fn;
2020
2021	switch (func_id) {
2022#ifdef CONFIG_NET
2023	case BPF_FUNC_skb_output:
2024		return &bpf_skb_output_proto;
2025	case BPF_FUNC_xdp_output:
2026		return &bpf_xdp_output_proto;
2027	case BPF_FUNC_skc_to_tcp6_sock:
2028		return &bpf_skc_to_tcp6_sock_proto;
2029	case BPF_FUNC_skc_to_tcp_sock:
2030		return &bpf_skc_to_tcp_sock_proto;
2031	case BPF_FUNC_skc_to_tcp_timewait_sock:
2032		return &bpf_skc_to_tcp_timewait_sock_proto;
2033	case BPF_FUNC_skc_to_tcp_request_sock:
2034		return &bpf_skc_to_tcp_request_sock_proto;
2035	case BPF_FUNC_skc_to_udp6_sock:
2036		return &bpf_skc_to_udp6_sock_proto;
2037	case BPF_FUNC_skc_to_unix_sock:
2038		return &bpf_skc_to_unix_sock_proto;
2039	case BPF_FUNC_skc_to_mptcp_sock:
2040		return &bpf_skc_to_mptcp_sock_proto;
2041	case BPF_FUNC_sk_storage_get:
2042		return &bpf_sk_storage_get_tracing_proto;
2043	case BPF_FUNC_sk_storage_delete:
2044		return &bpf_sk_storage_delete_tracing_proto;
2045	case BPF_FUNC_sock_from_file:
2046		return &bpf_sock_from_file_proto;
2047	case BPF_FUNC_get_socket_cookie:
2048		return &bpf_get_socket_ptr_cookie_proto;
2049	case BPF_FUNC_xdp_get_buff_len:
2050		return &bpf_xdp_get_buff_len_trace_proto;
2051#endif
2052	case BPF_FUNC_seq_printf:
2053		return prog->expected_attach_type == BPF_TRACE_ITER ?
2054		       &bpf_seq_printf_proto :
2055		       NULL;
2056	case BPF_FUNC_seq_write:
2057		return prog->expected_attach_type == BPF_TRACE_ITER ?
2058		       &bpf_seq_write_proto :
2059		       NULL;
2060	case BPF_FUNC_seq_printf_btf:
2061		return prog->expected_attach_type == BPF_TRACE_ITER ?
2062		       &bpf_seq_printf_btf_proto :
2063		       NULL;
2064	case BPF_FUNC_d_path:
2065		return &bpf_d_path_proto;
2066	case BPF_FUNC_get_func_arg:
2067		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
2068	case BPF_FUNC_get_func_ret:
2069		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
2070	case BPF_FUNC_get_func_arg_cnt:
2071		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
2072	case BPF_FUNC_get_attach_cookie:
2073		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2074	default:
2075		fn = raw_tp_prog_func_proto(func_id, prog);
2076		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2077			fn = bpf_iter_get_func_proto(func_id, prog);
2078		return fn;
2079	}
2080}
2081
2082static bool raw_tp_prog_is_valid_access(int off, int size,
2083					enum bpf_access_type type,
2084					const struct bpf_prog *prog,
2085					struct bpf_insn_access_aux *info)
2086{
2087	return bpf_tracing_ctx_access(off, size, type);
2088}
2089
2090static bool tracing_prog_is_valid_access(int off, int size,
2091					 enum bpf_access_type type,
2092					 const struct bpf_prog *prog,
2093					 struct bpf_insn_access_aux *info)
2094{
2095	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2096}
2097
2098int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2099				     const union bpf_attr *kattr,
2100				     union bpf_attr __user *uattr)
2101{
2102	return -ENOTSUPP;
2103}
2104
2105const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2106	.get_func_proto  = raw_tp_prog_func_proto,
2107	.is_valid_access = raw_tp_prog_is_valid_access,
2108};
2109
2110const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2111#ifdef CONFIG_NET
2112	.test_run = bpf_prog_test_run_raw_tp,
2113#endif
2114};
2115
2116const struct bpf_verifier_ops tracing_verifier_ops = {
2117	.get_func_proto  = tracing_prog_func_proto,
2118	.is_valid_access = tracing_prog_is_valid_access,
2119};
2120
2121const struct bpf_prog_ops tracing_prog_ops = {
2122	.test_run = bpf_prog_test_run_tracing,
2123};
2124
2125static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2126						 enum bpf_access_type type,
2127						 const struct bpf_prog *prog,
2128						 struct bpf_insn_access_aux *info)
2129{
2130	if (off == 0) {
2131		if (size != sizeof(u64) || type != BPF_READ)
2132			return false;
2133		info->reg_type = PTR_TO_TP_BUFFER;
2134	}
2135	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2136}
2137
2138const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2139	.get_func_proto  = raw_tp_prog_func_proto,
2140	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2141};
2142
2143const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2144};
2145
2146static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2147				    const struct bpf_prog *prog,
2148				    struct bpf_insn_access_aux *info)
2149{
2150	const int size_u64 = sizeof(u64);
2151
2152	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2153		return false;
2154	if (type != BPF_READ)
2155		return false;
2156	if (off % size != 0) {
2157		if (sizeof(unsigned long) != 4)
2158			return false;
2159		if (size != 8)
2160			return false;
2161		if (off % size != 4)
2162			return false;
2163	}
2164
2165	switch (off) {
2166	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2167		bpf_ctx_record_field_size(info, size_u64);
2168		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2169			return false;
2170		break;
2171	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2172		bpf_ctx_record_field_size(info, size_u64);
2173		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2174			return false;
2175		break;
2176	default:
2177		if (size != sizeof(long))
2178			return false;
2179	}
2180
2181	return true;
2182}
2183
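/*
 * Rewrite accesses to 'struct bpf_perf_event_data' into loads from the
 * in-kernel 'struct bpf_perf_event_data_kern'. Roughly, for a program doing
 *
 *	x = ctx->sample_period;
 *
 * two loads are emitted: first ctx->data (a 'struct perf_sample_data *'),
 * then data->period. Any other offset is treated as a register access and
 * is rewritten to go through ctx->regs instead.
 */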
2184static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2185				      const struct bpf_insn *si,
2186				      struct bpf_insn *insn_buf,
2187				      struct bpf_prog *prog, u32 *target_size)
2188{
2189	struct bpf_insn *insn = insn_buf;
2190
2191	switch (si->off) {
2192	case offsetof(struct bpf_perf_event_data, sample_period):
2193		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2194						       data), si->dst_reg, si->src_reg,
2195				      offsetof(struct bpf_perf_event_data_kern, data));
2196		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2197				      bpf_target_off(struct perf_sample_data, period, 8,
2198						     target_size));
2199		break;
2200	case offsetof(struct bpf_perf_event_data, addr):
2201		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2202						       data), si->dst_reg, si->src_reg,
2203				      offsetof(struct bpf_perf_event_data_kern, data));
2204		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2205				      bpf_target_off(struct perf_sample_data, addr, 8,
2206						     target_size));
2207		break;
2208	default:
2209		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2210						       regs), si->dst_reg, si->src_reg,
2211				      offsetof(struct bpf_perf_event_data_kern, regs));
2212		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2213				      si->off);
2214		break;
2215	}
2216
2217	return insn - insn_buf;
2218}
2219
2220const struct bpf_verifier_ops perf_event_verifier_ops = {
2221	.get_func_proto		= pe_prog_func_proto,
2222	.is_valid_access	= pe_prog_is_valid_access,
2223	.convert_ctx_access	= pe_prog_convert_ctx_access,
2224};
2225
2226const struct bpf_prog_ops perf_event_prog_ops = {
2227};
2228
2229static DEFINE_MUTEX(bpf_event_mutex);
2230
2231#define BPF_TRACE_MAX_PROGS 64
2232
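/*
 * Attach follows an RCU copy-on-write pattern: the existing prog_array is
 * copied together with the new program into a fresh array, the new array is
 * published with rcu_assign_pointer() and the old one is freed once readers
 * are done (bpf_prog_array_free_sleepable()). At most BPF_TRACE_MAX_PROGS
 * programs may be attached to one trace event.
 */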
2233int perf_event_attach_bpf_prog(struct perf_event *event,
2234			       struct bpf_prog *prog,
2235			       u64 bpf_cookie)
2236{
2237	struct bpf_prog_array *old_array;
2238	struct bpf_prog_array *new_array;
2239	int ret = -EEXIST;
2240
2241	/*
2242	 * Kprobe override only works if the kprobe is on the function entry,
2243	 * and only if the target function is on the error-injection opt-in list.
2244	 */
2245	if (prog->kprobe_override &&
2246	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2247	     !trace_kprobe_error_injectable(event->tp_event)))
2248		return -EINVAL;
2249
2250	mutex_lock(&bpf_event_mutex);
2251
2252	if (event->prog)
2253		goto unlock;
2254
2255	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2256	if (old_array &&
2257	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2258		ret = -E2BIG;
2259		goto unlock;
2260	}
2261
2262	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2263	if (ret < 0)
2264		goto unlock;
2265
2266	/* set the new array to event->tp_event and set event->prog */
2267	event->prog = prog;
2268	event->bpf_cookie = bpf_cookie;
2269	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2270	bpf_prog_array_free_sleepable(old_array);
2271
2272unlock:
2273	mutex_unlock(&bpf_event_mutex);
2274	return ret;
2275}
2276
2277void perf_event_detach_bpf_prog(struct perf_event *event)
2278{
2279	struct bpf_prog_array *old_array;
2280	struct bpf_prog_array *new_array;
2281	int ret;
2282
2283	mutex_lock(&bpf_event_mutex);
2284
2285	if (!event->prog)
2286		goto unlock;
2287
2288	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2289	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2290	if (ret == -ENOENT)
2291		goto unlock;
2292	if (ret < 0) {
2293		bpf_prog_array_delete_safe(old_array, event->prog);
2294	} else {
2295		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2296		bpf_prog_array_free_sleepable(old_array);
2297	}
2298
2299	bpf_prog_put(event->prog);
2300	event->prog = NULL;
2301
2302unlock:
2303	mutex_unlock(&bpf_event_mutex);
2304}
2305
2306int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2307{
2308	struct perf_event_query_bpf __user *uquery = info;
2309	struct perf_event_query_bpf query = {};
2310	struct bpf_prog_array *progs;
2311	u32 *ids, prog_cnt, ids_len;
2312	int ret;
2313
2314	if (!perfmon_capable())
2315		return -EPERM;
2316	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2317		return -EINVAL;
2318	if (copy_from_user(&query, uquery, sizeof(query)))
2319		return -EFAULT;
2320
2321	ids_len = query.ids_len;
2322	if (ids_len > BPF_TRACE_MAX_PROGS)
2323		return -E2BIG;
2324	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2325	if (!ids)
2326		return -ENOMEM;
2327	/*
2328	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2329	 * is required when user only wants to check for uquery->prog_cnt.
2330	 * There is no need to check for it since the case is handled
2331	 * gracefully in bpf_prog_array_copy_info.
2332	 */
2333
2334	mutex_lock(&bpf_event_mutex);
2335	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2336	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2337	mutex_unlock(&bpf_event_mutex);
2338
2339	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2340	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2341		ret = -EFAULT;
2342
2343	kfree(ids);
2344	return ret;
2345}
2346
2347extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2348extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2349
2350struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2351{
2352	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2353
2354	for (; btp < __stop__bpf_raw_tp; btp++) {
2355		if (!strcmp(btp->tp->name, name))
2356			return btp;
2357	}
2358
2359	return bpf_get_raw_tracepoint_module(name);
2360}
2361
2362void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2363{
2364	struct module *mod;
2365
2366	preempt_disable();
2367	mod = __module_address((unsigned long)btp);
2368	module_put(mod);
2369	preempt_enable();
2370}
2371
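/*
 * Run the program attached to a raw tracepoint. The per-CPU prog->active
 * counter rejects recursive invocations of the same program on this CPU;
 * a recursive hit is skipped and accounted via bpf_prog_inc_misses_counter().
 */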
2372static __always_inline
2373void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2374{
2375	cant_sleep();
2376	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2377		bpf_prog_inc_misses_counter(prog);
2378		goto out;
2379	}
2380	rcu_read_lock();
2381	(void) bpf_prog_run(prog, args);
2382	rcu_read_unlock();
2383out:
2384	this_cpu_dec(*(prog->active));
2385}
2386
2387#define UNPACK(...)			__VA_ARGS__
2388#define REPEAT_1(FN, DL, X, ...)	FN(X)
2389#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2390#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2391#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2392#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2393#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2394#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2395#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2396#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2397#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2398#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2399#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2400#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2401
2402#define SARG(X)		u64 arg##X
2403#define COPY(X)		args[X] = arg##X
2404
2405#define __DL_COM	(,)
2406#define __DL_SEM	(;)
2407
2408#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2409
2410#define BPF_TRACE_DEFN_x(x)						\
2411	void bpf_trace_run##x(struct bpf_prog *prog,			\
2412			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2413	{								\
2414		u64 args[x];						\
2415		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2416		__bpf_trace_run(prog, args);				\
2417	}								\
2418	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
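/*
 * For illustration, BPF_TRACE_DEFN_x(3) expands (roughly) to:
 *
 *	void bpf_trace_run3(struct bpf_prog *prog, u64 arg0, u64 arg1, u64 arg2)
 *	{
 *		u64 args[3];
 *		args[0] = arg0; args[1] = arg1; args[2] = arg2;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run3);
 */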
2419BPF_TRACE_DEFN_x(1);
2420BPF_TRACE_DEFN_x(2);
2421BPF_TRACE_DEFN_x(3);
2422BPF_TRACE_DEFN_x(4);
2423BPF_TRACE_DEFN_x(5);
2424BPF_TRACE_DEFN_x(6);
2425BPF_TRACE_DEFN_x(7);
2426BPF_TRACE_DEFN_x(8);
2427BPF_TRACE_DEFN_x(9);
2428BPF_TRACE_DEFN_x(10);
2429BPF_TRACE_DEFN_x(11);
2430BPF_TRACE_DEFN_x(12);
2431
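/*
 * Each tracepoint argument is exposed to the program as one u64 slot, so a
 * program may only read ctx offsets up to num_args * 8 bytes; writes are
 * further limited to the tracepoint's writable_size.
 */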
2432static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2433{
2434	struct tracepoint *tp = btp->tp;
2435
2436	/*
2437	 * check that program doesn't access arguments beyond what's
2438	 * available in this tracepoint
2439	 */
2440	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2441		return -EINVAL;
2442
2443	if (prog->aux->max_tp_access > btp->writable_size)
2444		return -EINVAL;
2445
2446	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
2447						   prog);
2448}
2449
2450int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2451{
2452	return __bpf_probe_register(btp, prog);
2453}
2454
2455int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2456{
2457	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2458}
2459
2460int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2461			    u32 *fd_type, const char **buf,
2462			    u64 *probe_offset, u64 *probe_addr,
2463			    unsigned long *missed)
2464{
2465	bool is_tracepoint, is_syscall_tp;
2466	struct bpf_prog *prog;
2467	int flags, err = 0;
2468
2469	prog = event->prog;
2470	if (!prog)
2471		return -ENOENT;
2472
2473	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2474	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2475		return -EOPNOTSUPP;
2476
2477	*prog_id = prog->aux->id;
2478	flags = event->tp_event->flags;
2479	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2480	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2481
2482	if (is_tracepoint || is_syscall_tp) {
2483		*buf = is_tracepoint ? event->tp_event->tp->name
2484				     : event->tp_event->name;
2485		/* We allow NULL pointer for tracepoint */
2486		if (fd_type)
2487			*fd_type = BPF_FD_TYPE_TRACEPOINT;
2488		if (probe_offset)
2489			*probe_offset = 0x0;
2490		if (probe_addr)
2491			*probe_addr = 0x0;
2492	} else {
2493		/* kprobe/uprobe */
2494		err = -EOPNOTSUPP;
2495#ifdef CONFIG_KPROBE_EVENTS
2496		if (flags & TRACE_EVENT_FL_KPROBE)
2497			err = bpf_get_kprobe_info(event, fd_type, buf,
2498						  probe_offset, probe_addr, missed,
2499						  event->attr.type == PERF_TYPE_TRACEPOINT);
2500#endif
2501#ifdef CONFIG_UPROBE_EVENTS
2502		if (flags & TRACE_EVENT_FL_UPROBE)
2503			err = bpf_get_uprobe_info(event, fd_type, buf,
2504						  probe_offset, probe_addr,
2505						  event->attr.type == PERF_TYPE_TRACEPOINT);
2506#endif
2507	}
2508
2509	return err;
2510}
2511
2512static int __init send_signal_irq_work_init(void)
2513{
2514	int cpu;
2515	struct send_signal_irq_work *work;
2516
2517	for_each_possible_cpu(cpu) {
2518		work = per_cpu_ptr(&send_signal_work, cpu);
2519		init_irq_work(&work->irq_work, do_bpf_send_signal);
2520	}
2521	return 0;
2522}
2523
2524subsys_initcall(send_signal_irq_work_init);
2525
2526#ifdef CONFIG_MODULES
2527static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2528			    void *module)
2529{
2530	struct bpf_trace_module *btm, *tmp;
2531	struct module *mod = module;
2532	int ret = 0;
2533
2534	if (mod->num_bpf_raw_events == 0 ||
2535	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2536		goto out;
2537
2538	mutex_lock(&bpf_module_mutex);
2539
2540	switch (op) {
2541	case MODULE_STATE_COMING:
2542		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2543		if (btm) {
2544			btm->module = module;
2545			list_add(&btm->list, &bpf_trace_modules);
2546		} else {
2547			ret = -ENOMEM;
2548		}
2549		break;
2550	case MODULE_STATE_GOING:
2551		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2552			if (btm->module == module) {
2553				list_del(&btm->list);
2554				kfree(btm);
2555				break;
2556			}
2557		}
2558		break;
2559	}
2560
2561	mutex_unlock(&bpf_module_mutex);
2562
2563out:
2564	return notifier_from_errno(ret);
2565}
2566
2567static struct notifier_block bpf_module_nb = {
2568	.notifier_call = bpf_event_notify,
2569};
2570
2571static int __init bpf_event_init(void)
2572{
2573	register_module_notifier(&bpf_module_nb);
2574	return 0;
2575}
2576
2577fs_initcall(bpf_event_init);
2578#endif /* CONFIG_MODULES */
2579
2580#ifdef CONFIG_FPROBE
2581struct bpf_kprobe_multi_link {
2582	struct bpf_link link;
2583	struct fprobe fp;
2584	unsigned long *addrs;
2585	u64 *cookies;
2586	u32 cnt;
2587	u32 mods_cnt;
2588	struct module **mods;
2589	u32 flags;
2590};
2591
2592struct bpf_kprobe_multi_run_ctx {
2593	struct bpf_run_ctx run_ctx;
2594	struct bpf_kprobe_multi_link *link;
2595	unsigned long entry_ip;
2596};
2597
2598struct user_syms {
2599	const char **syms;
2600	char *buf;
2601};
2602
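/*
 * Copy an array of user-space symbol name pointers into the kernel. On
 * success us->buf holds all names back to back as NUL-terminated strings
 * and us->syms[i] points at the i-th name inside that buffer.
 */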
2603static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2604{
2605	unsigned long __user usymbol;
2606	const char **syms = NULL;
2607	char *buf = NULL, *p;
2608	int err = -ENOMEM;
2609	unsigned int i;
2610
2611	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2612	if (!syms)
2613		goto error;
2614
2615	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2616	if (!buf)
2617		goto error;
2618
2619	for (p = buf, i = 0; i < cnt; i++) {
2620		if (__get_user(usymbol, usyms + i)) {
2621			err = -EFAULT;
2622			goto error;
2623		}
2624		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2625		if (err == KSYM_NAME_LEN)
2626			err = -E2BIG;
2627		if (err < 0)
2628			goto error;
2629		syms[i] = p;
2630		p += err + 1;
2631	}
2632
2633	us->syms = syms;
2634	us->buf = buf;
2635	return 0;
2636
2637error:
2638	if (err) {
2639		kvfree(syms);
2640		kvfree(buf);
2641	}
2642	return err;
2643}
2644
2645static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2646{
2647	u32 i;
2648
2649	for (i = 0; i < cnt; i++)
2650		module_put(mods[i]);
2651}
2652
2653static void free_user_syms(struct user_syms *us)
2654{
2655	kvfree(us->syms);
2656	kvfree(us->buf);
2657}
2658
2659static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2660{
2661	struct bpf_kprobe_multi_link *kmulti_link;
2662
2663	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2664	unregister_fprobe(&kmulti_link->fp);
2665	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2666}
2667
2668static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2669{
2670	struct bpf_kprobe_multi_link *kmulti_link;
2671
2672	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2673	kvfree(kmulti_link->addrs);
2674	kvfree(kmulti_link->cookies);
2675	kfree(kmulti_link->mods);
2676	kfree(kmulti_link);
2677}
2678
2679static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2680						struct bpf_link_info *info)
2681{
2682	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2683	struct bpf_kprobe_multi_link *kmulti_link;
2684	u32 ucount = info->kprobe_multi.count;
2685	int err = 0, i;
2686
2687	if (!uaddrs ^ !ucount)
2688		return -EINVAL;
2689
2690	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2691	info->kprobe_multi.count = kmulti_link->cnt;
2692	info->kprobe_multi.flags = kmulti_link->flags;
2693	info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2694
2695	if (!uaddrs)
2696		return 0;
2697	if (ucount < kmulti_link->cnt)
2698		err = -ENOSPC;
2699	else
2700		ucount = kmulti_link->cnt;
2701
2702	if (kallsyms_show_value(current_cred())) {
2703		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2704			return -EFAULT;
2705	} else {
2706		for (i = 0; i < ucount; i++) {
2707			if (put_user(0, uaddrs + i))
2708				return -EFAULT;
2709		}
2710	}
2711	return err;
2712}
2713
2714static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2715	.release = bpf_kprobe_multi_link_release,
2716	.dealloc = bpf_kprobe_multi_link_dealloc,
2717	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2718};
2719
2720static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2721{
2722	const struct bpf_kprobe_multi_link *link = priv;
2723	unsigned long *addr_a = a, *addr_b = b;
2724	u64 *cookie_a, *cookie_b;
2725
2726	cookie_a = link->cookies + (addr_a - link->addrs);
2727	cookie_b = link->cookies + (addr_b - link->addrs);
2728
2729	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2730	swap(*addr_a, *addr_b);
2731	swap(*cookie_a, *cookie_b);
2732}
2733
2734static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2735{
2736	const unsigned long *addr_a = a, *addr_b = b;
2737
2738	if (*addr_a == *addr_b)
2739		return 0;
2740	return *addr_a < *addr_b ? -1 : 1;
2741}
2742
2743static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2744{
2745	return bpf_kprobe_multi_addrs_cmp(a, b);
2746}
2747
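/*
 * link->addrs is sorted at attach time with the cookies array kept in the
 * same order (see bpf_kprobe_multi_cookie_swap), so a bsearch() on the
 * entry IP yields the index that is valid for both arrays.
 */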
2748static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2749{
2750	struct bpf_kprobe_multi_run_ctx *run_ctx;
2751	struct bpf_kprobe_multi_link *link;
2752	u64 *cookie, entry_ip;
2753	unsigned long *addr;
2754
2755	if (WARN_ON_ONCE(!ctx))
2756		return 0;
2757	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2758	link = run_ctx->link;
2759	if (!link->cookies)
2760		return 0;
2761	entry_ip = run_ctx->entry_ip;
2762	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2763		       bpf_kprobe_multi_addrs_cmp);
2764	if (!addr)
2765		return 0;
2766	cookie = link->cookies + (addr - link->addrs);
2767	return *cookie;
2768}
2769
2770static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2771{
2772	struct bpf_kprobe_multi_run_ctx *run_ctx;
2773
2774	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2775	return run_ctx->entry_ip;
2776}
2777
2778static int
2779kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2780			   unsigned long entry_ip, struct pt_regs *regs)
2781{
2782	struct bpf_kprobe_multi_run_ctx run_ctx = {
2783		.link = link,
2784		.entry_ip = entry_ip,
2785	};
2786	struct bpf_run_ctx *old_run_ctx;
2787	int err;
2788
2789	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2790		bpf_prog_inc_misses_counter(link->link.prog);
2791		err = 0;
2792		goto out;
2793	}
2794
2795	migrate_disable();
2796	rcu_read_lock();
2797	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2798	err = bpf_prog_run(link->link.prog, regs);
2799	bpf_reset_run_ctx(old_run_ctx);
2800	rcu_read_unlock();
2801	migrate_enable();
2802
2803 out:
2804	__this_cpu_dec(bpf_prog_active);
2805	return err;
2806}
2807
2808static int
2809kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2810			  unsigned long ret_ip, struct pt_regs *regs,
2811			  void *data)
2812{
2813	struct bpf_kprobe_multi_link *link;
2814
2815	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2816	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2817	return 0;
2818}
2819
2820static void
2821kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2822			       unsigned long ret_ip, struct pt_regs *regs,
2823			       void *data)
2824{
2825	struct bpf_kprobe_multi_link *link;
2826
2827	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2828	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2829}
2830
2831static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2832{
2833	const char **str_a = (const char **) a;
2834	const char **str_b = (const char **) b;
2835
2836	return strcmp(*str_a, *str_b);
2837}
2838
2839struct multi_symbols_sort {
2840	const char **funcs;
2841	u64 *cookies;
2842};
2843
2844static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2845{
2846	const struct multi_symbols_sort *data = priv;
2847	const char **name_a = a, **name_b = b;
2848
2849	swap(*name_a, *name_b);
2850
2851	/* If defined, swap also related cookies. */
2852	if (data->cookies) {
2853		u64 *cookie_a, *cookie_b;
2854
2855		cookie_a = data->cookies + (name_a - data->funcs);
2856		cookie_b = data->cookies + (name_b - data->funcs);
2857		swap(*cookie_a, *cookie_b);
2858	}
2859}
2860
2861struct modules_array {
2862	struct module **mods;
2863	int mods_cnt;
2864	int mods_cap;
2865};
2866
2867static int add_module(struct modules_array *arr, struct module *mod)
2868{
2869	struct module **mods;
2870
2871	if (arr->mods_cnt == arr->mods_cap) {
2872		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2873		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2874		if (!mods)
2875			return -ENOMEM;
2876		arr->mods = mods;
2877	}
2878
2879	arr->mods[arr->mods_cnt] = mod;
2880	arr->mods_cnt++;
2881	return 0;
2882}
2883
2884static bool has_module(struct modules_array *arr, struct module *mod)
2885{
2886	int i;
2887
2888	for (i = arr->mods_cnt - 1; i >= 0; i--) {
2889		if (arr->mods[i] == mod)
2890			return true;
2891	}
2892	return false;
2893}
2894
2895static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2896{
2897	struct modules_array arr = {};
2898	u32 i, err = 0;
2899
2900	for (i = 0; i < addrs_cnt; i++) {
2901		struct module *mod;
2902
2903		preempt_disable();
2904		mod = __module_address(addrs[i]);
2905		/* Either no module or it's already stored */
2906		if (!mod || has_module(&arr, mod)) {
2907			preempt_enable();
2908			continue;
2909		}
2910		if (!try_module_get(mod))
2911			err = -EINVAL;
2912		preempt_enable();
2913		if (err)
2914			break;
2915		err = add_module(&arr, mod);
2916		if (err) {
2917			module_put(mod);
2918			break;
2919		}
2920	}
2921
2922	/* We return either err < 0 in case of error, ... */
2923	if (err) {
2924		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2925		kfree(arr.mods);
2926		return err;
2927	}
2928
2929	/* or number of modules found if everything is ok. */
2930	*mods = arr.mods;
2931	return arr.mods_cnt;
2932}
2933
2934static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2935{
2936	u32 i;
2937
2938	for (i = 0; i < cnt; i++) {
2939		if (!within_error_injection_list(addrs[i]))
2940			return -EINVAL;
2941	}
2942	return 0;
2943}
2944
2945int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2946{
2947	struct bpf_kprobe_multi_link *link = NULL;
2948	struct bpf_link_primer link_primer;
2949	void __user *ucookies;
2950	unsigned long *addrs;
2951	u32 flags, cnt, size;
2952	void __user *uaddrs;
2953	u64 *cookies = NULL;
2954	void __user *usyms;
2955	int err;
2956
2957	/* no support for 32bit archs yet */
2958	if (sizeof(u64) != sizeof(void *))
2959		return -EOPNOTSUPP;
2960
2961	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
2962		return -EINVAL;
2963
2964	flags = attr->link_create.kprobe_multi.flags;
2965	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2966		return -EINVAL;
2967
2968	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2969	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2970	if (!!uaddrs == !!usyms)
2971		return -EINVAL;
2972
2973	cnt = attr->link_create.kprobe_multi.cnt;
2974	if (!cnt)
2975		return -EINVAL;
2976	if (cnt > MAX_KPROBE_MULTI_CNT)
2977		return -E2BIG;
2978
2979	size = cnt * sizeof(*addrs);
2980	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2981	if (!addrs)
2982		return -ENOMEM;
2983
2984	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2985	if (ucookies) {
2986		cookies = kvmalloc_array(cnt, sizeof(*cookies), GFP_KERNEL);
2987		if (!cookies) {
2988			err = -ENOMEM;
2989			goto error;
2990		}
2991		if (copy_from_user(cookies, ucookies, size)) {
2992			err = -EFAULT;
2993			goto error;
2994		}
2995	}
2996
2997	if (uaddrs) {
2998		if (copy_from_user(addrs, uaddrs, size)) {
2999			err = -EFAULT;
3000			goto error;
3001		}
3002	} else {
3003		struct multi_symbols_sort data = {
3004			.cookies = cookies,
3005		};
3006		struct user_syms us;
3007
3008		err = copy_user_syms(&us, usyms, cnt);
3009		if (err)
3010			goto error;
3011
3012		if (cookies)
3013			data.funcs = us.syms;
3014
3015		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
3016		       symbols_swap_r, &data);
3017
3018		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
3019		free_user_syms(&us);
3020		if (err)
3021			goto error;
3022	}
3023
3024	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
3025		err = -EINVAL;
3026		goto error;
3027	}
3028
3029	link = kzalloc(sizeof(*link), GFP_KERNEL);
3030	if (!link) {
3031		err = -ENOMEM;
3032		goto error;
3033	}
3034
3035	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
3036		      &bpf_kprobe_multi_link_lops, prog);
3037
3038	err = bpf_link_prime(&link->link, &link_primer);
3039	if (err)
3040		goto error;
3041
3042	if (flags & BPF_F_KPROBE_MULTI_RETURN)
3043		link->fp.exit_handler = kprobe_multi_link_exit_handler;
3044	else
3045		link->fp.entry_handler = kprobe_multi_link_handler;
3046
3047	link->addrs = addrs;
3048	link->cookies = cookies;
3049	link->cnt = cnt;
3050	link->flags = flags;
3051
3052	if (cookies) {
3053		/*
3054		 * Sorting addresses will trigger sorting cookies as well
3055		 * (check bpf_kprobe_multi_cookie_swap). This way we can
3056		 * find cookie based on the address in bpf_get_attach_cookie
3057		 * helper.
3058		 */
3059		sort_r(addrs, cnt, sizeof(*addrs),
3060		       bpf_kprobe_multi_cookie_cmp,
3061		       bpf_kprobe_multi_cookie_swap,
3062		       link);
3063	}
3064
3065	err = get_modules_for_addrs(&link->mods, addrs, cnt);
3066	if (err < 0) {
3067		bpf_link_cleanup(&link_primer);
3068		return err;
3069	}
3070	link->mods_cnt = err;
3071
3072	err = register_fprobe_ips(&link->fp, addrs, cnt);
3073	if (err) {
3074		kprobe_multi_put_modules(link->mods, link->mods_cnt);
3075		bpf_link_cleanup(&link_primer);
3076		return err;
3077	}
3078
3079	return bpf_link_settle(&link_primer);
3080
3081error:
3082	kfree(link);
3083	kvfree(addrs);
3084	kvfree(cookies);
3085	return err;
3086}
3087#else /* !CONFIG_FPROBE */
3088int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3089{
3090	return -EOPNOTSUPP;
3091}
3092static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3093{
3094	return 0;
3095}
3096static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3097{
3098	return 0;
3099}
3100#endif
3101
3102#ifdef CONFIG_UPROBES
3103struct bpf_uprobe_multi_link;
3104
3105struct bpf_uprobe {
3106	struct bpf_uprobe_multi_link *link;
3107	loff_t offset;
3108	unsigned long ref_ctr_offset;
3109	u64 cookie;
3110	struct uprobe_consumer consumer;
3111};
3112
3113struct bpf_uprobe_multi_link {
3114	struct path path;
3115	struct bpf_link link;
3116	u32 cnt;
3117	u32 flags;
3118	struct bpf_uprobe *uprobes;
3119	struct task_struct *task;
3120};
3121
3122struct bpf_uprobe_multi_run_ctx {
3123	struct bpf_run_ctx run_ctx;
3124	unsigned long entry_ip;
3125	struct bpf_uprobe *uprobe;
3126};
3127
3128static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
3129				  u32 cnt)
3130{
3131	u32 i;
3132
3133	for (i = 0; i < cnt; i++) {
3134		uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
3135				  &uprobes[i].consumer);
3136	}
3137}
3138
3139static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3140{
3141	struct bpf_uprobe_multi_link *umulti_link;
3142
3143	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3144	bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
3145}
3146
3147static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3148{
3149	struct bpf_uprobe_multi_link *umulti_link;
3150
3151	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3152	if (umulti_link->task)
3153		put_task_struct(umulti_link->task);
3154	path_put(&umulti_link->path);
3155	kvfree(umulti_link->uprobes);
3156	kfree(umulti_link);
3157}
3158
3159static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3160						struct bpf_link_info *info)
3161{
3162	u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3163	u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3164	u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3165	u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3166	u32 upath_size = info->uprobe_multi.path_size;
3167	struct bpf_uprobe_multi_link *umulti_link;
3168	u32 ucount = info->uprobe_multi.count;
3169	int err = 0, i;
3170	long left;
3171
3172	if (!upath ^ !upath_size)
3173		return -EINVAL;
3174
3175	if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3176		return -EINVAL;
3177
3178	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3179	info->uprobe_multi.count = umulti_link->cnt;
3180	info->uprobe_multi.flags = umulti_link->flags;
3181	info->uprobe_multi.pid = umulti_link->task ?
3182				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3183
3184	if (upath) {
3185		char *p, *buf;
3186
3187		upath_size = min_t(u32, upath_size, PATH_MAX);
3188
3189		buf = kmalloc(upath_size, GFP_KERNEL);
3190		if (!buf)
3191			return -ENOMEM;
3192		p = d_path(&umulti_link->path, buf, upath_size);
3193		if (IS_ERR(p)) {
3194			kfree(buf);
3195			return PTR_ERR(p);
3196		}
3197		upath_size = buf + upath_size - p;
3198		left = copy_to_user(upath, p, upath_size);
3199		kfree(buf);
3200		if (left)
3201			return -EFAULT;
3202		info->uprobe_multi.path_size = upath_size;
3203	}
3204
3205	if (!uoffsets && !ucookies && !uref_ctr_offsets)
3206		return 0;
3207
3208	if (ucount < umulti_link->cnt)
3209		err = -ENOSPC;
3210	else
3211		ucount = umulti_link->cnt;
3212
3213	for (i = 0; i < ucount; i++) {
3214		if (uoffsets &&
3215		    put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3216			return -EFAULT;
3217		if (uref_ctr_offsets &&
3218		    put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3219			return -EFAULT;
3220		if (ucookies &&
3221		    put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3222			return -EFAULT;
3223	}
3224
3225	return err;
3226}
3227
3228static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3229	.release = bpf_uprobe_multi_link_release,
3230	.dealloc = bpf_uprobe_multi_link_dealloc,
3231	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3232};
3233
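/*
 * Run the program for one hit of a multi-uprobe. If the link is bound to a
 * task, hits from other tasks are ignored. Sleepable programs are protected
 * by rcu_read_lock_trace(), non-sleepable ones by plain RCU.
 */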
3234static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3235			   unsigned long entry_ip,
3236			   struct pt_regs *regs)
3237{
3238	struct bpf_uprobe_multi_link *link = uprobe->link;
3239	struct bpf_uprobe_multi_run_ctx run_ctx = {
3240		.entry_ip = entry_ip,
3241		.uprobe = uprobe,
3242	};
3243	struct bpf_prog *prog = link->link.prog;
3244	bool sleepable = prog->aux->sleepable;
3245	struct bpf_run_ctx *old_run_ctx;
3246	int err = 0;
3247
3248	if (link->task && current != link->task)
3249		return 0;
3250
3251	if (sleepable)
3252		rcu_read_lock_trace();
3253	else
3254		rcu_read_lock();
3255
3256	migrate_disable();
3257
3258	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
3259	err = bpf_prog_run(link->link.prog, regs);
3260	bpf_reset_run_ctx(old_run_ctx);
3261
3262	migrate_enable();
3263
3264	if (sleepable)
3265		rcu_read_unlock_trace();
3266	else
3267		rcu_read_unlock();
3268	return err;
3269}
3270
3271static bool
3272uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
3273			 struct mm_struct *mm)
3274{
3275	struct bpf_uprobe *uprobe;
3276
3277	uprobe = container_of(con, struct bpf_uprobe, consumer);
3278	return uprobe->link->task->mm == mm;
3279}
3280
3281static int
3282uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
3283{
3284	struct bpf_uprobe *uprobe;
3285
3286	uprobe = container_of(con, struct bpf_uprobe, consumer);
3287	return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
3288}
3289
3290static int
3291uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
3292{
3293	struct bpf_uprobe *uprobe;
3294
3295	uprobe = container_of(con, struct bpf_uprobe, consumer);
3296	return uprobe_prog_run(uprobe, func, regs);
3297}
3298
3299static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3300{
3301	struct bpf_uprobe_multi_run_ctx *run_ctx;
3302
3303	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3304	return run_ctx->entry_ip;
3305}
3306
3307static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3308{
3309	struct bpf_uprobe_multi_run_ctx *run_ctx;
3310
3311	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3312	return run_ctx->uprobe->cookie;
3313}
3314
3315int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3316{
3317	struct bpf_uprobe_multi_link *link = NULL;
3318	unsigned long __user *uref_ctr_offsets;
3319	struct bpf_link_primer link_primer;
3320	struct bpf_uprobe *uprobes = NULL;
3321	struct task_struct *task = NULL;
3322	unsigned long __user *uoffsets;
3323	u64 __user *ucookies;
3324	void __user *upath;
3325	u32 flags, cnt, i;
3326	struct path path;
3327	char *name;
3328	pid_t pid;
3329	int err;
3330
3331	/* no support for 32bit archs yet */
3332	if (sizeof(u64) != sizeof(void *))
3333		return -EOPNOTSUPP;
3334
3335	if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
3336		return -EINVAL;
3337
3338	flags = attr->link_create.uprobe_multi.flags;
3339	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3340		return -EINVAL;
3341
3342	/*
3343	 * path, offsets and cnt are mandatory,
3344	 * ref_ctr_offsets and cookies are optional
3345	 */
3346	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3347	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3348	cnt = attr->link_create.uprobe_multi.cnt;
3349
3350	if (!upath || !uoffsets || !cnt)
3351		return -EINVAL;
3352	if (cnt > MAX_UPROBE_MULTI_CNT)
3353		return -E2BIG;
3354
3355	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3356	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3357
3358	name = strndup_user(upath, PATH_MAX);
3359	if (IS_ERR(name)) {
3360		err = PTR_ERR(name);
3361		return err;
3362	}
3363
3364	err = kern_path(name, LOOKUP_FOLLOW, &path);
3365	kfree(name);
3366	if (err)
3367		return err;
3368
3369	if (!d_is_reg(path.dentry)) {
3370		err = -EBADF;
3371		goto error_path_put;
3372	}
3373
3374	pid = attr->link_create.uprobe_multi.pid;
3375	if (pid) {
3376		rcu_read_lock();
3377		task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3378		rcu_read_unlock();
3379		if (!task) {
3380			err = -ESRCH;
3381			goto error_path_put;
3382		}
3383	}
3384
3385	err = -ENOMEM;
3386
3387	link = kzalloc(sizeof(*link), GFP_KERNEL);
3388	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3389
3390	if (!uprobes || !link)
3391		goto error_free;
3392
3393	for (i = 0; i < cnt; i++) {
3394		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3395			err = -EFAULT;
3396			goto error_free;
3397		}
3398		if (uprobes[i].offset < 0) {
3399			err = -EINVAL;
3400			goto error_free;
3401		}
3402		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3403			err = -EFAULT;
3404			goto error_free;
3405		}
3406		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3407			err = -EFAULT;
3408			goto error_free;
3409		}
3410
3411		uprobes[i].link = link;
3412
3413		if (flags & BPF_F_UPROBE_MULTI_RETURN)
3414			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3415		else
3416			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3417
3418		if (pid)
3419			uprobes[i].consumer.filter = uprobe_multi_link_filter;
3420	}
3421
3422	link->cnt = cnt;
3423	link->uprobes = uprobes;
3424	link->path = path;
3425	link->task = task;
3426	link->flags = flags;
3427
3428	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3429		      &bpf_uprobe_multi_link_lops, prog);
3430
3431	for (i = 0; i < cnt; i++) {
3432		err = uprobe_register_refctr(d_real_inode(link->path.dentry),
3433					     uprobes[i].offset,
3434					     uprobes[i].ref_ctr_offset,
3435					     &uprobes[i].consumer);
3436		if (err) {
3437			bpf_uprobe_unregister(&path, uprobes, i);
3438			goto error_free;
3439		}
3440	}
3441
3442	err = bpf_link_prime(&link->link, &link_primer);
3443	if (err)
3444		goto error_free;
3445
3446	return bpf_link_settle(&link_primer);
3447
3448error_free:
3449	kvfree(uprobes);
3450	kfree(link);
3451	if (task)
3452		put_task_struct(task);
3453error_path_put:
3454	path_put(&path);
3455	return err;
3456}
3457#else /* !CONFIG_UPROBES */
3458int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3459{
3460	return -EOPNOTSUPP;
3461}
3462static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3463{
3464	return 0;
3465}
3466static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3467{
3468	return 0;
3469}
3470#endif /* CONFIG_UPROBES */
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/kernel.h>
   6#include <linux/types.h>
   7#include <linux/slab.h>
   8#include <linux/bpf.h>
 
   9#include <linux/bpf_perf_event.h>
  10#include <linux/btf.h>
  11#include <linux/filter.h>
  12#include <linux/uaccess.h>
  13#include <linux/ctype.h>
  14#include <linux/kprobes.h>
  15#include <linux/spinlock.h>
  16#include <linux/syscalls.h>
  17#include <linux/error-injection.h>
  18#include <linux/btf_ids.h>
  19#include <linux/bpf_lsm.h>
 
 
 
 
 
 
 
  20
  21#include <net/bpf_sk_storage.h>
  22
  23#include <uapi/linux/bpf.h>
  24#include <uapi/linux/btf.h>
  25
  26#include <asm/tlb.h>
  27
  28#include "trace_probe.h"
  29#include "trace.h"
  30
  31#define CREATE_TRACE_POINTS
  32#include "bpf_trace.h"
  33
  34#define bpf_event_rcu_dereference(p)					\
  35	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
  36
 
 
 
  37#ifdef CONFIG_MODULES
  38struct bpf_trace_module {
  39	struct module *module;
  40	struct list_head list;
  41};
  42
  43static LIST_HEAD(bpf_trace_modules);
  44static DEFINE_MUTEX(bpf_module_mutex);
  45
  46static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  47{
  48	struct bpf_raw_event_map *btp, *ret = NULL;
  49	struct bpf_trace_module *btm;
  50	unsigned int i;
  51
  52	mutex_lock(&bpf_module_mutex);
  53	list_for_each_entry(btm, &bpf_trace_modules, list) {
  54		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
  55			btp = &btm->module->bpf_raw_events[i];
  56			if (!strcmp(btp->tp->name, name)) {
  57				if (try_module_get(btm->module))
  58					ret = btp;
  59				goto out;
  60			}
  61		}
  62	}
  63out:
  64	mutex_unlock(&bpf_module_mutex);
  65	return ret;
  66}
  67#else
  68static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  69{
  70	return NULL;
  71}
  72#endif /* CONFIG_MODULES */
  73
  74u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  75u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  76
  77static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
  78				  u64 flags, const struct btf **btf,
  79				  s32 *btf_id);
 
 
 
 
 
  80
  81/**
  82 * trace_call_bpf - invoke BPF program
  83 * @call: tracepoint event
  84 * @ctx: opaque context pointer
  85 *
  86 * kprobe handlers execute BPF programs via this helper.
  87 * Can be used from static tracepoints in the future.
  88 *
  89 * Return: BPF programs always return an integer which is interpreted by
  90 * kprobe handler as:
  91 * 0 - return from kprobe (event is filtered out)
  92 * 1 - store kprobe event into ring buffer
  93 * Other values are reserved and currently alias to 1
  94 */
  95unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
  96{
  97	unsigned int ret;
  98
  99	cant_sleep();
 100
 101	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 102		/*
 103		 * since some bpf program is already running on this cpu,
 104		 * don't call into another bpf program (same or different)
 105		 * and don't send kprobe event into ring-buffer,
 106		 * so return zero here
 107		 */
 
 
 
 108		ret = 0;
 109		goto out;
 110	}
 111
 112	/*
 113	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
 114	 * to all call sites, we did a bpf_prog_array_valid() there to check
 115	 * whether call->prog_array is empty or not, which is
 116	 * a heuristic to speed up execution.
 117	 *
 118	 * If bpf_prog_array_valid() fetched prog_array was
 119	 * non-NULL, we go into trace_call_bpf() and do the actual
 120	 * proper rcu_dereference() under RCU lock.
 121	 * If it turns out that prog_array is NULL then, we bail out.
 122	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
 123	 * was NULL, you'll skip the prog_array with the risk of missing
 124	 * out of events when it was updated in between this and the
 125	 * rcu_dereference() which is accepted risk.
 126	 */
 127	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
 
 
 
 128
 129 out:
 130	__this_cpu_dec(bpf_prog_active);
 131
 132	return ret;
 133}
 134
 135#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 136BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 137{
 138	regs_set_return_value(regs, rc);
 139	override_function_with_return(regs);
 140	return 0;
 141}
 142
 143static const struct bpf_func_proto bpf_override_return_proto = {
 144	.func		= bpf_override_return,
 145	.gpl_only	= true,
 146	.ret_type	= RET_INTEGER,
 147	.arg1_type	= ARG_PTR_TO_CTX,
 148	.arg2_type	= ARG_ANYTHING,
 149};
 150#endif
 151
 152static __always_inline int
 153bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
 154{
 155	int ret;
 156
 157	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
 158	if (unlikely(ret < 0))
 159		memset(dst, 0, size);
 160	return ret;
 161}
 162
 163BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
 164	   const void __user *, unsafe_ptr)
 165{
 166	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
 167}
 168
 169const struct bpf_func_proto bpf_probe_read_user_proto = {
 170	.func		= bpf_probe_read_user,
 171	.gpl_only	= true,
 172	.ret_type	= RET_INTEGER,
 173	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 174	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 175	.arg3_type	= ARG_ANYTHING,
 176};
 177
 178static __always_inline int
 179bpf_probe_read_user_str_common(void *dst, u32 size,
 180			       const void __user *unsafe_ptr)
 181{
 182	int ret;
 183
 184	/*
 185	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
 186	 * terminator into `dst`.
 187	 *
 188	 * strncpy_from_user() does long-sized strides in the fast path. If the
 189	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
 190	 * then there could be junk after the NUL in `dst`. If user takes `dst`
 191	 * and keys a hash map with it, then semantically identical strings can
 192	 * occupy multiple entries in the map.
 193	 */
 194	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
 195	if (unlikely(ret < 0))
 196		memset(dst, 0, size);
 197	return ret;
 198}
 199
 200BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
 201	   const void __user *, unsafe_ptr)
 202{
 203	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
 204}
 205
 206const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 207	.func		= bpf_probe_read_user_str,
 208	.gpl_only	= true,
 209	.ret_type	= RET_INTEGER,
 210	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 211	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 212	.arg3_type	= ARG_ANYTHING,
 213};
 214
 215static __always_inline int
 216bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
 217{
 218	int ret;
 219
 220	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
 221	if (unlikely(ret < 0))
 222		memset(dst, 0, size);
 223	return ret;
 224}
 225
 226BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
 227	   const void *, unsafe_ptr)
 228{
 229	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 230}
 231
 232const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 233	.func		= bpf_probe_read_kernel,
 234	.gpl_only	= true,
 235	.ret_type	= RET_INTEGER,
 236	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 237	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 238	.arg3_type	= ARG_ANYTHING,
 239};
 240
 241static __always_inline int
 242bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 243{
 244	int ret;
 245
 246	/*
 247	 * The strncpy_from_kernel_nofault() call will likely not fill the
 248	 * entire buffer, but that's okay in this circumstance: we're probing
 249	 * arbitrary memory anyway, similar to bpf_probe_read_*(), and might
 250	 * as well probe the stack. Thus, memory is explicitly cleared
 251	 * only in the error case, so that improper users who ignore the
 252	 * return code altogether don't copy garbage; otherwise the length
 253	 * of the string is returned and can be used for bpf_perf_event_output() et al.
 254	 */
 255	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
 256	if (unlikely(ret < 0))
 257		memset(dst, 0, size);
 258	return ret;
 259}
 260
 261BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
 262	   const void *, unsafe_ptr)
 263{
 264	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 265}
 266
 267const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
 268	.func		= bpf_probe_read_kernel_str,
 269	.gpl_only	= true,
 270	.ret_type	= RET_INTEGER,
 271	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 272	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 273	.arg3_type	= ARG_ANYTHING,
 274};
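
/*
 * Hedged sketch of the kernel-memory counterparts: bpf_probe_read_kernel()
 * copies a fixed-size object from an arbitrary kernel address and
 * bpf_probe_read_kernel_str() copies a NUL-terminated string, both
 * zero-filling *dst on fault. Assuming vmlinux.h for the task_struct
 * layout and do_nanosleep purely as an example attach point:
 *
 *	SEC("kprobe/do_nanosleep")
 *	int BPF_KPROBE(read_comm)
 *	{
 *		struct task_struct *task = (void *)bpf_get_current_task();
 *		char comm[16] = {};
 *
 *		bpf_probe_read_kernel_str(comm, sizeof(comm), &task->comm);
 *		return 0;
 *	}
 */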
 275
 276#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 277BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
 278	   const void *, unsafe_ptr)
 279{
 280	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 281		return bpf_probe_read_user_common(dst, size,
 282				(__force void __user *)unsafe_ptr);
 283	}
 284	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 285}
 286
 287static const struct bpf_func_proto bpf_probe_read_compat_proto = {
 288	.func		= bpf_probe_read_compat,
 289	.gpl_only	= true,
 290	.ret_type	= RET_INTEGER,
 291	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 292	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 293	.arg3_type	= ARG_ANYTHING,
 294};
 295
 296BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
 297	   const void *, unsafe_ptr)
 298{
 299	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 300		return bpf_probe_read_user_str_common(dst, size,
 301				(__force void __user *)unsafe_ptr);
 302	}
 303	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 304}
 305
 306static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
 307	.func		= bpf_probe_read_compat_str,
 308	.gpl_only	= true,
 309	.ret_type	= RET_INTEGER,
 310	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 311	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 312	.arg3_type	= ARG_ANYTHING,
 313};
 314#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 315
 316BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
 317	   u32, size)
 318{
 319	/*
 320	 * Ensure we're in user context which is safe for the helper to
 321	 * run. This helper has no business in a kthread.
 322	 *
 323	 * access_ok() should prevent writing to non-user memory, but in
 324	 * some situations (nommu, temporary switch, etc) access_ok() does
 325	 * not provide enough validation, hence the check on KERNEL_DS.
 326	 *
 327	 * nmi_uaccess_okay() ensures the probe is not run in an interim
 328	 * state, when the task or mm are switched. This is specifically
 329	 * required to prevent the use of temporary mm.
 330	 */
 331
 332	if (unlikely(in_interrupt() ||
 333		     current->flags & (PF_KTHREAD | PF_EXITING)))
 334		return -EPERM;
 335	if (unlikely(uaccess_kernel()))
 336		return -EPERM;
 337	if (unlikely(!nmi_uaccess_okay()))
 338		return -EPERM;
 339
 340	return copy_to_user_nofault(unsafe_ptr, src, size);
 341}
 342
 343static const struct bpf_func_proto bpf_probe_write_user_proto = {
 344	.func		= bpf_probe_write_user,
 345	.gpl_only	= true,
 346	.ret_type	= RET_INTEGER,
 347	.arg1_type	= ARG_ANYTHING,
 348	.arg2_type	= ARG_PTR_TO_MEM,
 349	.arg3_type	= ARG_CONST_SIZE,
 350};
 351
 352static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 353{
 354	if (!capable(CAP_SYS_ADMIN))
 355		return NULL;
 356
 357	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
 358			    current->comm, task_pid_nr(current));
 359
 360	return &bpf_probe_write_user_proto;
 361}
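
/*
 * Usage-side sketch (illustrative, best-effort by design): the helper is
 * gated on CAP_SYS_ADMIN by bpf_get_probe_write_proto() above and emits
 * the ratelimited warning on load. "uaddr" below is a hypothetical
 * user-space address the program obtained from its probe context:
 *
 *	const char msg[] = "patched";
 *	long err;
 *
 *	err = bpf_probe_write_user((void *)uaddr, msg, sizeof(msg));
 *	// err < 0 if the destination page is not present/writable or the
 *	// task/mm state makes user access unsafe (the checks above)
 */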
 362
 363static DEFINE_RAW_SPINLOCK(trace_printk_lock);
 364
 365#define MAX_TRACE_PRINTK_VARARGS	3
 366#define BPF_TRACE_PRINTK_SIZE		1024
 367
 368BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 369	   u64, arg2, u64, arg3)
 370{
 371	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
 372	u32 *bin_args;
 373	static char buf[BPF_TRACE_PRINTK_SIZE];
 374	unsigned long flags;
 375	int ret;
 376
 377	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
 378				  MAX_TRACE_PRINTK_VARARGS);
 379	if (ret < 0)
 380		return ret;
 381
 382	raw_spin_lock_irqsave(&trace_printk_lock, flags);
 383	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
 384
 385	trace_bpf_trace_printk(buf);
 386	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
 387
 388	bpf_bprintf_cleanup();
 389
 390	return ret;
 391}
 392
 393static const struct bpf_func_proto bpf_trace_printk_proto = {
 394	.func		= bpf_trace_printk,
 395	.gpl_only	= true,
 396	.ret_type	= RET_INTEGER,
 397	.arg1_type	= ARG_PTR_TO_MEM,
 398	.arg2_type	= ARG_CONST_SIZE,
 399};
 400
 401const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 402{
 403	/*
 404	 * This program might be calling bpf_trace_printk,
 405	 * so enable the associated bpf_trace/bpf_trace_printk event.
 406	 * Repeat this each time as it is possible a user has
 407	 * disabled bpf_trace_printk events. By loading a program
 408	 * that calls bpf_trace_printk(), however, the user has
 409	 * expressed the intent to see such events.
 410	 */
 411	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
 412		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 413
 414	return &bpf_trace_printk_proto;
 415}
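
/*
 * From the program side this is usually reached through libbpf's
 * bpf_printk() convenience macro; the output is emitted as
 * bpf_trace:bpf_trace_printk events (commonly read via
 * /sys/kernel/debug/tracing/trace_pipe). A hedged sketch, limited to the
 * three u64 varargs enforced above:
 *
 *	u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 *	bpf_printk("opened by tgid %d\n", tgid);
 *	// long form of the same call:
 *	// char fmt[] = "opened by tgid %d\n";
 *	// bpf_trace_printk(fmt, sizeof(fmt), tgid);
 */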
 416
 417#define MAX_SEQ_PRINTF_VARARGS		12
 418
 419BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 420	   const void *, data, u32, data_len)
 421{
 422	int err, num_args;
 423	u32 *bin_args;
 424
 425	if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 ||
 426	    (data_len && !data))
 427		return -EINVAL;
 428	num_args = data_len / 8;
 429
 430	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
 431	if (err < 0)
 432		return err;
 433
 434	seq_bprintf(m, fmt, bin_args);
 435
 436	bpf_bprintf_cleanup();
 437
 438	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 439}
 440
 441BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
 442
 443static const struct bpf_func_proto bpf_seq_printf_proto = {
 444	.func		= bpf_seq_printf,
 445	.gpl_only	= true,
 446	.ret_type	= RET_INTEGER,
 447	.arg1_type	= ARG_PTR_TO_BTF_ID,
 448	.arg1_btf_id	= &btf_seq_file_ids[0],
 449	.arg2_type	= ARG_PTR_TO_MEM,
 450	.arg3_type	= ARG_CONST_SIZE,
 451	.arg4_type      = ARG_PTR_TO_MEM_OR_NULL,
 452	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 453};
 454
 455BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
 456{
 457	return seq_write(m, data, len) ? -EOVERFLOW : 0;
 458}
 459
 460static const struct bpf_func_proto bpf_seq_write_proto = {
 461	.func		= bpf_seq_write,
 462	.gpl_only	= true,
 463	.ret_type	= RET_INTEGER,
 464	.arg1_type	= ARG_PTR_TO_BTF_ID,
 465	.arg1_btf_id	= &btf_seq_file_ids[0],
 466	.arg2_type	= ARG_PTR_TO_MEM,
 467	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 468};
 469
 470BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
 471	   u32, btf_ptr_size, u64, flags)
 472{
 473	const struct btf *btf;
 474	s32 btf_id;
 475	int ret;
 476
 477	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
 478	if (ret)
 479		return ret;
 480
 481	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
 482}
 483
 484static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
 485	.func		= bpf_seq_printf_btf,
 486	.gpl_only	= true,
 487	.ret_type	= RET_INTEGER,
 488	.arg1_type	= ARG_PTR_TO_BTF_ID,
 489	.arg1_btf_id	= &btf_seq_file_ids[0],
 490	.arg2_type	= ARG_PTR_TO_MEM,
 491	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 492	.arg4_type	= ARG_ANYTHING,
 493};
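
/*
 * All three seq helpers are only handed out to BPF_TRACE_ITER programs
 * (see tracing_prog_func_proto() below), where ctx->meta->seq is the
 * seq_file backing the iterator read. A hedged task-iterator sketch,
 * assuming vmlinux.h and libbpf's BPF_SEQ_PRINTF() wrapper:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (!task)
 *			return 0;
 *		BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */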
 494
 495static __always_inline int
 496get_map_perf_counter(struct bpf_map *map, u64 flags,
 497		     u64 *value, u64 *enabled, u64 *running)
 498{
 499	struct bpf_array *array = container_of(map, struct bpf_array, map);
 500	unsigned int cpu = smp_processor_id();
 501	u64 index = flags & BPF_F_INDEX_MASK;
 502	struct bpf_event_entry *ee;
 503
 504	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 505		return -EINVAL;
 506	if (index == BPF_F_CURRENT_CPU)
 507		index = cpu;
 508	if (unlikely(index >= array->map.max_entries))
 509		return -E2BIG;
 510
 511	ee = READ_ONCE(array->ptrs[index]);
 512	if (!ee)
 513		return -ENOENT;
 514
 515	return perf_event_read_local(ee->event, value, enabled, running);
 516}
 517
 518BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 519{
 520	u64 value = 0;
 521	int err;
 522
 523	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
 524	/*
 525	 * this api is ugly since we miss [-22..-2] range of valid
 526	 * counter values, but that's uapi
 527	 */
 528	if (err)
 529		return err;
 530	return value;
 531}
 532
 533static const struct bpf_func_proto bpf_perf_event_read_proto = {
 534	.func		= bpf_perf_event_read,
 535	.gpl_only	= true,
 536	.ret_type	= RET_INTEGER,
 537	.arg1_type	= ARG_CONST_MAP_PTR,
 538	.arg2_type	= ARG_ANYTHING,
 539};
 540
 541BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
 542	   struct bpf_perf_event_value *, buf, u32, size)
 543{
 544	int err = -EINVAL;
 545
 546	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 547		goto clear;
 548	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
 549				   &buf->running);
 550	if (unlikely(err))
 551		goto clear;
 552	return 0;
 553clear:
 554	memset(buf, 0, size);
 555	return err;
 556}
 557
 558static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 559	.func		= bpf_perf_event_read_value,
 560	.gpl_only	= true,
 561	.ret_type	= RET_INTEGER,
 562	.arg1_type	= ARG_CONST_MAP_PTR,
 563	.arg2_type	= ARG_ANYTHING,
 564	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
 565	.arg4_type	= ARG_CONST_SIZE,
 566};
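
/*
 * Both readers expect a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots were
 * populated from user space with perf event FDs, typically one per CPU.
 * A hedged sketch reading the current CPU's slot (map sizing and event
 * setup are assumed to be done by the loader):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *		__uint(max_entries, 128);	// >= number of possible CPUs
 *	} counters SEC(".maps");
 *
 *	...
 *	struct bpf_perf_event_value v = {};
 *
 *	if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *				       &v, sizeof(v))) {
 *		// v.counter, v.enabled and v.running are now valid
 *	}
 */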
 567
 568static __always_inline u64
 569__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 570			u64 flags, struct perf_sample_data *sd)
 571{
 572	struct bpf_array *array = container_of(map, struct bpf_array, map);
 573	unsigned int cpu = smp_processor_id();
 574	u64 index = flags & BPF_F_INDEX_MASK;
 575	struct bpf_event_entry *ee;
 576	struct perf_event *event;
 577
 578	if (index == BPF_F_CURRENT_CPU)
 579		index = cpu;
 580	if (unlikely(index >= array->map.max_entries))
 581		return -E2BIG;
 582
 583	ee = READ_ONCE(array->ptrs[index]);
 584	if (!ee)
 585		return -ENOENT;
 586
 587	event = ee->event;
 588	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 589		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 590		return -EINVAL;
 591
 592	if (unlikely(event->oncpu != cpu))
 593		return -EOPNOTSUPP;
 594
 595	return perf_event_output(event, sd, regs);
 596}
 597
 598/*
 599 * Support executing tracepoints in normal, irq, and nmi context that each call
 600 * bpf_perf_event_output
 601 */
 602struct bpf_trace_sample_data {
 603	struct perf_sample_data sds[3];
 604};
 605
 606static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
 607static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 608BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 609	   u64, flags, void *, data, u64, size)
 610{
 611	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
 612	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 613	struct perf_raw_record raw = {
 614		.frag = {
 615			.size = size,
 616			.data = data,
 617		},
 618	};
 619	struct perf_sample_data *sd;
 620	int err;
 621
 622	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
 623		err = -EBUSY;
 624		goto out;
 625	}
 626
 627	sd = &sds->sds[nest_level - 1];
 628
 629	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
 630		err = -EINVAL;
 631		goto out;
 632	}
 633
 634	perf_sample_data_init(sd, 0, 0);
 635	sd->raw = &raw;
 636
 637	err = __bpf_perf_event_output(regs, map, flags, sd);
 638
 639out:
 640	this_cpu_dec(bpf_trace_nest_level);
 641	return err;
 642}
 643
 644static const struct bpf_func_proto bpf_perf_event_output_proto = {
 645	.func		= bpf_perf_event_output,
 646	.gpl_only	= true,
 647	.ret_type	= RET_INTEGER,
 648	.arg1_type	= ARG_PTR_TO_CTX,
 649	.arg2_type	= ARG_CONST_MAP_PTR,
 650	.arg3_type	= ARG_ANYTHING,
 651	.arg4_type	= ARG_PTR_TO_MEM,
 652	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 653};
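
/*
 * Typical producer/consumer pattern (hedged sketch): the program pushes a
 * fixed-size record into a BPF_MAP_TYPE_PERF_EVENT_ARRAY and user space
 * drains it with libbpf's perf_buffer__new()/perf_buffer__poll().
 * BPF_F_CURRENT_CPU selects this CPU's ring, which matches the
 * event->oncpu check in __bpf_perf_event_output() above:
 *
 *	struct event { u32 tgid; char comm[16]; };
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/do_nanosleep")
 *	int BPF_KPROBE(emit_event)
 *	{
 *		struct event e = {};
 *
 *		e.tgid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(e.comm, sizeof(e.comm));
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */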
 654
 655static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
 656struct bpf_nested_pt_regs {
 657	struct pt_regs regs[3];
 658};
 659static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
 660static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 661
 662u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 663		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 664{
 665	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 666	struct perf_raw_frag frag = {
 667		.copy		= ctx_copy,
 668		.size		= ctx_size,
 669		.data		= ctx,
 670	};
 671	struct perf_raw_record raw = {
 672		.frag = {
 673			{
 674				.next	= ctx_size ? &frag : NULL,
 675			},
 676			.size	= meta_size,
 677			.data	= meta,
 678		},
 679	};
 680	struct perf_sample_data *sd;
 681	struct pt_regs *regs;
 682	u64 ret;
 683
 684	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
 685		ret = -EBUSY;
 686		goto out;
 687	}
 688	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
 689	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 690
 691	perf_fetch_caller_regs(regs);
 692	perf_sample_data_init(sd, 0, 0);
 693	sd->raw = &raw;
 694
 695	ret = __bpf_perf_event_output(regs, map, flags, sd);
 696out:
 697	this_cpu_dec(bpf_event_output_nest_level);
 698	return ret;
 699}
 700
 701BPF_CALL_0(bpf_get_current_task)
 702{
 703	return (long) current;
 704}
 705
 706const struct bpf_func_proto bpf_get_current_task_proto = {
 707	.func		= bpf_get_current_task,
 708	.gpl_only	= true,
 709	.ret_type	= RET_INTEGER,
 710};
 711
 712BPF_CALL_0(bpf_get_current_task_btf)
 713{
 714	return (unsigned long) current;
 715}
 716
 717BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)
 718
 719static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
 720	.func		= bpf_get_current_task_btf,
 721	.gpl_only	= true,
 722	.ret_type	= RET_PTR_TO_BTF_ID,
 723	.ret_btf_id	= &bpf_get_current_btf_ids[0],
 724};
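
/*
 * The practical difference between the two: bpf_get_current_task()
 * returns a plain integer that the program has to probe-read through,
 * while bpf_get_current_task_btf() returns a BTF-typed pointer whose
 * fields the verifier lets the program load directly. Hedged sketch,
 * assuming vmlinux.h:
 *
 *	struct task_struct *t = bpf_get_current_task_btf();
 *	int prio = t->prio;			// direct, BTF-checked load
 *
 *	struct task_struct *t2 = (void *)bpf_get_current_task();
 *	int prio2;
 *	bpf_probe_read_kernel(&prio2, sizeof(prio2), &t2->prio);
 */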
 725
 726BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
 727{
 728	struct bpf_array *array = container_of(map, struct bpf_array, map);
 729	struct cgroup *cgrp;
 730
 731	if (unlikely(idx >= array->map.max_entries))
 732		return -E2BIG;
 733
 734	cgrp = READ_ONCE(array->ptrs[idx]);
 735	if (unlikely(!cgrp))
 736		return -EAGAIN;
 737
 738	return task_under_cgroup_hierarchy(current, cgrp);
 739}
 740
 741static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
 742	.func           = bpf_current_task_under_cgroup,
 743	.gpl_only       = false,
 744	.ret_type       = RET_INTEGER,
 745	.arg1_type      = ARG_CONST_MAP_PTR,
 746	.arg2_type      = ARG_ANYTHING,
 747};
 748
 749struct send_signal_irq_work {
 750	struct irq_work irq_work;
 751	struct task_struct *task;
 752	u32 sig;
 753	enum pid_type type;
 754};
 755
 756static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
 757
 758static void do_bpf_send_signal(struct irq_work *entry)
 759{
 760	struct send_signal_irq_work *work;
 761
 762	work = container_of(entry, struct send_signal_irq_work, irq_work);
 763	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
 764}
 765
 766static int bpf_send_signal_common(u32 sig, enum pid_type type)
 767{
 768	struct send_signal_irq_work *work = NULL;
 769
 770	/* Similar to bpf_probe_write_user, the task needs to be
 771	 * in a sound condition and kernel memory access must be
 772	 * permitted in order to send a signal to the current
 773	 * task.
 774	 */
 775	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
 776		return -EPERM;
 777	if (unlikely(uaccess_kernel()))
 778		return -EPERM;
 779	if (unlikely(!nmi_uaccess_okay()))
 780		return -EPERM;
 781
 782	if (irqs_disabled()) {
 783		/* Do an early check on signal validity. Otherwise,
 784		 * the error is lost in deferred irq_work.
 785		 */
 786		if (unlikely(!valid_signal(sig)))
 787			return -EINVAL;
 788
 789		work = this_cpu_ptr(&send_signal_work);
 790		if (irq_work_is_busy(&work->irq_work))
 791			return -EBUSY;
 792
 793		/* Add the current task, which is the target of the signal,
 794		 * to the irq_work. The current task may no longer be current
 795		 * by the time the queued irq_work gets executed.
 796		 */
 797		work->task = current;
 798		work->sig = sig;
 799		work->type = type;
 800		irq_work_queue(&work->irq_work);
 801		return 0;
 802	}
 803
 804	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
 805}
 806
 807BPF_CALL_1(bpf_send_signal, u32, sig)
 808{
 809	return bpf_send_signal_common(sig, PIDTYPE_TGID);
 810}
 811
 812static const struct bpf_func_proto bpf_send_signal_proto = {
 813	.func		= bpf_send_signal,
 814	.gpl_only	= false,
 815	.ret_type	= RET_INTEGER,
 816	.arg1_type	= ARG_ANYTHING,
 817};
 818
 819BPF_CALL_1(bpf_send_signal_thread, u32, sig)
 820{
 821	return bpf_send_signal_common(sig, PIDTYPE_PID);
 822}
 823
 824static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 825	.func		= bpf_send_signal_thread,
 826	.gpl_only	= false,
 827	.ret_type	= RET_INTEGER,
 828	.arg1_type	= ARG_ANYTHING,
 829};
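
/*
 * The two variants differ only in scope: bpf_send_signal() signals the
 * whole thread group (PIDTYPE_TGID) while bpf_send_signal_thread() hits
 * only the calling thread (PIDTYPE_PID). Hedged sketch; suspect_tgid is
 * a hypothetical value the program compares against:
 *
 *	if ((bpf_get_current_pid_tgid() >> 32) == suspect_tgid)
 *		bpf_send_signal(9);	// SIGKILL
 */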
 830
 831BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 832{
 833	long len;
 834	char *p;
 835
 836	if (!sz)
 837		return 0;
 838
 839	p = d_path(path, buf, sz);
 840	if (IS_ERR(p)) {
 841		len = PTR_ERR(p);
 842	} else {
 843		len = buf + sz - p;
 844		memmove(buf, p, len);
 845	}
 846
 847	return len;
 848}
 849
 850BTF_SET_START(btf_allowlist_d_path)
 851#ifdef CONFIG_SECURITY
 852BTF_ID(func, security_file_permission)
 853BTF_ID(func, security_inode_getattr)
 854BTF_ID(func, security_file_open)
 855#endif
 856#ifdef CONFIG_SECURITY_PATH
 857BTF_ID(func, security_path_truncate)
 858#endif
 859BTF_ID(func, vfs_truncate)
 860BTF_ID(func, vfs_fallocate)
 861BTF_ID(func, dentry_open)
 862BTF_ID(func, vfs_getattr)
 863BTF_ID(func, filp_close)
 864BTF_SET_END(btf_allowlist_d_path)
 865
 866static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 867{
 868	if (prog->type == BPF_PROG_TYPE_TRACING &&
 869	    prog->expected_attach_type == BPF_TRACE_ITER)
 870		return true;
 871
 872	if (prog->type == BPF_PROG_TYPE_LSM)
 873		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
 874
 875	return btf_id_set_contains(&btf_allowlist_d_path,
 876				   prog->aux->attach_btf_id);
 877}
 878
 879BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
 880
 881static const struct bpf_func_proto bpf_d_path_proto = {
 882	.func		= bpf_d_path,
 883	.gpl_only	= false,
 884	.ret_type	= RET_INTEGER,
 885	.arg1_type	= ARG_PTR_TO_BTF_ID,
 886	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
 887	.arg2_type	= ARG_PTR_TO_MEM,
 888	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 889	.allowed	= bpf_d_path_allowed,
 890};
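
/*
 * Because of the .allowed callback above, bpf_d_path() is only usable
 * from BPF_TRACE_ITER programs, LSM programs on sleepable hooks, and
 * fentry/fexit attachments to the allow-listed functions. Hedged sketch
 * on one of the allow-listed entries, assuming vmlinux.h and libbpf:
 *
 *	SEC("fentry/filp_close")
 *	int BPF_PROG(log_close, struct file *filp)
 *	{
 *		char path[256];
 *		long len = bpf_d_path(&filp->f_path, path, sizeof(path));
 *
 *		// on success, len counts the bytes written incl. the NUL
 *		return 0;
 *	}
 */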
 891
 892#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
 893			 BTF_F_PTR_RAW | BTF_F_ZERO)
 894
 895static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
 896				  u64 flags, const struct btf **btf,
 897				  s32 *btf_id)
 898{
 899	const struct btf_type *t;
 900
 901	if (unlikely(flags & ~(BTF_F_ALL)))
 902		return -EINVAL;
 903
 904	if (btf_ptr_size != sizeof(struct btf_ptr))
 905		return -EINVAL;
 906
 907	*btf = bpf_get_btf_vmlinux();
 908
 909	if (IS_ERR_OR_NULL(*btf))
 910		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
 911
 912	if (ptr->type_id > 0)
 913		*btf_id = ptr->type_id;
 914	else
 915		return -EINVAL;
 916
 917	if (*btf_id > 0)
 918		t = btf_type_by_id(*btf, *btf_id);
 919	if (*btf_id <= 0 || !t)
 920		return -ENOENT;
 921
 922	return 0;
 923}
 924
 925BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
 926	   u32, btf_ptr_size, u64, flags)
 927{
 928	const struct btf *btf;
 929	s32 btf_id;
 930	int ret;
 931
 932	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
 933	if (ret)
 934		return ret;
 935
 936	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
 937				      flags);
 938}
 939
 940const struct bpf_func_proto bpf_snprintf_btf_proto = {
 941	.func		= bpf_snprintf_btf,
 942	.gpl_only	= false,
 943	.ret_type	= RET_INTEGER,
 944	.arg1_type	= ARG_PTR_TO_MEM,
 945	.arg2_type	= ARG_CONST_SIZE,
 946	.arg3_type	= ARG_PTR_TO_MEM,
 947	.arg4_type	= ARG_CONST_SIZE,
 948	.arg5_type	= ARG_ANYTHING,
 949};
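
/*
 * Hedged sketch of the snprintf variant: the program fills a struct
 * btf_ptr that names a vmlinux type by BTF id and gets a textual dump of
 * the pointed-to object back in str. The id is typically resolved with
 * libbpf's bpf_core_type_id_kernel(); vmlinux.h is assumed:
 *
 *	char out[256];
 *	struct btf_ptr p = {
 *		.ptr	 = bpf_get_current_task_btf(),
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_COMPACT);
 */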
 950
 951const struct bpf_func_proto *
 952bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 953{
 954	switch (func_id) {
 955	case BPF_FUNC_map_lookup_elem:
 956		return &bpf_map_lookup_elem_proto;
 957	case BPF_FUNC_map_update_elem:
 958		return &bpf_map_update_elem_proto;
 959	case BPF_FUNC_map_delete_elem:
 960		return &bpf_map_delete_elem_proto;
 961	case BPF_FUNC_map_push_elem:
 962		return &bpf_map_push_elem_proto;
 963	case BPF_FUNC_map_pop_elem:
 964		return &bpf_map_pop_elem_proto;
 965	case BPF_FUNC_map_peek_elem:
 966		return &bpf_map_peek_elem_proto;
 967	case BPF_FUNC_ktime_get_ns:
 968		return &bpf_ktime_get_ns_proto;
 969	case BPF_FUNC_ktime_get_boot_ns:
 970		return &bpf_ktime_get_boot_ns_proto;
 971	case BPF_FUNC_ktime_get_coarse_ns:
 972		return &bpf_ktime_get_coarse_ns_proto;
 973	case BPF_FUNC_tail_call:
 974		return &bpf_tail_call_proto;
 975	case BPF_FUNC_get_current_pid_tgid:
 976		return &bpf_get_current_pid_tgid_proto;
 977	case BPF_FUNC_get_current_task:
 978		return &bpf_get_current_task_proto;
 979	case BPF_FUNC_get_current_task_btf:
 980		return &bpf_get_current_task_btf_proto;
 981	case BPF_FUNC_get_current_uid_gid:
 982		return &bpf_get_current_uid_gid_proto;
 983	case BPF_FUNC_get_current_comm:
 984		return &bpf_get_current_comm_proto;
 985	case BPF_FUNC_trace_printk:
 986		return bpf_get_trace_printk_proto();
 987	case BPF_FUNC_get_smp_processor_id:
 988		return &bpf_get_smp_processor_id_proto;
 989	case BPF_FUNC_get_numa_node_id:
 990		return &bpf_get_numa_node_id_proto;
 991	case BPF_FUNC_perf_event_read:
 992		return &bpf_perf_event_read_proto;
 993	case BPF_FUNC_current_task_under_cgroup:
 994		return &bpf_current_task_under_cgroup_proto;
 995	case BPF_FUNC_get_prandom_u32:
 996		return &bpf_get_prandom_u32_proto;
 997	case BPF_FUNC_probe_write_user:
 998		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
 999		       NULL : bpf_get_probe_write_proto();
1000	case BPF_FUNC_probe_read_user:
1001		return &bpf_probe_read_user_proto;
1002	case BPF_FUNC_probe_read_kernel:
1003		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1004		       NULL : &bpf_probe_read_kernel_proto;
1005	case BPF_FUNC_probe_read_user_str:
1006		return &bpf_probe_read_user_str_proto;
1007	case BPF_FUNC_probe_read_kernel_str:
1008		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1009		       NULL : &bpf_probe_read_kernel_str_proto;
1010#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1011	case BPF_FUNC_probe_read:
1012		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1013		       NULL : &bpf_probe_read_compat_proto;
1014	case BPF_FUNC_probe_read_str:
1015		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1016		       NULL : &bpf_probe_read_compat_str_proto;
1017#endif
1018#ifdef CONFIG_CGROUPS
1019	case BPF_FUNC_get_current_cgroup_id:
1020		return &bpf_get_current_cgroup_id_proto;
1021	case BPF_FUNC_get_current_ancestor_cgroup_id:
1022		return &bpf_get_current_ancestor_cgroup_id_proto;
1023#endif
1024	case BPF_FUNC_send_signal:
1025		return &bpf_send_signal_proto;
1026	case BPF_FUNC_send_signal_thread:
1027		return &bpf_send_signal_thread_proto;
1028	case BPF_FUNC_perf_event_read_value:
1029		return &bpf_perf_event_read_value_proto;
1030	case BPF_FUNC_get_ns_current_pid_tgid:
1031		return &bpf_get_ns_current_pid_tgid_proto;
1032	case BPF_FUNC_ringbuf_output:
1033		return &bpf_ringbuf_output_proto;
1034	case BPF_FUNC_ringbuf_reserve:
1035		return &bpf_ringbuf_reserve_proto;
1036	case BPF_FUNC_ringbuf_submit:
1037		return &bpf_ringbuf_submit_proto;
1038	case BPF_FUNC_ringbuf_discard:
1039		return &bpf_ringbuf_discard_proto;
1040	case BPF_FUNC_ringbuf_query:
1041		return &bpf_ringbuf_query_proto;
1042	case BPF_FUNC_jiffies64:
1043		return &bpf_jiffies64_proto;
1044	case BPF_FUNC_get_task_stack:
1045		return &bpf_get_task_stack_proto;
1046	case BPF_FUNC_copy_from_user:
1047		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
1048	case BPF_FUNC_snprintf_btf:
1049		return &bpf_snprintf_btf_proto;
1050	case BPF_FUNC_per_cpu_ptr:
1051		return &bpf_per_cpu_ptr_proto;
1052	case BPF_FUNC_this_cpu_ptr:
1053		return &bpf_this_cpu_ptr_proto;
1054	case BPF_FUNC_task_storage_get:
1055		return &bpf_task_storage_get_proto;
1056	case BPF_FUNC_task_storage_delete:
1057		return &bpf_task_storage_delete_proto;
1058	case BPF_FUNC_for_each_map_elem:
1059		return &bpf_for_each_map_elem_proto;
1060	case BPF_FUNC_snprintf:
1061		return &bpf_snprintf_proto;
1062	default:
1063		return NULL;
1064	}
1065}
1066
1067static const struct bpf_func_proto *
1068kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1069{
1070	switch (func_id) {
1071	case BPF_FUNC_perf_event_output:
1072		return &bpf_perf_event_output_proto;
1073	case BPF_FUNC_get_stackid:
1074		return &bpf_get_stackid_proto;
1075	case BPF_FUNC_get_stack:
1076		return &bpf_get_stack_proto;
1077#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1078	case BPF_FUNC_override_return:
1079		return &bpf_override_return_proto;
1080#endif
1081	default:
1082		return bpf_tracing_func_proto(func_id, prog);
1083	}
1084}
1085
1086/* bpf+kprobe programs can access fields of 'struct pt_regs' */
1087static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1088					const struct bpf_prog *prog,
1089					struct bpf_insn_access_aux *info)
1090{
1091	if (off < 0 || off >= sizeof(struct pt_regs))
1092		return false;
1093	if (type != BPF_READ)
1094		return false;
1095	if (off % size != 0)
1096		return false;
1097	/*
1098	 * Assertion for 32 bit to make sure last 8 byte access
1099	 * (BPF_DW) to the last 4 byte member is disallowed.
1100	 */
1101	if (off + size > sizeof(struct pt_regs))
1102		return false;
1103
1104	return true;
1105}
1106
1107const struct bpf_verifier_ops kprobe_verifier_ops = {
1108	.get_func_proto  = kprobe_prog_func_proto,
1109	.is_valid_access = kprobe_prog_is_valid_access,
1110};
1111
1112const struct bpf_prog_ops kprobe_prog_ops = {
1113};
1114
1115BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1116	   u64, flags, void *, data, u64, size)
1117{
1118	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1119
1120	/*
1121	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1122	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1123	 * from there and call the same bpf_perf_event_output() helper inline.
1124	 */
1125	return ____bpf_perf_event_output(regs, map, flags, data, size);
1126}
1127
1128static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1129	.func		= bpf_perf_event_output_tp,
1130	.gpl_only	= true,
1131	.ret_type	= RET_INTEGER,
1132	.arg1_type	= ARG_PTR_TO_CTX,
1133	.arg2_type	= ARG_CONST_MAP_PTR,
1134	.arg3_type	= ARG_ANYTHING,
1135	.arg4_type	= ARG_PTR_TO_MEM,
1136	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1137};
1138
1139BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1140	   u64, flags)
1141{
1142	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1143
1144	/*
1145	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1146	 * the other helper's function body cannot be inlined due to being
1147	 * external, thus we need to call raw helper function.
1148	 */
1149	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1150			       flags, 0, 0);
1151}
1152
1153static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1154	.func		= bpf_get_stackid_tp,
1155	.gpl_only	= true,
1156	.ret_type	= RET_INTEGER,
1157	.arg1_type	= ARG_PTR_TO_CTX,
1158	.arg2_type	= ARG_CONST_MAP_PTR,
1159	.arg3_type	= ARG_ANYTHING,
1160};
1161
1162BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1163	   u64, flags)
1164{
1165	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1166
1167	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1168			     (unsigned long) size, flags, 0);
1169}
1170
1171static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1172	.func		= bpf_get_stack_tp,
1173	.gpl_only	= true,
1174	.ret_type	= RET_INTEGER,
1175	.arg1_type	= ARG_PTR_TO_CTX,
1176	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1177	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1178	.arg4_type	= ARG_ANYTHING,
1179};
1180
1181static const struct bpf_func_proto *
1182tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1183{
1184	switch (func_id) {
1185	case BPF_FUNC_perf_event_output:
1186		return &bpf_perf_event_output_proto_tp;
1187	case BPF_FUNC_get_stackid:
1188		return &bpf_get_stackid_proto_tp;
1189	case BPF_FUNC_get_stack:
1190		return &bpf_get_stack_proto_tp;
1191	default:
1192		return bpf_tracing_func_proto(func_id, prog);
1193	}
1194}
1195
1196static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1197				    const struct bpf_prog *prog,
1198				    struct bpf_insn_access_aux *info)
1199{
1200	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1201		return false;
1202	if (type != BPF_READ)
1203		return false;
1204	if (off % size != 0)
1205		return false;
1206
1207	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1208	return true;
1209}
1210
1211const struct bpf_verifier_ops tracepoint_verifier_ops = {
1212	.get_func_proto  = tp_prog_func_proto,
1213	.is_valid_access = tp_prog_is_valid_access,
1214};
1215
1216const struct bpf_prog_ops tracepoint_prog_ops = {
1217};
1218
1219BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1220	   struct bpf_perf_event_value *, buf, u32, size)
1221{
1222	int err = -EINVAL;
1223
1224	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1225		goto clear;
1226	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1227				    &buf->running);
1228	if (unlikely(err))
1229		goto clear;
1230	return 0;
1231clear:
1232	memset(buf, 0, size);
1233	return err;
1234}
1235
1236static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1237         .func           = bpf_perf_prog_read_value,
1238         .gpl_only       = true,
1239         .ret_type       = RET_INTEGER,
1240         .arg1_type      = ARG_PTR_TO_CTX,
1241         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1242         .arg3_type      = ARG_CONST_SIZE,
1243};
1244
1245BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1246	   void *, buf, u32, size, u64, flags)
1247{
1248#ifndef CONFIG_X86
1249	return -ENOENT;
1250#else
1251	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1252	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1253	u32 to_copy;
1254
1255	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1256		return -EINVAL;
1257
1258	if (unlikely(!br_stack))
1259		return -EINVAL;
1260
1261	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1262		return br_stack->nr * br_entry_size;
1263
1264	if (!buf || (size % br_entry_size != 0))
1265		return -EINVAL;
1266
1267	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1268	memcpy(buf, br_stack->entries, to_copy);
1269
1270	return to_copy;
1271#endif
1272}
1273
1274static const struct bpf_func_proto bpf_read_branch_records_proto = {
1275	.func           = bpf_read_branch_records,
1276	.gpl_only       = true,
1277	.ret_type       = RET_INTEGER,
1278	.arg1_type      = ARG_PTR_TO_CTX,
1279	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1280	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1281	.arg4_type      = ARG_ANYTHING,
1282};
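
/*
 * Only meaningful for perf_event programs on hardware that records
 * branch stacks (x86 LBR). Hedged two-step sketch from a SEC("perf_event")
 * program: query the size first, then copy:
 *
 *	struct perf_branch_entry ents[16];
 *	long sz, copied;
 *
 *	sz = bpf_read_branch_records(ctx, NULL, 0,
 *				     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	if (sz > 0)
 *		copied = bpf_read_branch_records(ctx, ents, sizeof(ents), 0);
 */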
1283
1284static const struct bpf_func_proto *
1285pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1286{
1287	switch (func_id) {
1288	case BPF_FUNC_perf_event_output:
1289		return &bpf_perf_event_output_proto_tp;
1290	case BPF_FUNC_get_stackid:
1291		return &bpf_get_stackid_proto_pe;
1292	case BPF_FUNC_get_stack:
1293		return &bpf_get_stack_proto_pe;
1294	case BPF_FUNC_perf_prog_read_value:
1295		return &bpf_perf_prog_read_value_proto;
1296	case BPF_FUNC_read_branch_records:
1297		return &bpf_read_branch_records_proto;
1298	default:
1299		return bpf_tracing_func_proto(func_id, prog);
1300	}
1301}
1302
1303/*
1304 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1305 * to avoid potential recursive reuse issue when/if tracepoints are added
1306 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1307 *
1308 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1309 * in normal, irq, and nmi context.
1310 */
1311struct bpf_raw_tp_regs {
1312	struct pt_regs regs[3];
1313};
1314static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1315static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1316static struct pt_regs *get_bpf_raw_tp_regs(void)
1317{
1318	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1319	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1320
1321	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1322		this_cpu_dec(bpf_raw_tp_nest_level);
1323		return ERR_PTR(-EBUSY);
1324	}
1325
1326	return &tp_regs->regs[nest_level - 1];
1327}
1328
1329static void put_bpf_raw_tp_regs(void)
1330{
1331	this_cpu_dec(bpf_raw_tp_nest_level);
1332}
1333
1334BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1335	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1336{
1337	struct pt_regs *regs = get_bpf_raw_tp_regs();
1338	int ret;
1339
1340	if (IS_ERR(regs))
1341		return PTR_ERR(regs);
1342
1343	perf_fetch_caller_regs(regs);
1344	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1345
1346	put_bpf_raw_tp_regs();
1347	return ret;
1348}
1349
1350static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1351	.func		= bpf_perf_event_output_raw_tp,
1352	.gpl_only	= true,
1353	.ret_type	= RET_INTEGER,
1354	.arg1_type	= ARG_PTR_TO_CTX,
1355	.arg2_type	= ARG_CONST_MAP_PTR,
1356	.arg3_type	= ARG_ANYTHING,
1357	.arg4_type	= ARG_PTR_TO_MEM,
1358	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1359};
1360
1361extern const struct bpf_func_proto bpf_skb_output_proto;
1362extern const struct bpf_func_proto bpf_xdp_output_proto;
1363
1364BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1365	   struct bpf_map *, map, u64, flags)
1366{
1367	struct pt_regs *regs = get_bpf_raw_tp_regs();
1368	int ret;
1369
1370	if (IS_ERR(regs))
1371		return PTR_ERR(regs);
1372
1373	perf_fetch_caller_regs(regs);
1374	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1375	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1376			      flags, 0, 0);
1377	put_bpf_raw_tp_regs();
1378	return ret;
1379}
1380
1381static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1382	.func		= bpf_get_stackid_raw_tp,
1383	.gpl_only	= true,
1384	.ret_type	= RET_INTEGER,
1385	.arg1_type	= ARG_PTR_TO_CTX,
1386	.arg2_type	= ARG_CONST_MAP_PTR,
1387	.arg3_type	= ARG_ANYTHING,
1388};
1389
1390BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1391	   void *, buf, u32, size, u64, flags)
1392{
1393	struct pt_regs *regs = get_bpf_raw_tp_regs();
1394	int ret;
1395
1396	if (IS_ERR(regs))
1397		return PTR_ERR(regs);
1398
1399	perf_fetch_caller_regs(regs);
1400	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1401			    (unsigned long) size, flags, 0);
1402	put_bpf_raw_tp_regs();
1403	return ret;
1404}
1405
1406static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1407	.func		= bpf_get_stack_raw_tp,
1408	.gpl_only	= true,
1409	.ret_type	= RET_INTEGER,
1410	.arg1_type	= ARG_PTR_TO_CTX,
1411	.arg2_type	= ARG_PTR_TO_MEM,
1412	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1413	.arg4_type	= ARG_ANYTHING,
1414};
1415
1416static const struct bpf_func_proto *
1417raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1418{
1419	switch (func_id) {
1420	case BPF_FUNC_perf_event_output:
1421		return &bpf_perf_event_output_proto_raw_tp;
1422	case BPF_FUNC_get_stackid:
1423		return &bpf_get_stackid_proto_raw_tp;
1424	case BPF_FUNC_get_stack:
1425		return &bpf_get_stack_proto_raw_tp;
1426	default:
1427		return bpf_tracing_func_proto(func_id, prog);
1428	}
1429}
1430
1431const struct bpf_func_proto *
1432tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1433{
1434	switch (func_id) {
1435#ifdef CONFIG_NET
1436	case BPF_FUNC_skb_output:
1437		return &bpf_skb_output_proto;
1438	case BPF_FUNC_xdp_output:
1439		return &bpf_xdp_output_proto;
1440	case BPF_FUNC_skc_to_tcp6_sock:
1441		return &bpf_skc_to_tcp6_sock_proto;
1442	case BPF_FUNC_skc_to_tcp_sock:
1443		return &bpf_skc_to_tcp_sock_proto;
1444	case BPF_FUNC_skc_to_tcp_timewait_sock:
1445		return &bpf_skc_to_tcp_timewait_sock_proto;
1446	case BPF_FUNC_skc_to_tcp_request_sock:
1447		return &bpf_skc_to_tcp_request_sock_proto;
1448	case BPF_FUNC_skc_to_udp6_sock:
1449		return &bpf_skc_to_udp6_sock_proto;
1450	case BPF_FUNC_sk_storage_get:
1451		return &bpf_sk_storage_get_tracing_proto;
1452	case BPF_FUNC_sk_storage_delete:
1453		return &bpf_sk_storage_delete_tracing_proto;
1454	case BPF_FUNC_sock_from_file:
1455		return &bpf_sock_from_file_proto;
1456	case BPF_FUNC_get_socket_cookie:
1457		return &bpf_get_socket_ptr_cookie_proto;
1458#endif
1459	case BPF_FUNC_seq_printf:
1460		return prog->expected_attach_type == BPF_TRACE_ITER ?
1461		       &bpf_seq_printf_proto :
1462		       NULL;
1463	case BPF_FUNC_seq_write:
1464		return prog->expected_attach_type == BPF_TRACE_ITER ?
1465		       &bpf_seq_write_proto :
1466		       NULL;
1467	case BPF_FUNC_seq_printf_btf:
1468		return prog->expected_attach_type == BPF_TRACE_ITER ?
1469		       &bpf_seq_printf_btf_proto :
1470		       NULL;
1471	case BPF_FUNC_d_path:
1472		return &bpf_d_path_proto;
1473	default:
1474		return raw_tp_prog_func_proto(func_id, prog);
1475	}
1476}
1477
1478static bool raw_tp_prog_is_valid_access(int off, int size,
1479					enum bpf_access_type type,
1480					const struct bpf_prog *prog,
1481					struct bpf_insn_access_aux *info)
1482{
1483	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1484		return false;
1485	if (type != BPF_READ)
1486		return false;
1487	if (off % size != 0)
1488		return false;
1489	return true;
1490}
1491
1492static bool tracing_prog_is_valid_access(int off, int size,
1493					 enum bpf_access_type type,
1494					 const struct bpf_prog *prog,
1495					 struct bpf_insn_access_aux *info)
1496{
1497	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1498		return false;
1499	if (type != BPF_READ)
1500		return false;
1501	if (off % size != 0)
1502		return false;
1503	return btf_ctx_access(off, size, type, prog, info);
1504}
1505
1506int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1507				     const union bpf_attr *kattr,
1508				     union bpf_attr __user *uattr)
1509{
1510	return -ENOTSUPP;
1511}
1512
1513const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1514	.get_func_proto  = raw_tp_prog_func_proto,
1515	.is_valid_access = raw_tp_prog_is_valid_access,
1516};
1517
1518const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1519#ifdef CONFIG_NET
1520	.test_run = bpf_prog_test_run_raw_tp,
1521#endif
1522};
1523
1524const struct bpf_verifier_ops tracing_verifier_ops = {
1525	.get_func_proto  = tracing_prog_func_proto,
1526	.is_valid_access = tracing_prog_is_valid_access,
1527};
1528
1529const struct bpf_prog_ops tracing_prog_ops = {
1530	.test_run = bpf_prog_test_run_tracing,
1531};
1532
1533static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1534						 enum bpf_access_type type,
1535						 const struct bpf_prog *prog,
1536						 struct bpf_insn_access_aux *info)
1537{
1538	if (off == 0) {
1539		if (size != sizeof(u64) || type != BPF_READ)
1540			return false;
1541		info->reg_type = PTR_TO_TP_BUFFER;
1542	}
1543	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1544}
1545
1546const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1547	.get_func_proto  = raw_tp_prog_func_proto,
1548	.is_valid_access = raw_tp_writable_prog_is_valid_access,
1549};
1550
1551const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1552};
1553
1554static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1555				    const struct bpf_prog *prog,
1556				    struct bpf_insn_access_aux *info)
1557{
1558	const int size_u64 = sizeof(u64);
1559
1560	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1561		return false;
1562	if (type != BPF_READ)
1563		return false;
1564	if (off % size != 0) {
1565		if (sizeof(unsigned long) != 4)
1566			return false;
1567		if (size != 8)
1568			return false;
1569		if (off % size != 4)
1570			return false;
1571	}
1572
1573	switch (off) {
1574	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1575		bpf_ctx_record_field_size(info, size_u64);
1576		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1577			return false;
1578		break;
1579	case bpf_ctx_range(struct bpf_perf_event_data, addr):
1580		bpf_ctx_record_field_size(info, size_u64);
1581		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1582			return false;
1583		break;
1584	default:
1585		if (size != sizeof(long))
1586			return false;
1587	}
1588
1589	return true;
1590}
1591
1592static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1593				      const struct bpf_insn *si,
1594				      struct bpf_insn *insn_buf,
1595				      struct bpf_prog *prog, u32 *target_size)
1596{
1597	struct bpf_insn *insn = insn_buf;
1598
1599	switch (si->off) {
1600	case offsetof(struct bpf_perf_event_data, sample_period):
1601		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1602						       data), si->dst_reg, si->src_reg,
1603				      offsetof(struct bpf_perf_event_data_kern, data));
1604		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1605				      bpf_target_off(struct perf_sample_data, period, 8,
1606						     target_size));
1607		break;
1608	case offsetof(struct bpf_perf_event_data, addr):
1609		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1610						       data), si->dst_reg, si->src_reg,
1611				      offsetof(struct bpf_perf_event_data_kern, data));
1612		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1613				      bpf_target_off(struct perf_sample_data, addr, 8,
1614						     target_size));
1615		break;
1616	default:
1617		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1618						       regs), si->dst_reg, si->src_reg,
1619				      offsetof(struct bpf_perf_event_data_kern, regs));
1620		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1621				      si->off);
1622		break;
1623	}
1624
1625	return insn - insn_buf;
1626}
1627
1628const struct bpf_verifier_ops perf_event_verifier_ops = {
1629	.get_func_proto		= pe_prog_func_proto,
1630	.is_valid_access	= pe_prog_is_valid_access,
1631	.convert_ctx_access	= pe_prog_convert_ctx_access,
1632};
1633
1634const struct bpf_prog_ops perf_event_prog_ops = {
1635};
1636
1637static DEFINE_MUTEX(bpf_event_mutex);
1638
1639#define BPF_TRACE_MAX_PROGS 64
1640
1641int perf_event_attach_bpf_prog(struct perf_event *event,
1642			       struct bpf_prog *prog)
1643{
1644	struct bpf_prog_array *old_array;
1645	struct bpf_prog_array *new_array;
1646	int ret = -EEXIST;
1647
1648	/*
1649	 * Kprobe override only works if the kprobe is on the function entry,
1650	 * and only if the probed function is on the error-injection opt-in list.
1651	 */
1652	if (prog->kprobe_override &&
1653	    (!trace_kprobe_on_func_entry(event->tp_event) ||
1654	     !trace_kprobe_error_injectable(event->tp_event)))
1655		return -EINVAL;
1656
1657	mutex_lock(&bpf_event_mutex);
1658
1659	if (event->prog)
1660		goto unlock;
1661
1662	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1663	if (old_array &&
1664	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1665		ret = -E2BIG;
1666		goto unlock;
1667	}
1668
1669	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1670	if (ret < 0)
1671		goto unlock;
1672
1673	/* set the new array to event->tp_event and set event->prog */
1674	event->prog = prog;
1675	rcu_assign_pointer(event->tp_event->prog_array, new_array);
1676	bpf_prog_array_free(old_array);
1677
1678unlock:
1679	mutex_unlock(&bpf_event_mutex);
1680	return ret;
1681}
1682
1683void perf_event_detach_bpf_prog(struct perf_event *event)
1684{
1685	struct bpf_prog_array *old_array;
1686	struct bpf_prog_array *new_array;
1687	int ret;
1688
1689	mutex_lock(&bpf_event_mutex);
1690
1691	if (!event->prog)
1692		goto unlock;
1693
1694	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1695	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1696	if (ret == -ENOENT)
1697		goto unlock;
1698	if (ret < 0) {
1699		bpf_prog_array_delete_safe(old_array, event->prog);
1700	} else {
1701		rcu_assign_pointer(event->tp_event->prog_array, new_array);
1702		bpf_prog_array_free(old_array);
1703	}
1704
1705	bpf_prog_put(event->prog);
1706	event->prog = NULL;
1707
1708unlock:
1709	mutex_unlock(&bpf_event_mutex);
1710}
1711
1712int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1713{
1714	struct perf_event_query_bpf __user *uquery = info;
1715	struct perf_event_query_bpf query = {};
1716	struct bpf_prog_array *progs;
1717	u32 *ids, prog_cnt, ids_len;
1718	int ret;
1719
1720	if (!perfmon_capable())
1721		return -EPERM;
1722	if (event->attr.type != PERF_TYPE_TRACEPOINT)
1723		return -EINVAL;
1724	if (copy_from_user(&query, uquery, sizeof(query)))
1725		return -EFAULT;
1726
1727	ids_len = query.ids_len;
1728	if (ids_len > BPF_TRACE_MAX_PROGS)
1729		return -E2BIG;
1730	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1731	if (!ids)
1732		return -ENOMEM;
1733	/*
1734	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1735	 * is required when user only wants to check for uquery->prog_cnt.
1736	 * There is no need to check for it since the case is handled
1737	 * gracefully in bpf_prog_array_copy_info.
1738	 */
1739
1740	mutex_lock(&bpf_event_mutex);
1741	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1742	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1743	mutex_unlock(&bpf_event_mutex);
1744
1745	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1746	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1747		ret = -EFAULT;
1748
1749	kfree(ids);
1750	return ret;
1751}
1752
1753extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1754extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1755
1756struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1757{
1758	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1759
1760	for (; btp < __stop__bpf_raw_tp; btp++) {
1761		if (!strcmp(btp->tp->name, name))
1762			return btp;
1763	}
1764
1765	return bpf_get_raw_tracepoint_module(name);
1766}
1767
1768void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1769{
1770	struct module *mod;
1771
1772	preempt_disable();
1773	mod = __module_address((unsigned long)btp);
1774	module_put(mod);
1775	preempt_enable();
1776}
1777
1778static __always_inline
1779void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1780{
1781	cant_sleep();
1782	rcu_read_lock();
1783	(void) BPF_PROG_RUN(prog, args);
1784	rcu_read_unlock();
1785}
1786
1787#define UNPACK(...)			__VA_ARGS__
1788#define REPEAT_1(FN, DL, X, ...)	FN(X)
1789#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1790#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1791#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1792#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1793#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1794#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1795#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1796#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1797#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1798#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1799#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1800#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
1801
1802#define SARG(X)		u64 arg##X
1803#define COPY(X)		args[X] = arg##X
1804
1805#define __DL_COM	(,)
1806#define __DL_SEM	(;)
1807
1808#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1809
1810#define BPF_TRACE_DEFN_x(x)						\
1811	void bpf_trace_run##x(struct bpf_prog *prog,			\
1812			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
1813	{								\
1814		u64 args[x];						\
1815		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
1816		__bpf_trace_run(prog, args);				\
1817	}								\
1818	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1819BPF_TRACE_DEFN_x(1);
1820BPF_TRACE_DEFN_x(2);
1821BPF_TRACE_DEFN_x(3);
1822BPF_TRACE_DEFN_x(4);
1823BPF_TRACE_DEFN_x(5);
1824BPF_TRACE_DEFN_x(6);
1825BPF_TRACE_DEFN_x(7);
1826BPF_TRACE_DEFN_x(8);
1827BPF_TRACE_DEFN_x(9);
1828BPF_TRACE_DEFN_x(10);
1829BPF_TRACE_DEFN_x(11);
1830BPF_TRACE_DEFN_x(12);
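
/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands roughly to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * i.e. the raw tracepoint arguments are marshalled into a flat u64 array
 * that the attached BPF program sees as its context.
 */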
1831
1832static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1833{
1834	struct tracepoint *tp = btp->tp;
1835
1836	/*
1837	 * check that program doesn't access arguments beyond what's
1838	 * available in this tracepoint
1839	 */
1840	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1841		return -EINVAL;
1842
1843	if (prog->aux->max_tp_access > btp->writable_size)
1844		return -EINVAL;
1845
1846	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
1847						   prog);
1848}
1849
1850int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1851{
1852	return __bpf_probe_register(btp, prog);
1853}
1854
1855int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1856{
1857	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1858}
1859
1860int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1861			    u32 *fd_type, const char **buf,
1862			    u64 *probe_offset, u64 *probe_addr)
1863{
1864	bool is_tracepoint, is_syscall_tp;
1865	struct bpf_prog *prog;
1866	int flags, err = 0;
1867
1868	prog = event->prog;
1869	if (!prog)
1870		return -ENOENT;
1871
1872	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1873	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1874		return -EOPNOTSUPP;
1875
1876	*prog_id = prog->aux->id;
1877	flags = event->tp_event->flags;
1878	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1879	is_syscall_tp = is_syscall_trace_event(event->tp_event);
1880
1881	if (is_tracepoint || is_syscall_tp) {
1882		*buf = is_tracepoint ? event->tp_event->tp->name
1883				     : event->tp_event->name;
1884		*fd_type = BPF_FD_TYPE_TRACEPOINT;
1885		*probe_offset = 0x0;
1886		*probe_addr = 0x0;
1887	} else {
1888		/* kprobe/uprobe */
1889		err = -EOPNOTSUPP;
1890#ifdef CONFIG_KPROBE_EVENTS
1891		if (flags & TRACE_EVENT_FL_KPROBE)
1892			err = bpf_get_kprobe_info(event, fd_type, buf,
1893						  probe_offset, probe_addr,
1894						  event->attr.type == PERF_TYPE_TRACEPOINT);
1895#endif
1896#ifdef CONFIG_UPROBE_EVENTS
1897		if (flags & TRACE_EVENT_FL_UPROBE)
1898			err = bpf_get_uprobe_info(event, fd_type, buf,
1899						  probe_offset,
1900						  event->attr.type == PERF_TYPE_TRACEPOINT);
1901#endif
1902	}
1903
1904	return err;
1905}
1906
1907static int __init send_signal_irq_work_init(void)
1908{
1909	int cpu;
1910	struct send_signal_irq_work *work;
1911
1912	for_each_possible_cpu(cpu) {
1913		work = per_cpu_ptr(&send_signal_work, cpu);
1914		init_irq_work(&work->irq_work, do_bpf_send_signal);
1915	}
1916	return 0;
1917}
1918
1919subsys_initcall(send_signal_irq_work_init);
1920
1921#ifdef CONFIG_MODULES
1922static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1923			    void *module)
1924{
1925	struct bpf_trace_module *btm, *tmp;
1926	struct module *mod = module;
1927	int ret = 0;
1928
1929	if (mod->num_bpf_raw_events == 0 ||
1930	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1931		goto out;
1932
1933	mutex_lock(&bpf_module_mutex);
1934
1935	switch (op) {
1936	case MODULE_STATE_COMING:
1937		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1938		if (btm) {
1939			btm->module = module;
1940			list_add(&btm->list, &bpf_trace_modules);
1941		} else {
1942			ret = -ENOMEM;
1943		}
1944		break;
1945	case MODULE_STATE_GOING:
1946		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1947			if (btm->module == module) {
1948				list_del(&btm->list);
1949				kfree(btm);
1950				break;
1951			}
1952		}
1953		break;
1954	}
1955
1956	mutex_unlock(&bpf_module_mutex);
1957
1958out:
1959	return notifier_from_errno(ret);
1960}
1961
1962static struct notifier_block bpf_module_nb = {
1963	.notifier_call = bpf_event_notify,
1964};
1965
1966static int __init bpf_event_init(void)
1967{
1968	register_module_notifier(&bpf_module_nb);
1969	return 0;
1970}
1971
1972fs_initcall(bpf_event_init);
1973#endif /* CONFIG_MODULES */