   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/kernel.h>
   6#include <linux/types.h>
   7#include <linux/slab.h>
   8#include <linux/bpf.h>
   9#include <linux/bpf_verifier.h>
  10#include <linux/bpf_perf_event.h>
  11#include <linux/btf.h>
  12#include <linux/filter.h>
  13#include <linux/uaccess.h>
  14#include <linux/ctype.h>
  15#include <linux/kprobes.h>
  16#include <linux/spinlock.h>
  17#include <linux/syscalls.h>
  18#include <linux/error-injection.h>
  19#include <linux/btf_ids.h>
  20#include <linux/bpf_lsm.h>
  21#include <linux/fprobe.h>
  22#include <linux/bsearch.h>
  23#include <linux/sort.h>
  24#include <linux/key.h>
  25#include <linux/verification.h>
  26
  27#include <net/bpf_sk_storage.h>
  28
  29#include <uapi/linux/bpf.h>
  30#include <uapi/linux/btf.h>
  31
  32#include <asm/tlb.h>
  33
  34#include "trace_probe.h"
  35#include "trace.h"
  36
  37#define CREATE_TRACE_POINTS
  38#include "bpf_trace.h"
  39
  40#define bpf_event_rcu_dereference(p)					\
  41	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
  42
  43#ifdef CONFIG_MODULES
  44struct bpf_trace_module {
  45	struct module *module;
  46	struct list_head list;
  47};
  48
  49static LIST_HEAD(bpf_trace_modules);
  50static DEFINE_MUTEX(bpf_module_mutex);
  51
  52static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  53{
  54	struct bpf_raw_event_map *btp, *ret = NULL;
  55	struct bpf_trace_module *btm;
  56	unsigned int i;
  57
  58	mutex_lock(&bpf_module_mutex);
  59	list_for_each_entry(btm, &bpf_trace_modules, list) {
  60		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
  61			btp = &btm->module->bpf_raw_events[i];
  62			if (!strcmp(btp->tp->name, name)) {
  63				if (try_module_get(btm->module))
  64					ret = btp;
  65				goto out;
  66			}
  67		}
  68	}
  69out:
  70	mutex_unlock(&bpf_module_mutex);
  71	return ret;
  72}
  73#else
  74static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  75{
  76	return NULL;
  77}
  78#endif /* CONFIG_MODULES */
  79
  80u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  81u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  82
  83static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
  84				  u64 flags, const struct btf **btf,
  85				  s32 *btf_id);
  86static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
  87static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  88
  89/**
  90 * trace_call_bpf - invoke BPF program
  91 * @call: tracepoint event
  92 * @ctx: opaque context pointer
  93 *
  94 * kprobe handlers execute BPF programs via this helper.
  95 * Can be used from static tracepoints in the future.
  96 *
  97 * Return: BPF programs always return an integer which is interpreted by
   98 * the kprobe handler as:
  99 * 0 - return from kprobe (event is filtered out)
 100 * 1 - store kprobe event into ring buffer
 101 * Other values are reserved and currently alias to 1
 102 */
 103unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 104{
 105	unsigned int ret;
 106
 107	cant_sleep();
 108
 109	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 110		/*
 111		 * since some bpf program is already running on this cpu,
 112		 * don't call into another bpf program (same or different)
 113		 * and don't send kprobe event into ring-buffer,
 114		 * so return zero here
 115		 */
 116		ret = 0;
 117		goto out;
 118	}
 119
 120	/*
 121	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
 122	 * to all call sites, we did a bpf_prog_array_valid() there to check
 123	 * whether call->prog_array is empty or not, which is
 124	 * a heuristic to speed up execution.
 125	 *
  126	 * If the prog_array fetched by bpf_prog_array_valid() was
  127	 * non-NULL, we go into trace_call_bpf() and do the actual
  128	 * proper rcu_dereference() under the RCU lock.
  129	 * If it turns out that prog_array is NULL, we bail out.
  130	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
  131	 * was NULL, we skip the prog_array at the risk of missing
  132	 * out on events if it was updated between that check and the
  133	 * rcu_dereference(), which is an accepted risk.
 134	 */
 135	rcu_read_lock();
 136	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
 137				 ctx, bpf_prog_run);
 138	rcu_read_unlock();
 139
 140 out:
 141	__this_cpu_dec(bpf_prog_active);
 142
 143	return ret;
 144}
 145
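/*
 * Illustrative, hedged sketch (not part of this source file) of the return
 * value contract documented above: a kprobe BPF program returns 0 to filter
 * the event out and 1 to store it in the ring buffer. Assumes libbpf's
 * <bpf/bpf_helpers.h> and clang -target bpf; the probe target and the
 * "interesting_pid" global (set by the loader) are hypothetical.
 *
 *	volatile __u32 interesting_pid;
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int filter_open(struct pt_regs *ctx)
 *	{
 *		__u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *		// 1: keep the kprobe event, 0: drop it.
 *		return pid == interesting_pid ? 1 : 0;
 *	}
 */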
 146#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 147BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 148{
 149	regs_set_return_value(regs, rc);
 150	override_function_with_return(regs);
 151	return 0;
 152}
 153
 154static const struct bpf_func_proto bpf_override_return_proto = {
 155	.func		= bpf_override_return,
 156	.gpl_only	= true,
 157	.ret_type	= RET_INTEGER,
 158	.arg1_type	= ARG_PTR_TO_CTX,
 159	.arg2_type	= ARG_ANYTHING,
 160};
 161#endif
 162
 163static __always_inline int
 164bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
 165{
 166	int ret;
 167
 168	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
 169	if (unlikely(ret < 0))
 170		memset(dst, 0, size);
 171	return ret;
 172}
 173
 174BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
 175	   const void __user *, unsafe_ptr)
 176{
 177	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
 178}
 179
 180const struct bpf_func_proto bpf_probe_read_user_proto = {
 181	.func		= bpf_probe_read_user,
 182	.gpl_only	= true,
 183	.ret_type	= RET_INTEGER,
 184	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 185	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 186	.arg3_type	= ARG_ANYTHING,
 187};
 188
 189static __always_inline int
 190bpf_probe_read_user_str_common(void *dst, u32 size,
 191			       const void __user *unsafe_ptr)
 192{
 193	int ret;
 194
 195	/*
 196	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
 197	 * terminator into `dst`.
 198	 *
 199	 * strncpy_from_user() does long-sized strides in the fast path. If the
 200	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
 201	 * then there could be junk after the NUL in `dst`. If user takes `dst`
 202	 * and keys a hash map with it, then semantically identical strings can
 203	 * occupy multiple entries in the map.
 204	 */
 205	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
 206	if (unlikely(ret < 0))
 207		memset(dst, 0, size);
 208	return ret;
 209}
 210
 211BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
 212	   const void __user *, unsafe_ptr)
 213{
 214	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
 215}
 216
 217const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 218	.func		= bpf_probe_read_user_str,
 219	.gpl_only	= true,
 220	.ret_type	= RET_INTEGER,
 221	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 222	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 223	.arg3_type	= ARG_ANYTHING,
 224};
 225
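/*
 * Illustrative sketch (not part of this source file) of bpf_probe_read_user_str()
 * from the BPF side: on success the helper returns the string length including
 * the NUL, and, per the comment above, leaves no junk past the NUL in the
 * destination. Assumes libbpf's <bpf/bpf_helpers.h> and <bpf/bpf_tracing.h>;
 * the probe target and register mapping are illustrative only.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		const char *uname = (const char *)PT_REGS_PARM2(ctx);
 *		char fname[64];
 *		long len;
 *
 *		len = bpf_probe_read_user_str(fname, sizeof(fname), uname);
 *		if (len <= 0)
 *			return 0;
 *		bpf_printk("openat: %s (%ld bytes)", fname, len);
 *		return 0;
 *	}
 */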
 226static __always_inline int
 227bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
 228{
 229	int ret;
 230
 231	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
 232	if (unlikely(ret < 0))
 233		memset(dst, 0, size);
 234	return ret;
 235}
 236
 237BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
 238	   const void *, unsafe_ptr)
 239{
 240	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 241}
 242
 243const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 244	.func		= bpf_probe_read_kernel,
 245	.gpl_only	= true,
 246	.ret_type	= RET_INTEGER,
 247	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 248	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 249	.arg3_type	= ARG_ANYTHING,
 250};
 251
 252static __always_inline int
 253bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 254{
 255	int ret;
 256
 257	/*
 258	 * The strncpy_from_kernel_nofault() call will likely not fill the
 259	 * entire buffer, but that's okay in this circumstance as we're probing
 260	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
 261	 * as well probe the stack. Thus, memory is explicitly cleared
  262	 * only in the error case, so that improper users who ignore the return
  263	 * code altogether don't copy garbage; otherwise the length of the string
  264	 * is returned, which can be used for bpf_perf_event_output() et al.
 265	 */
 266	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
 267	if (unlikely(ret < 0))
 268		memset(dst, 0, size);
 269	return ret;
 270}
 271
 272BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
 273	   const void *, unsafe_ptr)
 274{
 275	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 276}
 277
 278const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
 279	.func		= bpf_probe_read_kernel_str,
 280	.gpl_only	= true,
 281	.ret_type	= RET_INTEGER,
 282	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 283	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 284	.arg3_type	= ARG_ANYTHING,
 285};
 286
 287#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 288BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
 289	   const void *, unsafe_ptr)
 290{
 291	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 292		return bpf_probe_read_user_common(dst, size,
 293				(__force void __user *)unsafe_ptr);
 294	}
 295	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 296}
 297
 298static const struct bpf_func_proto bpf_probe_read_compat_proto = {
 299	.func		= bpf_probe_read_compat,
 300	.gpl_only	= true,
 301	.ret_type	= RET_INTEGER,
 302	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 303	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 304	.arg3_type	= ARG_ANYTHING,
 305};
 306
 307BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
 308	   const void *, unsafe_ptr)
 309{
 310	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 311		return bpf_probe_read_user_str_common(dst, size,
 312				(__force void __user *)unsafe_ptr);
 313	}
 314	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 315}
 316
 317static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
 318	.func		= bpf_probe_read_compat_str,
 319	.gpl_only	= true,
 320	.ret_type	= RET_INTEGER,
 321	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 322	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 323	.arg3_type	= ARG_ANYTHING,
 324};
 325#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 326
 327BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
 328	   u32, size)
 329{
 330	/*
 331	 * Ensure we're in user context which is safe for the helper to
 332	 * run. This helper has no business in a kthread.
 333	 *
 334	 * access_ok() should prevent writing to non-user memory, but in
 335	 * some situations (nommu, temporary switch, etc) access_ok() does
 336	 * not provide enough validation, hence the check on KERNEL_DS.
 337	 *
 338	 * nmi_uaccess_okay() ensures the probe is not run in an interim
 339	 * state, when the task or mm are switched. This is specifically
 340	 * required to prevent the use of temporary mm.
 341	 */
 342
 343	if (unlikely(in_interrupt() ||
 344		     current->flags & (PF_KTHREAD | PF_EXITING)))
 345		return -EPERM;
 346	if (unlikely(!nmi_uaccess_okay()))
 347		return -EPERM;
 348
 349	return copy_to_user_nofault(unsafe_ptr, src, size);
 350}
 351
 352static const struct bpf_func_proto bpf_probe_write_user_proto = {
 353	.func		= bpf_probe_write_user,
 354	.gpl_only	= true,
 355	.ret_type	= RET_INTEGER,
 356	.arg1_type	= ARG_ANYTHING,
 357	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 358	.arg3_type	= ARG_CONST_SIZE,
 359};
 360
 361static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 362{
 363	if (!capable(CAP_SYS_ADMIN))
 364		return NULL;
 365
 366	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
 367			    current->comm, task_pid_nr(current));
 368
 369	return &bpf_probe_write_user_proto;
 370}
 371
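/*
 * Hedged sketch (not part of this source file) of the deliberately dangerous
 * bpf_probe_write_user() helper: the loader needs CAP_SYS_ADMIN, and the
 * kernel prints the rate-limited warning above at load time. The probed
 * function and its argument layout are purely hypothetical; assumes libbpf's
 * <bpf/bpf_helpers.h> and <bpf/bpf_tracing.h>.
 *
 *	SEC("kprobe/hypothetical_func_taking_user_ptr")
 *	int poke_user(struct pt_regs *ctx)
 *	{
 *		void *uptr = (void *)PT_REGS_PARM1(ctx);
 *		__u32 val = 1;
 *
 *		// Only succeeds in a sane user context, per the checks above.
 *		return bpf_probe_write_user(uptr, &val, sizeof(val));
 *	}
 */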
 372static DEFINE_RAW_SPINLOCK(trace_printk_lock);
 373
 374#define MAX_TRACE_PRINTK_VARARGS	3
 375#define BPF_TRACE_PRINTK_SIZE		1024
 376
 377BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 378	   u64, arg2, u64, arg3)
 379{
 380	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
 381	u32 *bin_args;
 382	static char buf[BPF_TRACE_PRINTK_SIZE];
 383	unsigned long flags;
 384	int ret;
 385
 386	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
 387				  MAX_TRACE_PRINTK_VARARGS);
 388	if (ret < 0)
 389		return ret;
 390
 391	raw_spin_lock_irqsave(&trace_printk_lock, flags);
 392	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
 393
 394	trace_bpf_trace_printk(buf);
 395	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
 396
 397	bpf_bprintf_cleanup();
 398
 399	return ret;
 400}
 401
 402static const struct bpf_func_proto bpf_trace_printk_proto = {
 403	.func		= bpf_trace_printk,
 404	.gpl_only	= true,
 405	.ret_type	= RET_INTEGER,
 406	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 407	.arg2_type	= ARG_CONST_SIZE,
 408};
 409
 410static void __set_printk_clr_event(void)
 411{
 412	/*
 413	 * This program might be calling bpf_trace_printk,
 414	 * so enable the associated bpf_trace/bpf_trace_printk event.
 415	 * Repeat this each time as it is possible a user has
  416	 * disabled bpf_trace_printk events.  By loading a program that
  417	 * calls bpf_trace_printk(), however, the user has expressed
  418	 * the intent to see such events.
 419	 */
 420	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
 421		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 422}
 423
 424const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 425{
 426	__set_printk_clr_event();
 427	return &bpf_trace_printk_proto;
 428}
 429
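/*
 * Illustrative sketch (not part of this source file): bpf_trace_printk()
 * accepts at most three format arguments (MAX_TRACE_PRINTK_VARARGS above),
 * and loading a program that uses it enables the bpf_trace/bpf_trace_printk
 * event as described in __set_printk_clr_event(). Assumes libbpf, whose
 * bpf_printk() macro wraps this helper; output is read from tracefs
 * trace_pipe.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int log_exec(void *ctx)
 *	{
 *		__u32 pid = bpf_get_current_pid_tgid() >> 32;
 *		__u32 uid = (__u32)bpf_get_current_uid_gid();
 *
 *		bpf_printk("execve by pid %u uid %u", pid, uid);
 *		return 0;
 *	}
 */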
 430BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
 431	   u32, data_len)
 432{
 433	static char buf[BPF_TRACE_PRINTK_SIZE];
 434	unsigned long flags;
 435	int ret, num_args;
 436	u32 *bin_args;
 437
 438	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 439	    (data_len && !data))
 440		return -EINVAL;
 441	num_args = data_len / 8;
 442
 443	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
 444	if (ret < 0)
 445		return ret;
 446
 447	raw_spin_lock_irqsave(&trace_printk_lock, flags);
 448	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
 449
 450	trace_bpf_trace_printk(buf);
 451	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
 452
 453	bpf_bprintf_cleanup();
 454
 455	return ret;
 456}
 457
 458static const struct bpf_func_proto bpf_trace_vprintk_proto = {
 459	.func		= bpf_trace_vprintk,
 460	.gpl_only	= true,
 461	.ret_type	= RET_INTEGER,
 462	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 463	.arg2_type	= ARG_CONST_SIZE,
 464	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 465	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
 466};
 467
 468const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
 469{
 470	__set_printk_clr_event();
 471	return &bpf_trace_vprintk_proto;
 472}
 473
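/*
 * Illustrative sketch (not part of this source file): bpf_trace_vprintk()
 * lifts the three-argument limit by passing a packed array of u64 slots;
 * data_len must be a multiple of 8 and at most MAX_BPRINTF_VARARGS * 8, as
 * checked above. Assumes libbpf and a hypothetical attach point.
 *
 *	SEC("kprobe/hypothetical_target")
 *	int vprintk_demo(void *ctx)
 *	{
 *		static const char fmt[] = "a=%llu b=%llu c=%llu d=%llu";
 *		__u64 args[4] = { 1, 2, 3, 4 };
 *
 *		// One u64 slot per conversion specifier in fmt.
 *		bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
 *		return 0;
 *	}
 */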
 474BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 475	   const void *, data, u32, data_len)
 476{
 477	int err, num_args;
 478	u32 *bin_args;
 479
 480	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 481	    (data_len && !data))
 482		return -EINVAL;
 483	num_args = data_len / 8;
 484
 485	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
 486	if (err < 0)
 487		return err;
 488
 489	seq_bprintf(m, fmt, bin_args);
 490
 491	bpf_bprintf_cleanup();
 492
 493	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 494}
 495
 496BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
 497
 498static const struct bpf_func_proto bpf_seq_printf_proto = {
 499	.func		= bpf_seq_printf,
 500	.gpl_only	= true,
 501	.ret_type	= RET_INTEGER,
 502	.arg1_type	= ARG_PTR_TO_BTF_ID,
 503	.arg1_btf_id	= &btf_seq_file_ids[0],
 504	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 505	.arg3_type	= ARG_CONST_SIZE,
 506	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 507	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 508};
 509
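/*
 * Illustrative sketch (not part of this source file): bpf_seq_printf() is
 * used by BPF iterator programs, which receive the seq_file through their
 * context; format arguments travel as an array of u64 slots, one per
 * conversion. Assumes libbpf and a vmlinux.h providing struct bpf_iter__task.
 *
 *	SEC("iter/task")
 *	int dump_tasks(struct bpf_iter__task *ctx)
 *	{
 *		static const char fmt[] = "%8d %16s\n";
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		__u64 args[2];
 *
 *		if (!task)
 *			return 0;
 *		args[0] = task->tgid;
 *		args[1] = (__u64)(unsigned long)task->comm;
 *		bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
 *		return 0;
 *	}
 */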
 510BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
 511{
 512	return seq_write(m, data, len) ? -EOVERFLOW : 0;
 513}
 514
 515static const struct bpf_func_proto bpf_seq_write_proto = {
 516	.func		= bpf_seq_write,
 517	.gpl_only	= true,
 518	.ret_type	= RET_INTEGER,
 519	.arg1_type	= ARG_PTR_TO_BTF_ID,
 520	.arg1_btf_id	= &btf_seq_file_ids[0],
 521	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 522	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 523};
 524
 525BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
 526	   u32, btf_ptr_size, u64, flags)
 527{
 528	const struct btf *btf;
 529	s32 btf_id;
 530	int ret;
 531
 532	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
 533	if (ret)
 534		return ret;
 535
 536	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
 537}
 538
 539static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
 540	.func		= bpf_seq_printf_btf,
 541	.gpl_only	= true,
 542	.ret_type	= RET_INTEGER,
 543	.arg1_type	= ARG_PTR_TO_BTF_ID,
 544	.arg1_btf_id	= &btf_seq_file_ids[0],
 545	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 546	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 547	.arg4_type	= ARG_ANYTHING,
 548};
 549
 550static __always_inline int
 551get_map_perf_counter(struct bpf_map *map, u64 flags,
 552		     u64 *value, u64 *enabled, u64 *running)
 553{
 554	struct bpf_array *array = container_of(map, struct bpf_array, map);
 555	unsigned int cpu = smp_processor_id();
 556	u64 index = flags & BPF_F_INDEX_MASK;
 557	struct bpf_event_entry *ee;
 558
 559	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 560		return -EINVAL;
 561	if (index == BPF_F_CURRENT_CPU)
 562		index = cpu;
 563	if (unlikely(index >= array->map.max_entries))
 564		return -E2BIG;
 565
 566	ee = READ_ONCE(array->ptrs[index]);
 567	if (!ee)
 568		return -ENOENT;
 569
 570	return perf_event_read_local(ee->event, value, enabled, running);
 571}
 572
 573BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 574{
 575	u64 value = 0;
 576	int err;
 577
 578	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
 579	/*
 580	 * this api is ugly since we miss [-22..-2] range of valid
 581	 * counter values, but that's uapi
 582	 */
 583	if (err)
 584		return err;
 585	return value;
 586}
 587
 588static const struct bpf_func_proto bpf_perf_event_read_proto = {
 589	.func		= bpf_perf_event_read,
 590	.gpl_only	= true,
 591	.ret_type	= RET_INTEGER,
 592	.arg1_type	= ARG_CONST_MAP_PTR,
 593	.arg2_type	= ARG_ANYTHING,
 594};
 595
 596BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
 597	   struct bpf_perf_event_value *, buf, u32, size)
 598{
 599	int err = -EINVAL;
 600
 601	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 602		goto clear;
 603	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
 604				   &buf->running);
 605	if (unlikely(err))
 606		goto clear;
 607	return 0;
 608clear:
 609	memset(buf, 0, size);
 610	return err;
 611}
 612
 613static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 614	.func		= bpf_perf_event_read_value,
 615	.gpl_only	= true,
 616	.ret_type	= RET_INTEGER,
 617	.arg1_type	= ARG_CONST_MAP_PTR,
 618	.arg2_type	= ARG_ANYTHING,
 619	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
 620	.arg4_type	= ARG_CONST_SIZE,
 621};
 622
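/*
 * Illustrative sketch (not part of this source file) of reading a counter
 * through a BPF_MAP_TYPE_PERF_EVENT_ARRAY, which is what get_map_perf_counter()
 * above expects; user space must have stored one perf event fd per CPU in the
 * map. Assumes libbpf; names are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} counters SEC(".maps");
 *
 *	SEC("kprobe/hypothetical_target")
 *	int read_counter(void *ctx)
 *	{
 *		struct bpf_perf_event_value v;
 *
 *		// BPF_F_CURRENT_CPU selects the entry for the running CPU.
 *		if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *					      &v, sizeof(v)))
 *			return 0;
 *		bpf_printk("val=%llu en=%llu run=%llu",
 *			   v.counter, v.enabled, v.running);
 *		return 0;
 *	}
 */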
 623static __always_inline u64
 624__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 625			u64 flags, struct perf_sample_data *sd)
 626{
 627	struct bpf_array *array = container_of(map, struct bpf_array, map);
 628	unsigned int cpu = smp_processor_id();
 629	u64 index = flags & BPF_F_INDEX_MASK;
 630	struct bpf_event_entry *ee;
 631	struct perf_event *event;
 632
 633	if (index == BPF_F_CURRENT_CPU)
 634		index = cpu;
 635	if (unlikely(index >= array->map.max_entries))
 636		return -E2BIG;
 637
 638	ee = READ_ONCE(array->ptrs[index]);
 639	if (!ee)
 640		return -ENOENT;
 641
 642	event = ee->event;
 643	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 644		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 645		return -EINVAL;
 646
 647	if (unlikely(event->oncpu != cpu))
 648		return -EOPNOTSUPP;
 649
 650	return perf_event_output(event, sd, regs);
 651}
 652
 653/*
 654 * Support executing tracepoints in normal, irq, and nmi context that each call
 655 * bpf_perf_event_output
 656 */
 657struct bpf_trace_sample_data {
 658	struct perf_sample_data sds[3];
 659};
 660
 661static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
 662static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 663BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 664	   u64, flags, void *, data, u64, size)
 665{
 666	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
 667	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 668	struct perf_raw_record raw = {
 669		.frag = {
 670			.size = size,
 671			.data = data,
 672		},
 673	};
 674	struct perf_sample_data *sd;
 675	int err;
 676
 677	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
 678		err = -EBUSY;
 679		goto out;
 680	}
 681
 682	sd = &sds->sds[nest_level - 1];
 683
 684	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
 685		err = -EINVAL;
 686		goto out;
 687	}
 688
 689	perf_sample_data_init(sd, 0, 0);
 690	sd->raw = &raw;
 691	sd->sample_flags |= PERF_SAMPLE_RAW;
 692
 693	err = __bpf_perf_event_output(regs, map, flags, sd);
 694
 695out:
 696	this_cpu_dec(bpf_trace_nest_level);
 697	return err;
 698}
 699
 700static const struct bpf_func_proto bpf_perf_event_output_proto = {
 701	.func		= bpf_perf_event_output,
 702	.gpl_only	= true,
 703	.ret_type	= RET_INTEGER,
 704	.arg1_type	= ARG_PTR_TO_CTX,
 705	.arg2_type	= ARG_CONST_MAP_PTR,
 706	.arg3_type	= ARG_ANYTHING,
 707	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 708	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 709};
 710
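/*
 * Illustrative sketch (not part of this source file) of the usual BPF-side
 * use of bpf_perf_event_output(): push a sample into a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY that user space drains via a perf buffer.
 * Assumes libbpf; struct layout and names are illustrative.
 *
 *	struct event {
 *		__u32 pid;
 *		char comm[16];
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/hypothetical_target")
 *	int emit_event(struct pt_regs *ctx)
 *	{
 *		struct event e = {};
 *
 *		e.pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(e.comm, sizeof(e.comm));
 *		// BPF_F_CURRENT_CPU picks the per-CPU perf event, as above.
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */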
 711static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
 712struct bpf_nested_pt_regs {
 713	struct pt_regs regs[3];
 714};
 715static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
 716static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 717
 718u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 719		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 720{
 721	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 722	struct perf_raw_frag frag = {
 723		.copy		= ctx_copy,
 724		.size		= ctx_size,
 725		.data		= ctx,
 726	};
 727	struct perf_raw_record raw = {
 728		.frag = {
 729			{
 730				.next	= ctx_size ? &frag : NULL,
 731			},
 732			.size	= meta_size,
 733			.data	= meta,
 734		},
 735	};
 736	struct perf_sample_data *sd;
 737	struct pt_regs *regs;
 738	u64 ret;
 739
 740	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
 741		ret = -EBUSY;
 742		goto out;
 743	}
 744	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
 745	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 746
 747	perf_fetch_caller_regs(regs);
 748	perf_sample_data_init(sd, 0, 0);
 749	sd->raw = &raw;
 750	sd->sample_flags |= PERF_SAMPLE_RAW;
 751
 752	ret = __bpf_perf_event_output(regs, map, flags, sd);
 753out:
 754	this_cpu_dec(bpf_event_output_nest_level);
 755	return ret;
 756}
 757
 758BPF_CALL_0(bpf_get_current_task)
 759{
 760	return (long) current;
 761}
 762
 763const struct bpf_func_proto bpf_get_current_task_proto = {
 764	.func		= bpf_get_current_task,
 765	.gpl_only	= true,
 766	.ret_type	= RET_INTEGER,
 767};
 768
 769BPF_CALL_0(bpf_get_current_task_btf)
 770{
 771	return (unsigned long) current;
 772}
 773
 774const struct bpf_func_proto bpf_get_current_task_btf_proto = {
 775	.func		= bpf_get_current_task_btf,
 776	.gpl_only	= true,
 777	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
 778	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 779};
 780
 781BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
 782{
 783	return (unsigned long) task_pt_regs(task);
 784}
 785
 786BTF_ID_LIST(bpf_task_pt_regs_ids)
 787BTF_ID(struct, pt_regs)
 788
 789const struct bpf_func_proto bpf_task_pt_regs_proto = {
 790	.func		= bpf_task_pt_regs,
 791	.gpl_only	= true,
 792	.arg1_type	= ARG_PTR_TO_BTF_ID,
 793	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 794	.ret_type	= RET_PTR_TO_BTF_ID,
 795	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
 796};
 797
 798BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
 799{
 800	struct bpf_array *array = container_of(map, struct bpf_array, map);
 801	struct cgroup *cgrp;
 802
 803	if (unlikely(idx >= array->map.max_entries))
 804		return -E2BIG;
 805
 806	cgrp = READ_ONCE(array->ptrs[idx]);
 807	if (unlikely(!cgrp))
 808		return -EAGAIN;
 809
 810	return task_under_cgroup_hierarchy(current, cgrp);
 811}
 812
 813static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
 814	.func           = bpf_current_task_under_cgroup,
 815	.gpl_only       = false,
 816	.ret_type       = RET_INTEGER,
 817	.arg1_type      = ARG_CONST_MAP_PTR,
 818	.arg2_type      = ARG_ANYTHING,
 819};
 820
 821struct send_signal_irq_work {
 822	struct irq_work irq_work;
 823	struct task_struct *task;
 824	u32 sig;
 825	enum pid_type type;
 826};
 827
 828static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
 829
 830static void do_bpf_send_signal(struct irq_work *entry)
 831{
 832	struct send_signal_irq_work *work;
 833
 834	work = container_of(entry, struct send_signal_irq_work, irq_work);
 835	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
 836	put_task_struct(work->task);
 837}
 838
 839static int bpf_send_signal_common(u32 sig, enum pid_type type)
 840{
 841	struct send_signal_irq_work *work = NULL;
 842
  843	/* Similar to bpf_probe_write_user, the task needs to be
  844	 * in a sound condition and kernel memory access must be
  845	 * permitted in order to send a signal to the current
  846	 * task.
 847	 */
 848	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
 849		return -EPERM;
 850	if (unlikely(!nmi_uaccess_okay()))
 851		return -EPERM;
 852	/* Task should not be pid=1 to avoid kernel panic. */
 853	if (unlikely(is_global_init(current)))
 854		return -EPERM;
 855
 856	if (irqs_disabled()) {
 857		/* Do an early check on signal validity. Otherwise,
 858		 * the error is lost in deferred irq_work.
 859		 */
 860		if (unlikely(!valid_signal(sig)))
 861			return -EINVAL;
 862
 863		work = this_cpu_ptr(&send_signal_work);
 864		if (irq_work_is_busy(&work->irq_work))
 865			return -EBUSY;
 866
  867		/* Add the current task, which is the target of the signal,
  868		 * to the irq_work. The current task may have changed by the
  869		 * time the queued irq_work gets executed.
 870		 */
 871		work->task = get_task_struct(current);
 872		work->sig = sig;
 873		work->type = type;
 874		irq_work_queue(&work->irq_work);
 875		return 0;
 876	}
 877
 878	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
 879}
 880
 881BPF_CALL_1(bpf_send_signal, u32, sig)
 882{
 883	return bpf_send_signal_common(sig, PIDTYPE_TGID);
 884}
 885
 886static const struct bpf_func_proto bpf_send_signal_proto = {
 887	.func		= bpf_send_signal,
 888	.gpl_only	= false,
 889	.ret_type	= RET_INTEGER,
 890	.arg1_type	= ARG_ANYTHING,
 891};
 892
 893BPF_CALL_1(bpf_send_signal_thread, u32, sig)
 894{
 895	return bpf_send_signal_common(sig, PIDTYPE_PID);
 896}
 897
 898static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 899	.func		= bpf_send_signal_thread,
 900	.gpl_only	= false,
 901	.ret_type	= RET_INTEGER,
 902	.arg1_type	= ARG_ANYTHING,
 903};
 904
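/*
 * Illustrative sketch (not part of this source file): bpf_send_signal()
 * targets the whole thread group of current (PIDTYPE_TGID), while
 * bpf_send_signal_thread() targets only the current thread (PIDTYPE_PID).
 * Assumes libbpf and a hypothetical attach point; 10 is SIGUSR1 on most
 * architectures.
 *
 *	SEC("kprobe/hypothetical_target")
 *	int nudge(void *ctx)
 *	{
 *		// Fails with -EPERM from kthreads, exiting tasks or pid 1.
 *		bpf_send_signal(10);
 *		return 0;
 *	}
 */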
 905BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 906{
 907	long len;
 908	char *p;
 909
 910	if (!sz)
 911		return 0;
 912
 913	p = d_path(path, buf, sz);
 914	if (IS_ERR(p)) {
 915		len = PTR_ERR(p);
 916	} else {
 917		len = buf + sz - p;
 918		memmove(buf, p, len);
 919	}
 920
 921	return len;
 922}
 923
 924BTF_SET_START(btf_allowlist_d_path)
 925#ifdef CONFIG_SECURITY
 926BTF_ID(func, security_file_permission)
 927BTF_ID(func, security_inode_getattr)
 928BTF_ID(func, security_file_open)
 929#endif
 930#ifdef CONFIG_SECURITY_PATH
 931BTF_ID(func, security_path_truncate)
 932#endif
 933BTF_ID(func, vfs_truncate)
 934BTF_ID(func, vfs_fallocate)
 935BTF_ID(func, dentry_open)
 936BTF_ID(func, vfs_getattr)
 937BTF_ID(func, filp_close)
 938BTF_SET_END(btf_allowlist_d_path)
 939
 940static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 941{
 942	if (prog->type == BPF_PROG_TYPE_TRACING &&
 943	    prog->expected_attach_type == BPF_TRACE_ITER)
 944		return true;
 945
 946	if (prog->type == BPF_PROG_TYPE_LSM)
 947		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
 948
 949	return btf_id_set_contains(&btf_allowlist_d_path,
 950				   prog->aux->attach_btf_id);
 951}
 952
 953BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
 954
 955static const struct bpf_func_proto bpf_d_path_proto = {
 956	.func		= bpf_d_path,
 957	.gpl_only	= false,
 958	.ret_type	= RET_INTEGER,
 959	.arg1_type	= ARG_PTR_TO_BTF_ID,
 960	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
 961	.arg2_type	= ARG_PTR_TO_MEM,
 962	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 963	.allowed	= bpf_d_path_allowed,
 964};
 965
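/*
 * Illustrative sketch (not part of this source file): bpf_d_path() is
 * restricted by bpf_d_path_allowed() above to BPF_TRACE_ITER programs,
 * sleepable LSM hooks and the allowlisted functions. Sketch of an fentry
 * program on the allowlisted filp_close(), assuming libbpf's
 * <bpf/bpf_tracing.h> and vmlinux.h.
 *
 *	SEC("fentry/filp_close")
 *	int BPF_PROG(log_close, struct file *filp)
 *	{
 *		char buf[256];
 *		long len;
 *
 *		len = bpf_d_path(&filp->f_path, buf, sizeof(buf));
 *		if (len > 0)
 *			bpf_printk("closing %s", buf);
 *		return 0;
 *	}
 */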
 966#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
 967			 BTF_F_PTR_RAW | BTF_F_ZERO)
 968
 969static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
 970				  u64 flags, const struct btf **btf,
 971				  s32 *btf_id)
 972{
 973	const struct btf_type *t;
 974
 975	if (unlikely(flags & ~(BTF_F_ALL)))
 976		return -EINVAL;
 977
 978	if (btf_ptr_size != sizeof(struct btf_ptr))
 979		return -EINVAL;
 980
 981	*btf = bpf_get_btf_vmlinux();
 982
 983	if (IS_ERR_OR_NULL(*btf))
 984		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
 985
 986	if (ptr->type_id > 0)
 987		*btf_id = ptr->type_id;
 988	else
 989		return -EINVAL;
 990
 991	if (*btf_id > 0)
 992		t = btf_type_by_id(*btf, *btf_id);
 993	if (*btf_id <= 0 || !t)
 994		return -ENOENT;
 995
 996	return 0;
 997}
 998
 999BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1000	   u32, btf_ptr_size, u64, flags)
1001{
1002	const struct btf *btf;
1003	s32 btf_id;
1004	int ret;
1005
1006	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1007	if (ret)
1008		return ret;
1009
1010	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1011				      flags);
1012}
1013
1014const struct bpf_func_proto bpf_snprintf_btf_proto = {
1015	.func		= bpf_snprintf_btf,
1016	.gpl_only	= false,
1017	.ret_type	= RET_INTEGER,
1018	.arg1_type	= ARG_PTR_TO_MEM,
1019	.arg2_type	= ARG_CONST_SIZE,
1020	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1021	.arg4_type	= ARG_CONST_SIZE,
1022	.arg5_type	= ARG_ANYTHING,
1023};
1024
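/*
 * Illustrative sketch (not part of this source file): bpf_snprintf_btf()
 * renders a kernel object as text using its BTF type, driven by the
 * struct btf_ptr validated in bpf_btf_printf_prepare() above. Assumes
 * libbpf, vmlinux.h and <bpf/bpf_core_read.h> for bpf_core_type_id_kernel().
 *
 *	SEC("tp_btf/netif_receive_skb")
 *	int BPF_PROG(dump_skb, struct sk_buff *skb)
 *	{
 *		static char out[2048];
 *		struct btf_ptr p = {
 *			.ptr = skb,
 *			.type_id = bpf_core_type_id_kernel(struct sk_buff),
 *		};
 *
 *		bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_COMPACT);
 *		return 0;
 *	}
 */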
1025BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1026{
1027	/* This helper call is inlined by verifier. */
1028	return ((u64 *)ctx)[-2];
1029}
1030
1031static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1032	.func		= bpf_get_func_ip_tracing,
1033	.gpl_only	= true,
1034	.ret_type	= RET_INTEGER,
1035	.arg1_type	= ARG_PTR_TO_CTX,
1036};
1037
1038#ifdef CONFIG_X86_KERNEL_IBT
1039static unsigned long get_entry_ip(unsigned long fentry_ip)
1040{
1041	u32 instr;
1042
 1043	/* Be extra safe here in case the entry ip is on a page edge. */
1044	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
1045		return fentry_ip;
1046	if (is_endbr(instr))
1047		fentry_ip -= ENDBR_INSN_SIZE;
1048	return fentry_ip;
1049}
1050#else
1051#define get_entry_ip(fentry_ip) fentry_ip
1052#endif
1053
1054BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1055{
1056	struct kprobe *kp = kprobe_running();
1057
1058	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1059		return 0;
1060
1061	return get_entry_ip((uintptr_t)kp->addr);
1062}
1063
1064static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1065	.func		= bpf_get_func_ip_kprobe,
1066	.gpl_only	= true,
1067	.ret_type	= RET_INTEGER,
1068	.arg1_type	= ARG_PTR_TO_CTX,
1069};
1070
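/*
 * Illustrative sketch (not part of this source file): in a kprobe program
 * bpf_get_func_ip() returns the probed function's entry address, adjusted
 * for the IBT endbr instruction by get_entry_ip() above on x86. Assumes
 * libbpf and a hypothetical target.
 *
 *	SEC("kprobe/hypothetical_target")
 *	int where(struct pt_regs *ctx)
 *	{
 *		__u64 ip = bpf_get_func_ip(ctx);
 *
 *		bpf_printk("probed function entry at %llx", ip);
 *		return 0;
 *	}
 */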
1071BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1072{
1073	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1074}
1075
1076static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1077	.func		= bpf_get_func_ip_kprobe_multi,
1078	.gpl_only	= false,
1079	.ret_type	= RET_INTEGER,
1080	.arg1_type	= ARG_PTR_TO_CTX,
1081};
1082
1083BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1084{
1085	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1086}
1087
1088static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1089	.func		= bpf_get_attach_cookie_kprobe_multi,
1090	.gpl_only	= false,
1091	.ret_type	= RET_INTEGER,
1092	.arg1_type	= ARG_PTR_TO_CTX,
1093};
1094
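/*
 * Illustrative sketch (not part of this source file): for kprobe.multi links
 * the two helpers above resolve the entry IP and the per-symbol cookie that
 * was supplied at attach time (e.g. via libbpf's
 * bpf_program__attach_kprobe_multi_opts() cookies array). The symbol pattern
 * is hypothetical; assumes libbpf.
 *
 *	SEC("kprobe.multi/hypothetical_prefix_*")
 *	int multi(struct pt_regs *ctx)
 *	{
 *		__u64 ip = bpf_get_func_ip(ctx);
 *		__u64 cookie = bpf_get_attach_cookie(ctx);
 *
 *		bpf_printk("hit %llx, cookie %llu", ip, cookie);
 *		return 0;
 *	}
 */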
1095BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1096{
1097	struct bpf_trace_run_ctx *run_ctx;
1098
1099	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1100	return run_ctx->bpf_cookie;
1101}
1102
1103static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1104	.func		= bpf_get_attach_cookie_trace,
1105	.gpl_only	= false,
1106	.ret_type	= RET_INTEGER,
1107	.arg1_type	= ARG_PTR_TO_CTX,
1108};
1109
1110BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1111{
1112	return ctx->event->bpf_cookie;
1113}
1114
1115static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1116	.func		= bpf_get_attach_cookie_pe,
1117	.gpl_only	= false,
1118	.ret_type	= RET_INTEGER,
1119	.arg1_type	= ARG_PTR_TO_CTX,
1120};
1121
1122BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1123{
1124	struct bpf_trace_run_ctx *run_ctx;
1125
1126	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1127	return run_ctx->bpf_cookie;
1128}
1129
1130static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1131	.func		= bpf_get_attach_cookie_tracing,
1132	.gpl_only	= false,
1133	.ret_type	= RET_INTEGER,
1134	.arg1_type	= ARG_PTR_TO_CTX,
1135};
1136
1137BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1138{
1139#ifndef CONFIG_X86
1140	return -ENOENT;
1141#else
1142	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1143	u32 entry_cnt = size / br_entry_size;
1144
1145	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1146
1147	if (unlikely(flags))
1148		return -EINVAL;
1149
1150	if (!entry_cnt)
1151		return -ENOENT;
1152
1153	return entry_cnt * br_entry_size;
1154#endif
1155}
1156
1157static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1158	.func		= bpf_get_branch_snapshot,
1159	.gpl_only	= true,
1160	.ret_type	= RET_INTEGER,
1161	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1162	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1163};
1164
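/*
 * Illustrative sketch (not part of this source file): bpf_get_branch_snapshot()
 * copies the LBR contents into a caller-provided buffer and returns the
 * number of bytes written (x86 only, as gated above; flags must be 0).
 * Assumes libbpf and a hypothetical fentry target.
 *
 *	SEC("fentry/hypothetical_target")
 *	int lbr(void *ctx)
 *	{
 *		static struct perf_branch_entry entries[16];
 *		long bytes;
 *
 *		bytes = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
 *		if (bytes <= 0)
 *			return 0;
 *		bpf_printk("%ld bytes of branch entries", bytes);
 *		return 0;
 *	}
 */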
1165BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1166{
1167	/* This helper call is inlined by verifier. */
1168	u64 nr_args = ((u64 *)ctx)[-1];
1169
1170	if ((u64) n >= nr_args)
1171		return -EINVAL;
1172	*value = ((u64 *)ctx)[n];
1173	return 0;
1174}
1175
1176static const struct bpf_func_proto bpf_get_func_arg_proto = {
1177	.func		= get_func_arg,
1178	.ret_type	= RET_INTEGER,
1179	.arg1_type	= ARG_PTR_TO_CTX,
1180	.arg2_type	= ARG_ANYTHING,
1181	.arg3_type	= ARG_PTR_TO_LONG,
1182};
1183
1184BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1185{
1186	/* This helper call is inlined by verifier. */
1187	u64 nr_args = ((u64 *)ctx)[-1];
1188
1189	*value = ((u64 *)ctx)[nr_args];
1190	return 0;
1191}
1192
1193static const struct bpf_func_proto bpf_get_func_ret_proto = {
1194	.func		= get_func_ret,
1195	.ret_type	= RET_INTEGER,
1196	.arg1_type	= ARG_PTR_TO_CTX,
1197	.arg2_type	= ARG_PTR_TO_LONG,
1198};
1199
1200BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1201{
1202	/* This helper call is inlined by verifier. */
1203	return ((u64 *)ctx)[-1];
1204}
1205
1206static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1207	.func		= get_func_arg_cnt,
1208	.ret_type	= RET_INTEGER,
1209	.arg1_type	= ARG_PTR_TO_CTX,
1210};
1211
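/*
 * Illustrative sketch (not part of this source file): the three helpers above
 * expose the traced function's arguments, return value and argument count to
 * fentry/fexit programs; all are inlined by the verifier. Assumes libbpf and
 * a hypothetical fexit target.
 *
 *	SEC("fexit/hypothetical_target")
 *	int args_demo(void *ctx)
 *	{
 *		__u64 nargs = bpf_get_func_arg_cnt(ctx);
 *		__u64 arg0 = 0, retval = 0;
 *
 *		bpf_get_func_arg(ctx, 0, &arg0);
 *		bpf_get_func_ret(ctx, &retval);
 *		bpf_printk("nargs=%llu arg0=%llx ret=%llx", nargs, arg0, retval);
 *		return 0;
 *	}
 */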
1212#ifdef CONFIG_KEYS
1213__diag_push();
1214__diag_ignore_all("-Wmissing-prototypes",
1215		  "kfuncs which will be used in BPF programs");
1216
1217/**
1218 * bpf_lookup_user_key - lookup a key by its serial
1219 * @serial: key handle serial number
1220 * @flags: lookup-specific flags
1221 *
1222 * Search a key with a given *serial* and the provided *flags*.
1223 * If found, increment the reference count of the key by one, and
1224 * return it in the bpf_key structure.
1225 *
1226 * The bpf_key structure must be passed to bpf_key_put() when done
1227 * with it, so that the key reference count is decremented and the
1228 * bpf_key structure is freed.
1229 *
1230 * Permission checks are deferred to the time the key is used by
1231 * one of the available key-specific kfuncs.
1232 *
1233 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1234 * special keyring (e.g. session keyring), if it doesn't yet exist.
1235 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1236 * for the key construction, and to retrieve uninstantiated keys (keys
1237 * without data attached to them).
1238 *
1239 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1240 *         NULL pointer otherwise.
1241 */
1242struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1243{
1244	key_ref_t key_ref;
1245	struct bpf_key *bkey;
1246
1247	if (flags & ~KEY_LOOKUP_ALL)
1248		return NULL;
1249
1250	/*
1251	 * Permission check is deferred until the key is used, as the
1252	 * intent of the caller is unknown here.
1253	 */
1254	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1255	if (IS_ERR(key_ref))
1256		return NULL;
1257
1258	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1259	if (!bkey) {
1260		key_put(key_ref_to_ptr(key_ref));
1261		return NULL;
1262	}
1263
1264	bkey->key = key_ref_to_ptr(key_ref);
1265	bkey->has_ref = true;
1266
1267	return bkey;
1268}
1269
1270/**
1271 * bpf_lookup_system_key - lookup a key by a system-defined ID
1272 * @id: key ID
1273 *
1274 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1275 * The key pointer is marked as invalid, to prevent bpf_key_put() from
1276 * attempting to decrement the key reference count on that pointer. The key
1277 * pointer set in such way is currently understood only by
1278 * verify_pkcs7_signature().
1279 *
1280 * Set *id* to one of the values defined in include/linux/verification.h:
1281 * 0 for the primary keyring (immutable keyring of system keys);
1282 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1283 * (where keys can be added only if they are vouched for by existing keys
1284 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1285 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 1286 * kernel image and, possibly, the initramfs signature).
1287 *
1288 * Return: a bpf_key pointer with an invalid key pointer set from the
1289 *         pre-determined ID on success, a NULL pointer otherwise
1290 */
1291struct bpf_key *bpf_lookup_system_key(u64 id)
1292{
1293	struct bpf_key *bkey;
1294
1295	if (system_keyring_id_check(id) < 0)
1296		return NULL;
1297
1298	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1299	if (!bkey)
1300		return NULL;
1301
1302	bkey->key = (struct key *)(unsigned long)id;
1303	bkey->has_ref = false;
1304
1305	return bkey;
1306}
1307
1308/**
1309 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1310 * @bkey: bpf_key structure
1311 *
1312 * Decrement the reference count of the key inside *bkey*, if the pointer
1313 * is valid, and free *bkey*.
1314 */
1315void bpf_key_put(struct bpf_key *bkey)
1316{
1317	if (bkey->has_ref)
1318		key_put(bkey->key);
1319
1320	kfree(bkey);
1321}
1322
1323#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1324/**
1325 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1326 * @data_ptr: data to verify
1327 * @sig_ptr: signature of the data
1328 * @trusted_keyring: keyring with keys trusted for signature verification
1329 *
1330 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1331 * with keys in a keyring referenced by *trusted_keyring*.
1332 *
1333 * Return: 0 on success, a negative value on error.
1334 */
1335int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
1336			       struct bpf_dynptr_kern *sig_ptr,
1337			       struct bpf_key *trusted_keyring)
1338{
1339	int ret;
1340
1341	if (trusted_keyring->has_ref) {
1342		/*
1343		 * Do the permission check deferred in bpf_lookup_user_key().
1344		 * See bpf_lookup_user_key() for more details.
1345		 *
1346		 * A call to key_task_permission() here would be redundant, as
1347		 * it is already done by keyring_search() called by
1348		 * find_asymmetric_key().
1349		 */
1350		ret = key_validate(trusted_keyring->key);
1351		if (ret < 0)
1352			return ret;
1353	}
1354
1355	return verify_pkcs7_signature(data_ptr->data,
1356				      bpf_dynptr_get_size(data_ptr),
1357				      sig_ptr->data,
1358				      bpf_dynptr_get_size(sig_ptr),
1359				      trusted_keyring->key,
1360				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1361				      NULL);
1362}
1363#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
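/*
 * Hedged sketch (not part of this source file) of how a sleepable LSM program
 * might use the kfuncs above, loosely modelled on the BPF selftests: look up
 * a keyring by serial, verify a PKCS#7 signature carried in dynptrs, then
 * drop the reference. Buffer layout, sizes, globals and the lsm.s/bpf attach
 * point are illustrative; assumes libbpf and vmlinux.h.
 *
 *	extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *bkey) __ksym;
 *	extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
 *					      struct bpf_dynptr *sig_p,
 *					      struct bpf_key *keyring) __ksym;
 *
 *	char data[4096];
 *	__u32 data_len;
 *	char sig[1024];
 *	__u32 sig_len;
 *	__u32 keyring_serial;
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(verify_blob, int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		struct bpf_dynptr data_p, sig_p;
 *		struct bpf_key *kr;
 *		int ret;
 *
 *		if (data_len > sizeof(data) || sig_len > sizeof(sig))
 *			return -1;	// reject
 *		bpf_dynptr_from_mem(data, data_len, 0, &data_p);
 *		bpf_dynptr_from_mem(sig, sig_len, 0, &sig_p);
 *		kr = bpf_lookup_user_key(keyring_serial, 0);
 *		if (!kr)
 *			return -1;	// reject
 *		ret = bpf_verify_pkcs7_signature(&data_p, &sig_p, kr);
 *		bpf_key_put(kr);
 *		return ret;
 *	}
 */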
1364
1365__diag_pop();
1366
1367BTF_SET8_START(key_sig_kfunc_set)
1368BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1369BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1370BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1371#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1372BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1373#endif
1374BTF_SET8_END(key_sig_kfunc_set)
1375
1376static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1377	.owner = THIS_MODULE,
1378	.set = &key_sig_kfunc_set,
1379};
1380
1381static int __init bpf_key_sig_kfuncs_init(void)
1382{
1383	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1384					 &bpf_key_sig_kfunc_set);
1385}
1386
1387late_initcall(bpf_key_sig_kfuncs_init);
1388#endif /* CONFIG_KEYS */
1389
1390static const struct bpf_func_proto *
1391bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1392{
1393	switch (func_id) {
1394	case BPF_FUNC_map_lookup_elem:
1395		return &bpf_map_lookup_elem_proto;
1396	case BPF_FUNC_map_update_elem:
1397		return &bpf_map_update_elem_proto;
1398	case BPF_FUNC_map_delete_elem:
1399		return &bpf_map_delete_elem_proto;
1400	case BPF_FUNC_map_push_elem:
1401		return &bpf_map_push_elem_proto;
1402	case BPF_FUNC_map_pop_elem:
1403		return &bpf_map_pop_elem_proto;
1404	case BPF_FUNC_map_peek_elem:
1405		return &bpf_map_peek_elem_proto;
1406	case BPF_FUNC_map_lookup_percpu_elem:
1407		return &bpf_map_lookup_percpu_elem_proto;
1408	case BPF_FUNC_ktime_get_ns:
1409		return &bpf_ktime_get_ns_proto;
1410	case BPF_FUNC_ktime_get_boot_ns:
1411		return &bpf_ktime_get_boot_ns_proto;
1412	case BPF_FUNC_tail_call:
1413		return &bpf_tail_call_proto;
1414	case BPF_FUNC_get_current_pid_tgid:
1415		return &bpf_get_current_pid_tgid_proto;
1416	case BPF_FUNC_get_current_task:
1417		return &bpf_get_current_task_proto;
1418	case BPF_FUNC_get_current_task_btf:
1419		return &bpf_get_current_task_btf_proto;
1420	case BPF_FUNC_task_pt_regs:
1421		return &bpf_task_pt_regs_proto;
1422	case BPF_FUNC_get_current_uid_gid:
1423		return &bpf_get_current_uid_gid_proto;
1424	case BPF_FUNC_get_current_comm:
1425		return &bpf_get_current_comm_proto;
1426	case BPF_FUNC_trace_printk:
1427		return bpf_get_trace_printk_proto();
1428	case BPF_FUNC_get_smp_processor_id:
1429		return &bpf_get_smp_processor_id_proto;
1430	case BPF_FUNC_get_numa_node_id:
1431		return &bpf_get_numa_node_id_proto;
1432	case BPF_FUNC_perf_event_read:
1433		return &bpf_perf_event_read_proto;
1434	case BPF_FUNC_current_task_under_cgroup:
1435		return &bpf_current_task_under_cgroup_proto;
1436	case BPF_FUNC_get_prandom_u32:
1437		return &bpf_get_prandom_u32_proto;
1438	case BPF_FUNC_probe_write_user:
1439		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1440		       NULL : bpf_get_probe_write_proto();
1441	case BPF_FUNC_probe_read_user:
1442		return &bpf_probe_read_user_proto;
1443	case BPF_FUNC_probe_read_kernel:
1444		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1445		       NULL : &bpf_probe_read_kernel_proto;
1446	case BPF_FUNC_probe_read_user_str:
1447		return &bpf_probe_read_user_str_proto;
1448	case BPF_FUNC_probe_read_kernel_str:
1449		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1450		       NULL : &bpf_probe_read_kernel_str_proto;
1451#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1452	case BPF_FUNC_probe_read:
1453		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1454		       NULL : &bpf_probe_read_compat_proto;
1455	case BPF_FUNC_probe_read_str:
1456		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1457		       NULL : &bpf_probe_read_compat_str_proto;
1458#endif
1459#ifdef CONFIG_CGROUPS
1460	case BPF_FUNC_get_current_cgroup_id:
1461		return &bpf_get_current_cgroup_id_proto;
1462	case BPF_FUNC_get_current_ancestor_cgroup_id:
1463		return &bpf_get_current_ancestor_cgroup_id_proto;
1464	case BPF_FUNC_cgrp_storage_get:
1465		return &bpf_cgrp_storage_get_proto;
1466	case BPF_FUNC_cgrp_storage_delete:
1467		return &bpf_cgrp_storage_delete_proto;
1468#endif
1469	case BPF_FUNC_send_signal:
1470		return &bpf_send_signal_proto;
1471	case BPF_FUNC_send_signal_thread:
1472		return &bpf_send_signal_thread_proto;
1473	case BPF_FUNC_perf_event_read_value:
1474		return &bpf_perf_event_read_value_proto;
1475	case BPF_FUNC_get_ns_current_pid_tgid:
1476		return &bpf_get_ns_current_pid_tgid_proto;
1477	case BPF_FUNC_ringbuf_output:
1478		return &bpf_ringbuf_output_proto;
1479	case BPF_FUNC_ringbuf_reserve:
1480		return &bpf_ringbuf_reserve_proto;
1481	case BPF_FUNC_ringbuf_submit:
1482		return &bpf_ringbuf_submit_proto;
1483	case BPF_FUNC_ringbuf_discard:
1484		return &bpf_ringbuf_discard_proto;
1485	case BPF_FUNC_ringbuf_query:
1486		return &bpf_ringbuf_query_proto;
1487	case BPF_FUNC_jiffies64:
1488		return &bpf_jiffies64_proto;
1489	case BPF_FUNC_get_task_stack:
1490		return &bpf_get_task_stack_proto;
1491	case BPF_FUNC_copy_from_user:
1492		return &bpf_copy_from_user_proto;
1493	case BPF_FUNC_copy_from_user_task:
1494		return &bpf_copy_from_user_task_proto;
1495	case BPF_FUNC_snprintf_btf:
1496		return &bpf_snprintf_btf_proto;
1497	case BPF_FUNC_per_cpu_ptr:
1498		return &bpf_per_cpu_ptr_proto;
1499	case BPF_FUNC_this_cpu_ptr:
1500		return &bpf_this_cpu_ptr_proto;
1501	case BPF_FUNC_task_storage_get:
1502		if (bpf_prog_check_recur(prog))
1503			return &bpf_task_storage_get_recur_proto;
1504		return &bpf_task_storage_get_proto;
1505	case BPF_FUNC_task_storage_delete:
1506		if (bpf_prog_check_recur(prog))
1507			return &bpf_task_storage_delete_recur_proto;
1508		return &bpf_task_storage_delete_proto;
1509	case BPF_FUNC_for_each_map_elem:
1510		return &bpf_for_each_map_elem_proto;
1511	case BPF_FUNC_snprintf:
1512		return &bpf_snprintf_proto;
1513	case BPF_FUNC_get_func_ip:
1514		return &bpf_get_func_ip_proto_tracing;
1515	case BPF_FUNC_get_branch_snapshot:
1516		return &bpf_get_branch_snapshot_proto;
1517	case BPF_FUNC_find_vma:
1518		return &bpf_find_vma_proto;
1519	case BPF_FUNC_trace_vprintk:
1520		return bpf_get_trace_vprintk_proto();
1521	default:
1522		return bpf_base_func_proto(func_id);
1523	}
1524}
1525
1526static const struct bpf_func_proto *
1527kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1528{
1529	switch (func_id) {
1530	case BPF_FUNC_perf_event_output:
1531		return &bpf_perf_event_output_proto;
1532	case BPF_FUNC_get_stackid:
1533		return &bpf_get_stackid_proto;
1534	case BPF_FUNC_get_stack:
1535		return &bpf_get_stack_proto;
1536#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1537	case BPF_FUNC_override_return:
1538		return &bpf_override_return_proto;
1539#endif
1540	case BPF_FUNC_get_func_ip:
1541		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1542			&bpf_get_func_ip_proto_kprobe_multi :
1543			&bpf_get_func_ip_proto_kprobe;
1544	case BPF_FUNC_get_attach_cookie:
1545		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1546			&bpf_get_attach_cookie_proto_kmulti :
1547			&bpf_get_attach_cookie_proto_trace;
1548	default:
1549		return bpf_tracing_func_proto(func_id, prog);
1550	}
1551}
1552
1553/* bpf+kprobe programs can access fields of 'struct pt_regs' */
1554static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1555					const struct bpf_prog *prog,
1556					struct bpf_insn_access_aux *info)
1557{
1558	if (off < 0 || off >= sizeof(struct pt_regs))
1559		return false;
1560	if (type != BPF_READ)
1561		return false;
1562	if (off % size != 0)
1563		return false;
1564	/*
1565	 * Assertion for 32 bit to make sure last 8 byte access
1566	 * (BPF_DW) to the last 4 byte member is disallowed.
1567	 */
1568	if (off + size > sizeof(struct pt_regs))
1569		return false;
1570
1571	return true;
1572}
1573
1574const struct bpf_verifier_ops kprobe_verifier_ops = {
1575	.get_func_proto  = kprobe_prog_func_proto,
1576	.is_valid_access = kprobe_prog_is_valid_access,
1577};
1578
1579const struct bpf_prog_ops kprobe_prog_ops = {
1580};
1581
1582BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1583	   u64, flags, void *, data, u64, size)
1584{
1585	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1586
1587	/*
1588	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1589	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1590	 * from there and call the same bpf_perf_event_output() helper inline.
1591	 */
1592	return ____bpf_perf_event_output(regs, map, flags, data, size);
1593}
1594
1595static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1596	.func		= bpf_perf_event_output_tp,
1597	.gpl_only	= true,
1598	.ret_type	= RET_INTEGER,
1599	.arg1_type	= ARG_PTR_TO_CTX,
1600	.arg2_type	= ARG_CONST_MAP_PTR,
1601	.arg3_type	= ARG_ANYTHING,
1602	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1603	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1604};
1605
1606BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1607	   u64, flags)
1608{
1609	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1610
1611	/*
1612	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1613	 * the other helper's function body cannot be inlined due to being
 1614	 * external, thus we need to call the raw helper function.
1615	 */
1616	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1617			       flags, 0, 0);
1618}
1619
1620static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1621	.func		= bpf_get_stackid_tp,
1622	.gpl_only	= true,
1623	.ret_type	= RET_INTEGER,
1624	.arg1_type	= ARG_PTR_TO_CTX,
1625	.arg2_type	= ARG_CONST_MAP_PTR,
1626	.arg3_type	= ARG_ANYTHING,
1627};
1628
1629BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1630	   u64, flags)
1631{
1632	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1633
1634	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1635			     (unsigned long) size, flags, 0);
1636}
1637
1638static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1639	.func		= bpf_get_stack_tp,
1640	.gpl_only	= true,
1641	.ret_type	= RET_INTEGER,
1642	.arg1_type	= ARG_PTR_TO_CTX,
1643	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1644	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1645	.arg4_type	= ARG_ANYTHING,
1646};
1647
1648static const struct bpf_func_proto *
1649tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1650{
1651	switch (func_id) {
1652	case BPF_FUNC_perf_event_output:
1653		return &bpf_perf_event_output_proto_tp;
1654	case BPF_FUNC_get_stackid:
1655		return &bpf_get_stackid_proto_tp;
1656	case BPF_FUNC_get_stack:
1657		return &bpf_get_stack_proto_tp;
1658	case BPF_FUNC_get_attach_cookie:
1659		return &bpf_get_attach_cookie_proto_trace;
1660	default:
1661		return bpf_tracing_func_proto(func_id, prog);
1662	}
1663}
1664
1665static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1666				    const struct bpf_prog *prog,
1667				    struct bpf_insn_access_aux *info)
1668{
1669	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1670		return false;
1671	if (type != BPF_READ)
1672		return false;
1673	if (off % size != 0)
1674		return false;
1675
1676	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1677	return true;
1678}
1679
1680const struct bpf_verifier_ops tracepoint_verifier_ops = {
1681	.get_func_proto  = tp_prog_func_proto,
1682	.is_valid_access = tp_prog_is_valid_access,
1683};
1684
1685const struct bpf_prog_ops tracepoint_prog_ops = {
1686};
1687
1688BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1689	   struct bpf_perf_event_value *, buf, u32, size)
1690{
1691	int err = -EINVAL;
1692
1693	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1694		goto clear;
1695	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1696				    &buf->running);
1697	if (unlikely(err))
1698		goto clear;
1699	return 0;
1700clear:
1701	memset(buf, 0, size);
1702	return err;
1703}
1704
1705static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1706         .func           = bpf_perf_prog_read_value,
1707         .gpl_only       = true,
1708         .ret_type       = RET_INTEGER,
1709         .arg1_type      = ARG_PTR_TO_CTX,
1710         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1711         .arg3_type      = ARG_CONST_SIZE,
1712};
1713
1714BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1715	   void *, buf, u32, size, u64, flags)
1716{
1717	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1718	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1719	u32 to_copy;
1720
1721	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1722		return -EINVAL;
1723
1724	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1725		return -ENOENT;
1726
1727	if (unlikely(!br_stack))
1728		return -ENOENT;
1729
1730	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1731		return br_stack->nr * br_entry_size;
1732
1733	if (!buf || (size % br_entry_size != 0))
1734		return -EINVAL;
1735
1736	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1737	memcpy(buf, br_stack->entries, to_copy);
1738
1739	return to_copy;
1740}
1741
1742static const struct bpf_func_proto bpf_read_branch_records_proto = {
1743	.func           = bpf_read_branch_records,
1744	.gpl_only       = true,
1745	.ret_type       = RET_INTEGER,
1746	.arg1_type      = ARG_PTR_TO_CTX,
1747	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1748	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1749	.arg4_type      = ARG_ANYTHING,
1750};
1751
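/*
 * Illustrative sketch (not part of this source file) of a perf_event program
 * using bpf_read_branch_records(): query the available size with
 * BPF_F_GET_BRANCH_RECORDS_SIZE, then copy the entries. Assumes libbpf,
 * <linux/bpf_perf_event.h> for struct bpf_perf_event_data, and a perf event
 * opened with PERF_SAMPLE_BRANCH_STACK.
 *
 *	SEC("perf_event")
 *	int branches(struct bpf_perf_event_data *ctx)
 *	{
 *		static struct perf_branch_entry ents[16];
 *		long sz;
 *
 *		sz = bpf_read_branch_records(ctx, NULL, 0,
 *					     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *		if (sz <= 0)
 *			return 0;
 *		sz = bpf_read_branch_records(ctx, ents, sizeof(ents), 0);
 *		return 0;
 *	}
 */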
1752static const struct bpf_func_proto *
1753pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1754{
1755	switch (func_id) {
1756	case BPF_FUNC_perf_event_output:
1757		return &bpf_perf_event_output_proto_tp;
1758	case BPF_FUNC_get_stackid:
1759		return &bpf_get_stackid_proto_pe;
1760	case BPF_FUNC_get_stack:
1761		return &bpf_get_stack_proto_pe;
1762	case BPF_FUNC_perf_prog_read_value:
1763		return &bpf_perf_prog_read_value_proto;
1764	case BPF_FUNC_read_branch_records:
1765		return &bpf_read_branch_records_proto;
1766	case BPF_FUNC_get_attach_cookie:
1767		return &bpf_get_attach_cookie_proto_pe;
1768	default:
1769		return bpf_tracing_func_proto(func_id, prog);
1770	}
1771}
1772
1773/*
1774 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1775 * to avoid potential recursive reuse issue when/if tracepoints are added
1776 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1777 *
1778 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1779 * in normal, irq, and nmi context.
1780 */
1781struct bpf_raw_tp_regs {
1782	struct pt_regs regs[3];
1783};
1784static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1785static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1786static struct pt_regs *get_bpf_raw_tp_regs(void)
1787{
1788	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1789	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1790
1791	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1792		this_cpu_dec(bpf_raw_tp_nest_level);
1793		return ERR_PTR(-EBUSY);
1794	}
1795
1796	return &tp_regs->regs[nest_level - 1];
1797}
1798
1799static void put_bpf_raw_tp_regs(void)
1800{
1801	this_cpu_dec(bpf_raw_tp_nest_level);
1802}
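
/*
 * Editor's sketch (not part of the kernel source): the same bounded-nesting
 * idea in plain user-space C, to make the three-slot get/put pattern above
 * easier to follow. All names here are illustrative.
 */
#if 0
#include <stdio.h>

#define MAX_NEST 3			/* task, irq and nmi context */

static int slots[MAX_NEST];		/* stand-in for the per-CPU regs array */
static int nest_level;

static int *get_slot(void)
{
	if (++nest_level > MAX_NEST) {
		nest_level--;		/* too deep: refuse the caller */
		return NULL;
	}
	return &slots[nest_level - 1];
}

static void put_slot(void)
{
	nest_level--;
}

int main(void)
{
	int *a = get_slot(), *b = get_slot(), *c = get_slot();
	int *d = get_slot();		/* a fourth simultaneous user is refused */

	printf("a=%p b=%p c=%p d=%p\n", (void *)a, (void *)b, (void *)c, (void *)d);
	put_slot(); put_slot(); put_slot();
	return 0;
}
#endif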
1803
1804BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1805	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1806{
1807	struct pt_regs *regs = get_bpf_raw_tp_regs();
1808	int ret;
1809
1810	if (IS_ERR(regs))
1811		return PTR_ERR(regs);
1812
1813	perf_fetch_caller_regs(regs);
1814	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1815
1816	put_bpf_raw_tp_regs();
1817	return ret;
1818}
1819
1820static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1821	.func		= bpf_perf_event_output_raw_tp,
1822	.gpl_only	= true,
1823	.ret_type	= RET_INTEGER,
1824	.arg1_type	= ARG_PTR_TO_CTX,
1825	.arg2_type	= ARG_CONST_MAP_PTR,
1826	.arg3_type	= ARG_ANYTHING,
1827	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1828	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1829};
1830
1831extern const struct bpf_func_proto bpf_skb_output_proto;
1832extern const struct bpf_func_proto bpf_xdp_output_proto;
1833extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1834
1835BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1836	   struct bpf_map *, map, u64, flags)
1837{
1838	struct pt_regs *regs = get_bpf_raw_tp_regs();
1839	int ret;
1840
1841	if (IS_ERR(regs))
1842		return PTR_ERR(regs);
1843
1844	perf_fetch_caller_regs(regs);
1845	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1846	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1847			      flags, 0, 0);
1848	put_bpf_raw_tp_regs();
1849	return ret;
1850}
1851
1852static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1853	.func		= bpf_get_stackid_raw_tp,
1854	.gpl_only	= true,
1855	.ret_type	= RET_INTEGER,
1856	.arg1_type	= ARG_PTR_TO_CTX,
1857	.arg2_type	= ARG_CONST_MAP_PTR,
1858	.arg3_type	= ARG_ANYTHING,
1859};
1860
1861BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1862	   void *, buf, u32, size, u64, flags)
1863{
1864	struct pt_regs *regs = get_bpf_raw_tp_regs();
1865	int ret;
1866
1867	if (IS_ERR(regs))
1868		return PTR_ERR(regs);
1869
1870	perf_fetch_caller_regs(regs);
1871	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1872			    (unsigned long) size, flags, 0);
1873	put_bpf_raw_tp_regs();
1874	return ret;
1875}
1876
1877static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1878	.func		= bpf_get_stack_raw_tp,
1879	.gpl_only	= true,
1880	.ret_type	= RET_INTEGER,
1881	.arg1_type	= ARG_PTR_TO_CTX,
1882	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1883	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1884	.arg4_type	= ARG_ANYTHING,
1885};
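
/*
 * Editor's sketch (not part of the kernel source): calling bpf_get_stack()
 * from a raw tracepoint program, which exercises the raw_tp proto above.
 * Assumes vmlinux.h/bpf_helpers.h; the tracepoint and buffer size are
 * illustrative choices.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define MAX_DEPTH 16

SEC("raw_tp/sched_switch")
int on_switch(struct bpf_raw_tracepoint_args *ctx)
{
	__u64 ips[MAX_DEPTH] = {};
	long len;

	/* the kernel side fetches pt_regs itself, see bpf_get_stack_raw_tp() */
	len = bpf_get_stack(ctx, ips, sizeof(ips), 0);
	if (len > 0)
		bpf_printk("captured %ld bytes of kernel stack", len);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif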
1886
1887static const struct bpf_func_proto *
1888raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1889{
1890	switch (func_id) {
1891	case BPF_FUNC_perf_event_output:
1892		return &bpf_perf_event_output_proto_raw_tp;
1893	case BPF_FUNC_get_stackid:
1894		return &bpf_get_stackid_proto_raw_tp;
1895	case BPF_FUNC_get_stack:
1896		return &bpf_get_stack_proto_raw_tp;
1897	default:
1898		return bpf_tracing_func_proto(func_id, prog);
1899	}
1900}
1901
1902const struct bpf_func_proto *
1903tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1904{
1905	const struct bpf_func_proto *fn;
1906
1907	switch (func_id) {
1908#ifdef CONFIG_NET
1909	case BPF_FUNC_skb_output:
1910		return &bpf_skb_output_proto;
1911	case BPF_FUNC_xdp_output:
1912		return &bpf_xdp_output_proto;
1913	case BPF_FUNC_skc_to_tcp6_sock:
1914		return &bpf_skc_to_tcp6_sock_proto;
1915	case BPF_FUNC_skc_to_tcp_sock:
1916		return &bpf_skc_to_tcp_sock_proto;
1917	case BPF_FUNC_skc_to_tcp_timewait_sock:
1918		return &bpf_skc_to_tcp_timewait_sock_proto;
1919	case BPF_FUNC_skc_to_tcp_request_sock:
1920		return &bpf_skc_to_tcp_request_sock_proto;
1921	case BPF_FUNC_skc_to_udp6_sock:
1922		return &bpf_skc_to_udp6_sock_proto;
1923	case BPF_FUNC_skc_to_unix_sock:
1924		return &bpf_skc_to_unix_sock_proto;
1925	case BPF_FUNC_skc_to_mptcp_sock:
1926		return &bpf_skc_to_mptcp_sock_proto;
1927	case BPF_FUNC_sk_storage_get:
1928		return &bpf_sk_storage_get_tracing_proto;
1929	case BPF_FUNC_sk_storage_delete:
1930		return &bpf_sk_storage_delete_tracing_proto;
1931	case BPF_FUNC_sock_from_file:
1932		return &bpf_sock_from_file_proto;
1933	case BPF_FUNC_get_socket_cookie:
1934		return &bpf_get_socket_ptr_cookie_proto;
1935	case BPF_FUNC_xdp_get_buff_len:
1936		return &bpf_xdp_get_buff_len_trace_proto;
1937#endif
1938	case BPF_FUNC_seq_printf:
1939		return prog->expected_attach_type == BPF_TRACE_ITER ?
1940		       &bpf_seq_printf_proto :
1941		       NULL;
1942	case BPF_FUNC_seq_write:
1943		return prog->expected_attach_type == BPF_TRACE_ITER ?
1944		       &bpf_seq_write_proto :
1945		       NULL;
1946	case BPF_FUNC_seq_printf_btf:
1947		return prog->expected_attach_type == BPF_TRACE_ITER ?
1948		       &bpf_seq_printf_btf_proto :
1949		       NULL;
1950	case BPF_FUNC_d_path:
1951		return &bpf_d_path_proto;
1952	case BPF_FUNC_get_func_arg:
1953		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1954	case BPF_FUNC_get_func_ret:
1955		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1956	case BPF_FUNC_get_func_arg_cnt:
1957		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
1958	case BPF_FUNC_get_attach_cookie:
1959		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
1960	default:
1961		fn = raw_tp_prog_func_proto(func_id, prog);
1962		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
1963			fn = bpf_iter_get_func_proto(func_id, prog);
1964		return fn;
1965	}
1966}
1967
1968static bool raw_tp_prog_is_valid_access(int off, int size,
1969					enum bpf_access_type type,
1970					const struct bpf_prog *prog,
1971					struct bpf_insn_access_aux *info)
1972{
1973	return bpf_tracing_ctx_access(off, size, type);
1974}
1975
1976static bool tracing_prog_is_valid_access(int off, int size,
1977					 enum bpf_access_type type,
1978					 const struct bpf_prog *prog,
1979					 struct bpf_insn_access_aux *info)
1980{
1981	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
1982}
1983
1984int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1985				     const union bpf_attr *kattr,
1986				     union bpf_attr __user *uattr)
1987{
1988	return -ENOTSUPP;
1989}
1990
1991const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1992	.get_func_proto  = raw_tp_prog_func_proto,
1993	.is_valid_access = raw_tp_prog_is_valid_access,
1994};
1995
1996const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1997#ifdef CONFIG_NET
1998	.test_run = bpf_prog_test_run_raw_tp,
1999#endif
2000};
2001
2002const struct bpf_verifier_ops tracing_verifier_ops = {
2003	.get_func_proto  = tracing_prog_func_proto,
2004	.is_valid_access = tracing_prog_is_valid_access,
2005};
2006
2007const struct bpf_prog_ops tracing_prog_ops = {
2008	.test_run = bpf_prog_test_run_tracing,
2009};
2010
2011static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2012						 enum bpf_access_type type,
2013						 const struct bpf_prog *prog,
2014						 struct bpf_insn_access_aux *info)
2015{
2016	if (off == 0) {
2017		if (size != sizeof(u64) || type != BPF_READ)
2018			return false;
2019		info->reg_type = PTR_TO_TP_BUFFER;
2020	}
2021	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2022}
2023
2024const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2025	.get_func_proto  = raw_tp_prog_func_proto,
2026	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2027};
2028
2029const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2030};
2031
2032static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2033				    const struct bpf_prog *prog,
2034				    struct bpf_insn_access_aux *info)
2035{
2036	const int size_u64 = sizeof(u64);
2037
2038	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2039		return false;
2040	if (type != BPF_READ)
2041		return false;
2042	if (off % size != 0) {
2043		if (sizeof(unsigned long) != 4)
2044			return false;
2045		if (size != 8)
2046			return false;
2047		if (off % size != 4)
2048			return false;
2049	}
2050
2051	switch (off) {
2052	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2053		bpf_ctx_record_field_size(info, size_u64);
2054		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2055			return false;
2056		break;
2057	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2058		bpf_ctx_record_field_size(info, size_u64);
2059		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2060			return false;
2061		break;
2062	default:
2063		if (size != sizeof(long))
2064			return false;
2065	}
2066
2067	return true;
2068}
2069
2070static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2071				      const struct bpf_insn *si,
2072				      struct bpf_insn *insn_buf,
2073				      struct bpf_prog *prog, u32 *target_size)
2074{
2075	struct bpf_insn *insn = insn_buf;
2076
2077	switch (si->off) {
2078	case offsetof(struct bpf_perf_event_data, sample_period):
2079		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2080						       data), si->dst_reg, si->src_reg,
2081				      offsetof(struct bpf_perf_event_data_kern, data));
2082		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2083				      bpf_target_off(struct perf_sample_data, period, 8,
2084						     target_size));
2085		break;
2086	case offsetof(struct bpf_perf_event_data, addr):
2087		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2088						       data), si->dst_reg, si->src_reg,
2089				      offsetof(struct bpf_perf_event_data_kern, data));
2090		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2091				      bpf_target_off(struct perf_sample_data, addr, 8,
2092						     target_size));
2093		break;
2094	default:
2095		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2096						       regs), si->dst_reg, si->src_reg,
2097				      offsetof(struct bpf_perf_event_data_kern, regs));
2098		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2099				      si->off);
2100		break;
2101	}
2102
2103	return insn - insn_buf;
2104}
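
/*
 * Editor's sketch (not part of the kernel source): what the rewrite above
 * means for a program. A plain field read of ctx->sample_period passes
 * pe_prog_is_valid_access() and is converted into the two dependent loads
 * emitted by pe_prog_convert_ctx_access(). Assumes vmlinux.h/bpf_helpers.h.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	/*
	 * Rewritten roughly as:
	 *   rX = *(ptr *)(ctx + offsetof(bpf_perf_event_data_kern, data))
	 *   rX = *(u64 *)(rX + offsetof(perf_sample_data, period))
	 */
	__u64 period = ctx->sample_period;

	bpf_printk("sample period %llu", period);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif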
2105
2106const struct bpf_verifier_ops perf_event_verifier_ops = {
2107	.get_func_proto		= pe_prog_func_proto,
2108	.is_valid_access	= pe_prog_is_valid_access,
2109	.convert_ctx_access	= pe_prog_convert_ctx_access,
2110};
2111
2112const struct bpf_prog_ops perf_event_prog_ops = {
2113};
2114
2115static DEFINE_MUTEX(bpf_event_mutex);
2116
2117#define BPF_TRACE_MAX_PROGS 64
2118
2119int perf_event_attach_bpf_prog(struct perf_event *event,
2120			       struct bpf_prog *prog,
2121			       u64 bpf_cookie)
2122{
2123	struct bpf_prog_array *old_array;
2124	struct bpf_prog_array *new_array;
2125	int ret = -EEXIST;
2126
2127	/*
2128	 * Kprobe override only works if the probe is placed on the function
2129	 * entry, and only if the function is on the error-injection opt-in list.
2130	 */
2131	if (prog->kprobe_override &&
2132	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2133	     !trace_kprobe_error_injectable(event->tp_event)))
2134		return -EINVAL;
2135
2136	mutex_lock(&bpf_event_mutex);
2137
2138	if (event->prog)
2139		goto unlock;
2140
2141	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2142	if (old_array &&
2143	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2144		ret = -E2BIG;
2145		goto unlock;
2146	}
2147
2148	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2149	if (ret < 0)
2150		goto unlock;
2151
2152	/* publish the new prog_array on event->tp_event and set event->prog */
2153	event->prog = prog;
2154	event->bpf_cookie = bpf_cookie;
2155	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2156	bpf_prog_array_free_sleepable(old_array);
2157
2158unlock:
2159	mutex_unlock(&bpf_event_mutex);
2160	return ret;
2161}
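
/*
 * Editor's sketch (not part of the kernel source): the user-space path that
 * reaches perf_event_attach_bpf_prog(). prog_fd is assumed to come from a
 * prior BPF_PROG_LOAD of a kprobe/tracepoint program; attr describes the
 * perf event. Error handling is minimal on purpose.
 */
#if 0
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_bpf_to_event(struct perf_event_attr *attr, int prog_fd)
{
	int perf_fd = syscall(SYS_perf_event_open, attr,
			      -1 /* pid: any */, 0 /* cpu 0 */,
			      -1 /* no group */, 0 /* flags */);

	if (perf_fd < 0)
		return -1;
	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0 ||
	    ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		close(perf_fd);
		return -1;
	}
	return perf_fd;
}
#endif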
2162
2163void perf_event_detach_bpf_prog(struct perf_event *event)
2164{
2165	struct bpf_prog_array *old_array;
2166	struct bpf_prog_array *new_array;
2167	int ret;
2168
2169	mutex_lock(&bpf_event_mutex);
2170
2171	if (!event->prog)
2172		goto unlock;
2173
2174	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2175	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2176	if (ret == -ENOENT)
2177		goto unlock;
2178	if (ret < 0) {
2179		bpf_prog_array_delete_safe(old_array, event->prog);
2180	} else {
2181		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2182		bpf_prog_array_free_sleepable(old_array);
2183	}
2184
2185	bpf_prog_put(event->prog);
2186	event->prog = NULL;
2187
2188unlock:
2189	mutex_unlock(&bpf_event_mutex);
2190}
2191
2192int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2193{
2194	struct perf_event_query_bpf __user *uquery = info;
2195	struct perf_event_query_bpf query = {};
2196	struct bpf_prog_array *progs;
2197	u32 *ids, prog_cnt, ids_len;
2198	int ret;
2199
2200	if (!perfmon_capable())
2201		return -EPERM;
2202	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2203		return -EINVAL;
2204	if (copy_from_user(&query, uquery, sizeof(query)))
2205		return -EFAULT;
2206
2207	ids_len = query.ids_len;
2208	if (ids_len > BPF_TRACE_MAX_PROGS)
2209		return -E2BIG;
2210	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2211	if (!ids)
2212		return -ENOMEM;
2213	/*
2214	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2215	 * is required when the user only wants to read uquery->prog_cnt.
2216	 * There is no need to check for it here since that case is handled
2217	 * gracefully in bpf_prog_array_copy_info.
2218	 */
2219
2220	mutex_lock(&bpf_event_mutex);
2221	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2222	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2223	mutex_unlock(&bpf_event_mutex);
2224
2225	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2226	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2227		ret = -EFAULT;
2228
2229	kfree(ids);
2230	return ret;
2231}
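
/*
 * Editor's sketch (not part of the kernel source): querying the attached
 * program IDs from user space with PERF_EVENT_IOC_QUERY_BPF, which is served
 * by perf_event_query_prog_array() above. perf_fd is assumed to be an open
 * tracepoint perf event fd.
 */
#if 0
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static void print_attached_prog_ids(int perf_fd)
{
	__u32 ids_len = 64;	/* BPF_TRACE_MAX_PROGS is the kernel-side cap */
	struct perf_event_query_bpf *query;

	query = calloc(1, sizeof(*query) + ids_len * sizeof(__u32));
	if (!query)
		return;
	query->ids_len = ids_len;
	if (!ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query)) {
		for (__u32 i = 0; i < query->prog_cnt && i < ids_len; i++)
			printf("attached prog id %u\n", query->ids[i]);
	}
	free(query);
}
#endif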
2232
2233extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2234extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2235
2236struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2237{
2238	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2239
2240	for (; btp < __stop__bpf_raw_tp; btp++) {
2241		if (!strcmp(btp->tp->name, name))
2242			return btp;
2243	}
2244
2245	return bpf_get_raw_tracepoint_module(name);
2246}
2247
2248void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2249{
2250	struct module *mod;
2251
2252	preempt_disable();
2253	mod = __module_address((unsigned long)btp);
2254	module_put(mod);
2255	preempt_enable();
2256}
2257
2258static __always_inline
2259void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2260{
2261	cant_sleep();
2262	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2263		bpf_prog_inc_misses_counter(prog);
2264		goto out;
2265	}
2266	rcu_read_lock();
2267	(void) bpf_prog_run(prog, args);
2268	rcu_read_unlock();
2269out:
2270	this_cpu_dec(*(prog->active));
2271}
2272
2273#define UNPACK(...)			__VA_ARGS__
2274#define REPEAT_1(FN, DL, X, ...)	FN(X)
2275#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2276#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2277#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2278#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2279#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2280#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2281#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2282#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2283#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2284#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2285#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2286#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2287
2288#define SARG(X)		u64 arg##X
2289#define COPY(X)		args[X] = arg##X
2290
2291#define __DL_COM	(,)
2292#define __DL_SEM	(;)
2293
2294#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2295
2296#define BPF_TRACE_DEFN_x(x)						\
2297	void bpf_trace_run##x(struct bpf_prog *prog,			\
2298			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2299	{								\
2300		u64 args[x];						\
2301		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2302		__bpf_trace_run(prog, args);				\
2303	}								\
2304	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2305BPF_TRACE_DEFN_x(1);
2306BPF_TRACE_DEFN_x(2);
2307BPF_TRACE_DEFN_x(3);
2308BPF_TRACE_DEFN_x(4);
2309BPF_TRACE_DEFN_x(5);
2310BPF_TRACE_DEFN_x(6);
2311BPF_TRACE_DEFN_x(7);
2312BPF_TRACE_DEFN_x(8);
2313BPF_TRACE_DEFN_x(9);
2314BPF_TRACE_DEFN_x(10);
2315BPF_TRACE_DEFN_x(11);
2316BPF_TRACE_DEFN_x(12);
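
/*
 * Editor's note (not part of the kernel source): roughly what the macro
 * machinery above expands to for x = 2, to make the REPEAT()/SARG()/COPY()
 * indirection easier to read.
 */
#if 0
void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
{
	u64 args[2];

	args[0] = arg0;
	args[1] = arg1;
	__bpf_trace_run(prog, args);
}
EXPORT_SYMBOL_GPL(bpf_trace_run2);
#endif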
2317
2318static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2319{
2320	struct tracepoint *tp = btp->tp;
2321
2322	/*
2323	 * check that the program doesn't access arguments beyond what's
2324	 * available in this tracepoint
2325	 */
2326	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2327		return -EINVAL;
2328
2329	if (prog->aux->max_tp_access > btp->writable_size)
2330		return -EINVAL;
2331
2332	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
2333						   prog);
2334}
2335
2336int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2337{
2338	return __bpf_probe_register(btp, prog);
2339}
2340
2341int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2342{
2343	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2344}
2345
2346int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2347			    u32 *fd_type, const char **buf,
2348			    u64 *probe_offset, u64 *probe_addr)
2349{
2350	bool is_tracepoint, is_syscall_tp;
2351	struct bpf_prog *prog;
2352	int flags, err = 0;
2353
2354	prog = event->prog;
2355	if (!prog)
2356		return -ENOENT;
2357
2358	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2359	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2360		return -EOPNOTSUPP;
2361
2362	*prog_id = prog->aux->id;
2363	flags = event->tp_event->flags;
2364	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2365	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2366
2367	if (is_tracepoint || is_syscall_tp) {
2368		*buf = is_tracepoint ? event->tp_event->tp->name
2369				     : event->tp_event->name;
2370		*fd_type = BPF_FD_TYPE_TRACEPOINT;
2371		*probe_offset = 0x0;
2372		*probe_addr = 0x0;
2373	} else {
2374		/* kprobe/uprobe */
2375		err = -EOPNOTSUPP;
2376#ifdef CONFIG_KPROBE_EVENTS
2377		if (flags & TRACE_EVENT_FL_KPROBE)
2378			err = bpf_get_kprobe_info(event, fd_type, buf,
2379						  probe_offset, probe_addr,
2380						  event->attr.type == PERF_TYPE_TRACEPOINT);
2381#endif
2382#ifdef CONFIG_UPROBE_EVENTS
2383		if (flags & TRACE_EVENT_FL_UPROBE)
2384			err = bpf_get_uprobe_info(event, fd_type, buf,
2385						  probe_offset,
2386						  event->attr.type == PERF_TYPE_TRACEPOINT);
2387#endif
2388	}
2389
2390	return err;
2391}
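
/*
 * Editor's sketch (not part of the kernel source): bpf_get_perf_event_info()
 * backs the bpf(BPF_TASK_FD_QUERY, ...) command; a user-space caller might
 * look like this. Field names follow union bpf_attr's task_fd_query member;
 * treat the wrapper itself as an illustrative assumption.
 */
#if 0
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int query_task_fd(int pid, int target_fd, char *buf, __u32 buf_len)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = target_fd;
	attr.task_fd_query.buf = (__u64)(unsigned long)buf;
	attr.task_fd_query.buf_len = buf_len;

	/* on success, prog_id/fd_type/probe_offset/probe_addr are filled in */
	return syscall(SYS_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
}
#endif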
2392
2393static int __init send_signal_irq_work_init(void)
2394{
2395	int cpu;
2396	struct send_signal_irq_work *work;
2397
2398	for_each_possible_cpu(cpu) {
2399		work = per_cpu_ptr(&send_signal_work, cpu);
2400		init_irq_work(&work->irq_work, do_bpf_send_signal);
2401	}
2402	return 0;
2403}
2404
2405subsys_initcall(send_signal_irq_work_init);
2406
2407#ifdef CONFIG_MODULES
2408static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2409			    void *module)
2410{
2411	struct bpf_trace_module *btm, *tmp;
2412	struct module *mod = module;
2413	int ret = 0;
2414
2415	if (mod->num_bpf_raw_events == 0 ||
2416	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2417		goto out;
2418
2419	mutex_lock(&bpf_module_mutex);
2420
2421	switch (op) {
2422	case MODULE_STATE_COMING:
2423		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2424		if (btm) {
2425			btm->module = module;
2426			list_add(&btm->list, &bpf_trace_modules);
2427		} else {
2428			ret = -ENOMEM;
2429		}
2430		break;
2431	case MODULE_STATE_GOING:
2432		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2433			if (btm->module == module) {
2434				list_del(&btm->list);
2435				kfree(btm);
2436				break;
2437			}
2438		}
2439		break;
2440	}
2441
2442	mutex_unlock(&bpf_module_mutex);
2443
2444out:
2445	return notifier_from_errno(ret);
2446}
2447
2448static struct notifier_block bpf_module_nb = {
2449	.notifier_call = bpf_event_notify,
2450};
2451
2452static int __init bpf_event_init(void)
2453{
2454	register_module_notifier(&bpf_module_nb);
2455	return 0;
2456}
2457
2458fs_initcall(bpf_event_init);
2459#endif /* CONFIG_MODULES */
2460
2461#ifdef CONFIG_FPROBE
2462struct bpf_kprobe_multi_link {
2463	struct bpf_link link;
2464	struct fprobe fp;
2465	unsigned long *addrs;
2466	u64 *cookies;
2467	u32 cnt;
2468	u32 mods_cnt;
2469	struct module **mods;
2470};
2471
2472struct bpf_kprobe_multi_run_ctx {
2473	struct bpf_run_ctx run_ctx;
2474	struct bpf_kprobe_multi_link *link;
2475	unsigned long entry_ip;
2476};
2477
2478struct user_syms {
2479	const char **syms;
2480	char *buf;
2481};
2482
2483static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2484{
2485	unsigned long __user usymbol;
2486	const char **syms = NULL;
2487	char *buf = NULL, *p;
2488	int err = -ENOMEM;
2489	unsigned int i;
2490
2491	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2492	if (!syms)
2493		goto error;
2494
2495	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2496	if (!buf)
2497		goto error;
2498
2499	for (p = buf, i = 0; i < cnt; i++) {
2500		if (__get_user(usymbol, usyms + i)) {
2501			err = -EFAULT;
2502			goto error;
2503		}
2504		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2505		if (err == KSYM_NAME_LEN)
2506			err = -E2BIG;
2507		if (err < 0)
2508			goto error;
2509		syms[i] = p;
2510		p += err + 1;
2511	}
2512
2513	us->syms = syms;
2514	us->buf = buf;
2515	return 0;
2516
2517error:
2518	if (err) {
2519		kvfree(syms);
2520		kvfree(buf);
2521	}
2522	return err;
2523}
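
/*
 * Editor's note (not part of the kernel source): for a hypothetical input of
 * usyms = { "vfs_read", "vfs_write" }, copy_user_syms() above ends up with
 *
 *	us->buf  : "vfs_read\0vfs_write\0"          (one flat arena)
 *	us->syms : { us->buf + 0, us->buf + 9 }     (pointers into that arena)
 *
 * so free_user_syms() only has these two allocations to release.
 */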
2524
2525static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2526{
2527	u32 i;
2528
2529	for (i = 0; i < cnt; i++)
2530		module_put(mods[i]);
2531}
2532
2533static void free_user_syms(struct user_syms *us)
2534{
2535	kvfree(us->syms);
2536	kvfree(us->buf);
2537}
2538
2539static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2540{
2541	struct bpf_kprobe_multi_link *kmulti_link;
2542
2543	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2544	unregister_fprobe(&kmulti_link->fp);
2545	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2546}
2547
2548static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2549{
2550	struct bpf_kprobe_multi_link *kmulti_link;
2551
2552	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2553	kvfree(kmulti_link->addrs);
2554	kvfree(kmulti_link->cookies);
2555	kfree(kmulti_link->mods);
2556	kfree(kmulti_link);
2557}
2558
2559static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2560	.release = bpf_kprobe_multi_link_release,
2561	.dealloc = bpf_kprobe_multi_link_dealloc,
2562};
2563
2564static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2565{
2566	const struct bpf_kprobe_multi_link *link = priv;
2567	unsigned long *addr_a = a, *addr_b = b;
2568	u64 *cookie_a, *cookie_b;
2569
2570	cookie_a = link->cookies + (addr_a - link->addrs);
2571	cookie_b = link->cookies + (addr_b - link->addrs);
2572
2573	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2574	swap(*addr_a, *addr_b);
2575	swap(*cookie_a, *cookie_b);
2576}
2577
2578static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2579{
2580	const unsigned long *addr_a = a, *addr_b = b;
2581
2582	if (*addr_a == *addr_b)
2583		return 0;
2584	return *addr_a < *addr_b ? -1 : 1;
2585}
2586
2587static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2588{
2589	return bpf_kprobe_multi_addrs_cmp(a, b);
2590}
2591
2592static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2593{
2594	struct bpf_kprobe_multi_run_ctx *run_ctx;
2595	struct bpf_kprobe_multi_link *link;
2596	u64 *cookie, entry_ip;
2597	unsigned long *addr;
2598
2599	if (WARN_ON_ONCE(!ctx))
2600		return 0;
2601	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2602	link = run_ctx->link;
2603	if (!link->cookies)
2604		return 0;
2605	entry_ip = run_ctx->entry_ip;
2606	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2607		       bpf_kprobe_multi_addrs_cmp);
2608	if (!addr)
2609		return 0;
2610	cookie = link->cookies + (addr - link->addrs);
2611	return *cookie;
2612}
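
/*
 * Editor's sketch (not part of the kernel source): the program-side consumer
 * of the cookie lookup above. Assumes vmlinux.h, bpf_helpers.h and
 * bpf_tracing.h from libbpf; the section name is illustrative.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe.multi")
int BPF_PROG(on_entry)
{
	/* resolved via bpf_kprobe_multi_cookie() for the entry IP that fired */
	__u64 cookie = bpf_get_attach_cookie(ctx);

	bpf_printk("fired with cookie %llu", cookie);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif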
2613
2614static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2615{
2616	struct bpf_kprobe_multi_run_ctx *run_ctx;
2617
2618	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2619	return run_ctx->entry_ip;
2620}
2621
2622static int
2623kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2624			   unsigned long entry_ip, struct pt_regs *regs)
2625{
2626	struct bpf_kprobe_multi_run_ctx run_ctx = {
2627		.link = link,
2628		.entry_ip = entry_ip,
2629	};
2630	struct bpf_run_ctx *old_run_ctx;
2631	int err;
2632
2633	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2634		err = 0;
2635		goto out;
2636	}
2637
2638	migrate_disable();
2639	rcu_read_lock();
2640	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2641	err = bpf_prog_run(link->link.prog, regs);
2642	bpf_reset_run_ctx(old_run_ctx);
2643	rcu_read_unlock();
2644	migrate_enable();
2645
2646 out:
2647	__this_cpu_dec(bpf_prog_active);
2648	return err;
2649}
2650
2651static void
2652kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2653			  struct pt_regs *regs)
2654{
2655	struct bpf_kprobe_multi_link *link;
2656
2657	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2658	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2659}
2660
2661static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2662{
2663	const char **str_a = (const char **) a;
2664	const char **str_b = (const char **) b;
2665
2666	return strcmp(*str_a, *str_b);
2667}
2668
2669struct multi_symbols_sort {
2670	const char **funcs;
2671	u64 *cookies;
2672};
2673
2674static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2675{
2676	const struct multi_symbols_sort *data = priv;
2677	const char **name_a = a, **name_b = b;
2678
2679	swap(*name_a, *name_b);
2680
2681	/* If cookies are provided, swap the related cookies as well. */
2682	if (data->cookies) {
2683		u64 *cookie_a, *cookie_b;
2684
2685		cookie_a = data->cookies + (name_a - data->funcs);
2686		cookie_b = data->cookies + (name_b - data->funcs);
2687		swap(*cookie_a, *cookie_b);
2688	}
2689}
2690
2691struct module_addr_args {
2692	unsigned long *addrs;
2693	u32 addrs_cnt;
2694	struct module **mods;
2695	int mods_cnt;
2696	int mods_cap;
2697};
2698
2699static int module_callback(void *data, const char *name,
2700			   struct module *mod, unsigned long addr)
2701{
2702	struct module_addr_args *args = data;
2703	struct module **mods;
2704
2705	/* We iterate over all module symbols and for each one we:
2706	 * - search for it in the provided addresses array
2707	 * - if found, check whether we already have that module pointer stored
2708	 *   (modules are iterated sequentially, so checking just the last
2709	 *   stored pointer is enough)
2710	 * - take a module reference and store the pointer
2711	 */
2712	if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr),
2713		       bpf_kprobe_multi_addrs_cmp))
2714		return 0;
2715
2716	if (args->mods && args->mods[args->mods_cnt - 1] == mod)
2717		return 0;
2718
2719	if (args->mods_cnt == args->mods_cap) {
2720		args->mods_cap = max(16, args->mods_cap * 3 / 2);
2721		mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL);
2722		if (!mods)
2723			return -ENOMEM;
2724		args->mods = mods;
2725	}
2726
2727	if (!try_module_get(mod))
2728		return -EINVAL;
2729
2730	args->mods[args->mods_cnt] = mod;
2731	args->mods_cnt++;
2732	return 0;
2733}
2734
2735static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2736{
2737	struct module_addr_args args = {
2738		.addrs     = addrs,
2739		.addrs_cnt = addrs_cnt,
2740	};
2741	int err;
2742
2743	/* We return either err < 0 in case of error, ... */
2744	err = module_kallsyms_on_each_symbol(module_callback, &args);
2745	if (err) {
2746		kprobe_multi_put_modules(args.mods, args.mods_cnt);
2747		kfree(args.mods);
2748		return err;
2749	}
2750
2751	/* or the number of modules found if everything is ok. */
2752	*mods = args.mods;
2753	return args.mods_cnt;
2754}
2755
2756int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2757{
2758	struct bpf_kprobe_multi_link *link = NULL;
2759	struct bpf_link_primer link_primer;
2760	void __user *ucookies;
2761	unsigned long *addrs;
2762	u32 flags, cnt, size;
2763	void __user *uaddrs;
2764	u64 *cookies = NULL;
2765	void __user *usyms;
2766	int err;
2767
2768	/* no support for 32bit archs yet */
2769	if (sizeof(u64) != sizeof(void *))
2770		return -EOPNOTSUPP;
2771
2772	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
2773		return -EINVAL;
2774
2775	flags = attr->link_create.kprobe_multi.flags;
2776	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2777		return -EINVAL;
2778
2779	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2780	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2781	if (!!uaddrs == !!usyms)
2782		return -EINVAL;
2783
2784	cnt = attr->link_create.kprobe_multi.cnt;
2785	if (!cnt)
2786		return -EINVAL;
2787
2788	size = cnt * sizeof(*addrs);
2789	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2790	if (!addrs)
2791		return -ENOMEM;
2792
2793	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2794	if (ucookies) {
2795		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2796		if (!cookies) {
2797			err = -ENOMEM;
2798			goto error;
2799		}
2800		if (copy_from_user(cookies, ucookies, size)) {
2801			err = -EFAULT;
2802			goto error;
2803		}
2804	}
2805
2806	if (uaddrs) {
2807		if (copy_from_user(addrs, uaddrs, size)) {
2808			err = -EFAULT;
2809			goto error;
2810		}
2811	} else {
2812		struct multi_symbols_sort data = {
2813			.cookies = cookies,
2814		};
2815		struct user_syms us;
2816
2817		err = copy_user_syms(&us, usyms, cnt);
2818		if (err)
2819			goto error;
2820
2821		if (cookies)
2822			data.funcs = us.syms;
2823
2824		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
2825		       symbols_swap_r, &data);
2826
2827		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
2828		free_user_syms(&us);
2829		if (err)
2830			goto error;
2831	}
2832
2833	link = kzalloc(sizeof(*link), GFP_KERNEL);
2834	if (!link) {
2835		err = -ENOMEM;
2836		goto error;
2837	}
2838
2839	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
2840		      &bpf_kprobe_multi_link_lops, prog);
2841
2842	err = bpf_link_prime(&link->link, &link_primer);
2843	if (err)
2844		goto error;
2845
2846	if (flags & BPF_F_KPROBE_MULTI_RETURN)
2847		link->fp.exit_handler = kprobe_multi_link_handler;
2848	else
2849		link->fp.entry_handler = kprobe_multi_link_handler;
2850
2851	link->addrs = addrs;
2852	link->cookies = cookies;
2853	link->cnt = cnt;
2854
2855	if (cookies) {
2856		/*
2857		 * Sorting the addresses will sort the cookies as well
2858		 * (see bpf_kprobe_multi_cookie_swap). This way we can find
2859		 * the cookie for a given address in the bpf_get_attach_cookie
2860		 * helper.
2861		 */
2862		sort_r(addrs, cnt, sizeof(*addrs),
2863		       bpf_kprobe_multi_cookie_cmp,
2864		       bpf_kprobe_multi_cookie_swap,
2865		       link);
2866	} else {
2867		/*
2868		 * We need to sort the addrs array even if no cookies are
2869		 * provided, so that get_modules_for_addrs can bsearch it.
2870		 */
2871		sort(addrs, cnt, sizeof(*addrs),
2872		       bpf_kprobe_multi_addrs_cmp, NULL);
2873	}
2874
2875	err = get_modules_for_addrs(&link->mods, addrs, cnt);
2876	if (err < 0) {
2877		bpf_link_cleanup(&link_primer);
2878		return err;
2879	}
2880	link->mods_cnt = err;
2881
2882	err = register_fprobe_ips(&link->fp, addrs, cnt);
2883	if (err) {
2884		kprobe_multi_put_modules(link->mods, link->mods_cnt);
2885		bpf_link_cleanup(&link_primer);
2886		return err;
2887	}
2888
2889	return bpf_link_settle(&link_primer);
2890
2891error:
2892	kfree(link);
2893	kvfree(addrs);
2894	kvfree(cookies);
2895	return err;
2896}
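
/*
 * Editor's sketch (not part of the kernel source): driving this attach path
 * from user space, assuming libbpf's bpf_kprobe_multi_opts API. The symbol
 * names and cookie values are illustrative; syms and cookies are parallel
 * arrays, exactly as consumed above.
 */
#if 0
#include <bpf/libbpf.h>

static struct bpf_link *attach_multi(struct bpf_program *prog)
{
	const char *syms[] = { "vfs_read", "vfs_write" };
	__u64 cookies[]    = { 1, 2 };
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms    = syms,
		.cookies = cookies,
		.cnt     = 2,
	);

	return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
}
#endif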
2897#else /* !CONFIG_FPROBE */
2898int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2899{
2900	return -EOPNOTSUPP;
2901}
2902static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2903{
2904	return 0;
2905}
2906static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2907{
2908	return 0;
2909}
2910#endif
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/kernel.h>
   6#include <linux/types.h>
   7#include <linux/slab.h>
   8#include <linux/bpf.h>
   9#include <linux/bpf_verifier.h>
  10#include <linux/bpf_perf_event.h>
  11#include <linux/btf.h>
  12#include <linux/filter.h>
  13#include <linux/uaccess.h>
  14#include <linux/ctype.h>
  15#include <linux/kprobes.h>
  16#include <linux/spinlock.h>
  17#include <linux/syscalls.h>
  18#include <linux/error-injection.h>
  19#include <linux/btf_ids.h>
  20#include <linux/bpf_lsm.h>
  21#include <linux/fprobe.h>
  22#include <linux/bsearch.h>
  23#include <linux/sort.h>
  24#include <linux/key.h>
  25#include <linux/verification.h>
  26#include <linux/namei.h>
  27
  28#include <net/bpf_sk_storage.h>
  29
  30#include <uapi/linux/bpf.h>
  31#include <uapi/linux/btf.h>
  32
  33#include <asm/tlb.h>
  34
  35#include "trace_probe.h"
  36#include "trace.h"
  37
  38#define CREATE_TRACE_POINTS
  39#include "bpf_trace.h"
  40
  41#define bpf_event_rcu_dereference(p)					\
  42	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
  43
  44#define MAX_UPROBE_MULTI_CNT (1U << 20)
  45#define MAX_KPROBE_MULTI_CNT (1U << 20)
  46
  47#ifdef CONFIG_MODULES
  48struct bpf_trace_module {
  49	struct module *module;
  50	struct list_head list;
  51};
  52
  53static LIST_HEAD(bpf_trace_modules);
  54static DEFINE_MUTEX(bpf_module_mutex);
  55
  56static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  57{
  58	struct bpf_raw_event_map *btp, *ret = NULL;
  59	struct bpf_trace_module *btm;
  60	unsigned int i;
  61
  62	mutex_lock(&bpf_module_mutex);
  63	list_for_each_entry(btm, &bpf_trace_modules, list) {
  64		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
  65			btp = &btm->module->bpf_raw_events[i];
  66			if (!strcmp(btp->tp->name, name)) {
  67				if (try_module_get(btm->module))
  68					ret = btp;
  69				goto out;
  70			}
  71		}
  72	}
  73out:
  74	mutex_unlock(&bpf_module_mutex);
  75	return ret;
  76}
  77#else
  78static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  79{
  80	return NULL;
  81}
  82#endif /* CONFIG_MODULES */
  83
  84u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  85u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  86
  87static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
  88				  u64 flags, const struct btf **btf,
  89				  s32 *btf_id);
  90static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
  91static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  92
  93static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
  94static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  95
  96/**
  97 * trace_call_bpf - invoke BPF program
  98 * @call: tracepoint event
  99 * @ctx: opaque context pointer
 100 *
 101 * kprobe handlers execute BPF programs via this helper.
 102 * Can be used from static tracepoints in the future.
 103 *
 104 * Return: BPF programs always return an integer which is interpreted by
 105 * kprobe handler as:
 106 * 0 - return from kprobe (event is filtered out)
 107 * 1 - store kprobe event into ring buffer
 108 * Other values are reserved and currently alias to 1
 109 */
 110unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 111{
 112	unsigned int ret;
 113
 114	cant_sleep();
 115
 116	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 117		/*
 118		 * since some bpf program is already running on this cpu,
 119		 * don't call into another bpf program (same or different)
 120		 * and don't send kprobe event into ring-buffer,
 121		 * so return zero here
 122		 */
 123		rcu_read_lock();
 124		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
 125		rcu_read_unlock();
 126		ret = 0;
 127		goto out;
 128	}
 129
 130	/*
 131	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
 132	 * to all call sites, we did a bpf_prog_array_valid() there to check
 133	 * whether call->prog_array is empty or not, which is
 134	 * a heuristic to speed up execution.
 135	 *
 136	 * If bpf_prog_array_valid() fetched prog_array was
 137	 * non-NULL, we go into trace_call_bpf() and do the actual
 138	 * proper rcu_dereference() under RCU lock.
 139	 * If it turns out that prog_array is NULL then, we bail out.
 140	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
 141	 * was NULL, you'll skip the prog_array with the risk of missing
 142	 * out of events when it was updated in between this and the
 143	 * rcu_dereference() which is accepted risk.
 144	 */
 145	rcu_read_lock();
 146	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
 147				 ctx, bpf_prog_run);
 148	rcu_read_unlock();
 149
 150 out:
 151	__this_cpu_dec(bpf_prog_active);
 152
 153	return ret;
 154}
 155
 156#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 157BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 158{
 159	regs_set_return_value(regs, rc);
 160	override_function_with_return(regs);
 161	return 0;
 162}
 163
 164static const struct bpf_func_proto bpf_override_return_proto = {
 165	.func		= bpf_override_return,
 166	.gpl_only	= true,
 167	.ret_type	= RET_INTEGER,
 168	.arg1_type	= ARG_PTR_TO_CTX,
 169	.arg2_type	= ARG_ANYTHING,
 170};
 171#endif
 172
 173static __always_inline int
 174bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
 175{
 176	int ret;
 177
 178	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
 179	if (unlikely(ret < 0))
 180		memset(dst, 0, size);
 181	return ret;
 182}
 183
 184BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
 185	   const void __user *, unsafe_ptr)
 186{
 187	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
 188}
 189
 190const struct bpf_func_proto bpf_probe_read_user_proto = {
 191	.func		= bpf_probe_read_user,
 192	.gpl_only	= true,
 193	.ret_type	= RET_INTEGER,
 194	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 195	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 196	.arg3_type	= ARG_ANYTHING,
 197};
 198
 199static __always_inline int
 200bpf_probe_read_user_str_common(void *dst, u32 size,
 201			       const void __user *unsafe_ptr)
 202{
 203	int ret;
 204
 205	/*
 206	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
 207	 * terminator into `dst`.
 208	 *
 209	 * strncpy_from_user() does long-sized strides in the fast path. If the
 210	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
 211	 * then there could be junk after the NUL in `dst`. If user takes `dst`
 212	 * and keys a hash map with it, then semantically identical strings can
 213	 * occupy multiple entries in the map.
 214	 */
 215	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
 216	if (unlikely(ret < 0))
 217		memset(dst, 0, size);
 218	return ret;
 219}
 220
 221BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
 222	   const void __user *, unsafe_ptr)
 223{
 224	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
 225}
 226
 227const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 228	.func		= bpf_probe_read_user_str,
 229	.gpl_only	= true,
 230	.ret_type	= RET_INTEGER,
 231	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 232	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 233	.arg3_type	= ARG_ANYTHING,
 234};
 235
 
 
 
 
 
 
 
 
 
 
 
 236BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
 237	   const void *, unsafe_ptr)
 238{
 239	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 240}
 241
 242const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 243	.func		= bpf_probe_read_kernel,
 244	.gpl_only	= true,
 245	.ret_type	= RET_INTEGER,
 246	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 247	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 248	.arg3_type	= ARG_ANYTHING,
 249};
 250
 251static __always_inline int
 252bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 253{
 254	int ret;
 255
 256	/*
 257	 * The strncpy_from_kernel_nofault() call will likely not fill the
 258	 * entire buffer, but that's okay in this circumstance as we're probing
 259	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
 260	 * as well probe the stack. Thus, memory is explicitly cleared
 261	 * only in error case, so that improper users ignoring return
 262	 * code altogether don't copy garbage; otherwise length of string
 263	 * is returned that can be used for bpf_perf_event_output() et al.
 264	 */
 265	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
 266	if (unlikely(ret < 0))
 267		memset(dst, 0, size);
 268	return ret;
 269}
 270
 271BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
 272	   const void *, unsafe_ptr)
 273{
 274	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 275}
 276
 277const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
 278	.func		= bpf_probe_read_kernel_str,
 279	.gpl_only	= true,
 280	.ret_type	= RET_INTEGER,
 281	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 282	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 283	.arg3_type	= ARG_ANYTHING,
 284};
 285
 286#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 287BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
 288	   const void *, unsafe_ptr)
 289{
 290	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 291		return bpf_probe_read_user_common(dst, size,
 292				(__force void __user *)unsafe_ptr);
 293	}
 294	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 295}
 296
 297static const struct bpf_func_proto bpf_probe_read_compat_proto = {
 298	.func		= bpf_probe_read_compat,
 299	.gpl_only	= true,
 300	.ret_type	= RET_INTEGER,
 301	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 302	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 303	.arg3_type	= ARG_ANYTHING,
 304};
 305
 306BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
 307	   const void *, unsafe_ptr)
 308{
 309	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 310		return bpf_probe_read_user_str_common(dst, size,
 311				(__force void __user *)unsafe_ptr);
 312	}
 313	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 314}
 315
 316static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
 317	.func		= bpf_probe_read_compat_str,
 318	.gpl_only	= true,
 319	.ret_type	= RET_INTEGER,
 320	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 321	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 322	.arg3_type	= ARG_ANYTHING,
 323};
 324#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 325
 326BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
 327	   u32, size)
 328{
 329	/*
 330	 * Ensure we're in user context which is safe for the helper to
 331	 * run. This helper has no business in a kthread.
 332	 *
 333	 * access_ok() should prevent writing to non-user memory, but in
 334	 * some situations (nommu, temporary switch, etc) access_ok() does
 335	 * not provide enough validation, hence the check on KERNEL_DS.
 336	 *
 337	 * nmi_uaccess_okay() ensures the probe is not run in an interim
 338	 * state, when the task or mm are switched. This is specifically
 339	 * required to prevent the use of temporary mm.
 340	 */
 341
 342	if (unlikely(in_interrupt() ||
 343		     current->flags & (PF_KTHREAD | PF_EXITING)))
 344		return -EPERM;
 345	if (unlikely(!nmi_uaccess_okay()))
 346		return -EPERM;
 347
 348	return copy_to_user_nofault(unsafe_ptr, src, size);
 349}
 350
 351static const struct bpf_func_proto bpf_probe_write_user_proto = {
 352	.func		= bpf_probe_write_user,
 353	.gpl_only	= true,
 354	.ret_type	= RET_INTEGER,
 355	.arg1_type	= ARG_ANYTHING,
 356	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 357	.arg3_type	= ARG_CONST_SIZE,
 358};
 359
 360static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 361{
 362	if (!capable(CAP_SYS_ADMIN))
 363		return NULL;
 364
 365	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
 366			    current->comm, task_pid_nr(current));
 367
 368	return &bpf_probe_write_user_proto;
 369}
 370
 
 
 371#define MAX_TRACE_PRINTK_VARARGS	3
 372#define BPF_TRACE_PRINTK_SIZE		1024
 373
 374BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 375	   u64, arg2, u64, arg3)
 376{
 377	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
 378	struct bpf_bprintf_data data = {
 379		.get_bin_args	= true,
 380		.get_buf	= true,
 381	};
 382	int ret;
 383
 384	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
 385				  MAX_TRACE_PRINTK_VARARGS, &data);
 386	if (ret < 0)
 387		return ret;
 388
 389	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 
 390
 391	trace_bpf_trace_printk(data.buf);
 
 392
 393	bpf_bprintf_cleanup(&data);
 394
 395	return ret;
 396}
 397
 398static const struct bpf_func_proto bpf_trace_printk_proto = {
 399	.func		= bpf_trace_printk,
 400	.gpl_only	= true,
 401	.ret_type	= RET_INTEGER,
 402	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 403	.arg2_type	= ARG_CONST_SIZE,
 404};
 405
 406static void __set_printk_clr_event(void)
 407{
 408	/*
 409	 * This program might be calling bpf_trace_printk,
 410	 * so enable the associated bpf_trace/bpf_trace_printk event.
 411	 * Repeat this each time as it is possible a user has
 412	 * disabled bpf_trace_printk events.  By loading a program
 413	 * calling bpf_trace_printk() however the user has expressed
 414	 * the intent to see such events.
 415	 */
 416	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
 417		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 418}
 419
 420const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 421{
 422	__set_printk_clr_event();
 423	return &bpf_trace_printk_proto;
 424}
 425
 426BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
 427	   u32, data_len)
 428{
 429	struct bpf_bprintf_data data = {
 430		.get_bin_args	= true,
 431		.get_buf	= true,
 432	};
 433	int ret, num_args;
 
 434
 435	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 436	    (data_len && !args))
 437		return -EINVAL;
 438	num_args = data_len / 8;
 439
 440	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 441	if (ret < 0)
 442		return ret;
 443
 444	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 
 445
 446	trace_bpf_trace_printk(data.buf);
 
 447
 448	bpf_bprintf_cleanup(&data);
 449
 450	return ret;
 451}
 452
 453static const struct bpf_func_proto bpf_trace_vprintk_proto = {
 454	.func		= bpf_trace_vprintk,
 455	.gpl_only	= true,
 456	.ret_type	= RET_INTEGER,
 457	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 458	.arg2_type	= ARG_CONST_SIZE,
 459	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 460	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
 461};
 462
 463const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
 464{
 465	__set_printk_clr_event();
 466	return &bpf_trace_vprintk_proto;
 467}
 468
 469BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 470	   const void *, args, u32, data_len)
 471{
 472	struct bpf_bprintf_data data = {
 473		.get_bin_args	= true,
 474	};
 475	int err, num_args;
 
 476
 477	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 478	    (data_len && !args))
 479		return -EINVAL;
 480	num_args = data_len / 8;
 481
 482	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 483	if (err < 0)
 484		return err;
 485
 486	seq_bprintf(m, fmt, data.bin_args);
 487
 488	bpf_bprintf_cleanup(&data);
 489
 490	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 491}
 492
 493BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
 494
 495static const struct bpf_func_proto bpf_seq_printf_proto = {
 496	.func		= bpf_seq_printf,
 497	.gpl_only	= true,
 498	.ret_type	= RET_INTEGER,
 499	.arg1_type	= ARG_PTR_TO_BTF_ID,
 500	.arg1_btf_id	= &btf_seq_file_ids[0],
 501	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 502	.arg3_type	= ARG_CONST_SIZE,
 503	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 504	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 505};
 506
 507BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
 508{
 509	return seq_write(m, data, len) ? -EOVERFLOW : 0;
 510}
 511
 512static const struct bpf_func_proto bpf_seq_write_proto = {
 513	.func		= bpf_seq_write,
 514	.gpl_only	= true,
 515	.ret_type	= RET_INTEGER,
 516	.arg1_type	= ARG_PTR_TO_BTF_ID,
 517	.arg1_btf_id	= &btf_seq_file_ids[0],
 518	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 519	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 520};
 521
 522BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
 523	   u32, btf_ptr_size, u64, flags)
 524{
 525	const struct btf *btf;
 526	s32 btf_id;
 527	int ret;
 528
 529	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
 530	if (ret)
 531		return ret;
 532
 533	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
 534}
 535
 536static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
 537	.func		= bpf_seq_printf_btf,
 538	.gpl_only	= true,
 539	.ret_type	= RET_INTEGER,
 540	.arg1_type	= ARG_PTR_TO_BTF_ID,
 541	.arg1_btf_id	= &btf_seq_file_ids[0],
 542	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 543	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 544	.arg4_type	= ARG_ANYTHING,
 545};
 546
 547static __always_inline int
 548get_map_perf_counter(struct bpf_map *map, u64 flags,
 549		     u64 *value, u64 *enabled, u64 *running)
 550{
 551	struct bpf_array *array = container_of(map, struct bpf_array, map);
 552	unsigned int cpu = smp_processor_id();
 553	u64 index = flags & BPF_F_INDEX_MASK;
 554	struct bpf_event_entry *ee;
 555
 556	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 557		return -EINVAL;
 558	if (index == BPF_F_CURRENT_CPU)
 559		index = cpu;
 560	if (unlikely(index >= array->map.max_entries))
 561		return -E2BIG;
 562
 563	ee = READ_ONCE(array->ptrs[index]);
 564	if (!ee)
 565		return -ENOENT;
 566
 567	return perf_event_read_local(ee->event, value, enabled, running);
 568}
 569
 570BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 571{
 572	u64 value = 0;
 573	int err;
 574
 575	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
 576	/*
 577	 * this api is ugly since we miss [-22..-2] range of valid
 578	 * counter values, but that's uapi
 579	 */
 580	if (err)
 581		return err;
 582	return value;
 583}
 584
 585static const struct bpf_func_proto bpf_perf_event_read_proto = {
 586	.func		= bpf_perf_event_read,
 587	.gpl_only	= true,
 588	.ret_type	= RET_INTEGER,
 589	.arg1_type	= ARG_CONST_MAP_PTR,
 590	.arg2_type	= ARG_ANYTHING,
 591};
 592
 593BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
 594	   struct bpf_perf_event_value *, buf, u32, size)
 595{
 596	int err = -EINVAL;
 597
 598	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 599		goto clear;
 600	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
 601				   &buf->running);
 602	if (unlikely(err))
 603		goto clear;
 604	return 0;
 605clear:
 606	memset(buf, 0, size);
 607	return err;
 608}
 609
 610static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 611	.func		= bpf_perf_event_read_value,
 612	.gpl_only	= true,
 613	.ret_type	= RET_INTEGER,
 614	.arg1_type	= ARG_CONST_MAP_PTR,
 615	.arg2_type	= ARG_ANYTHING,
 616	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
 617	.arg4_type	= ARG_CONST_SIZE,
 618};
 619
 620static __always_inline u64
 621__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 622			u64 flags, struct perf_raw_record *raw,
 623			struct perf_sample_data *sd)
 624{
 625	struct bpf_array *array = container_of(map, struct bpf_array, map);
 626	unsigned int cpu = smp_processor_id();
 627	u64 index = flags & BPF_F_INDEX_MASK;
 628	struct bpf_event_entry *ee;
 629	struct perf_event *event;
 630
 631	if (index == BPF_F_CURRENT_CPU)
 632		index = cpu;
 633	if (unlikely(index >= array->map.max_entries))
 634		return -E2BIG;
 635
 636	ee = READ_ONCE(array->ptrs[index]);
 637	if (!ee)
 638		return -ENOENT;
 639
 640	event = ee->event;
 641	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 642		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 643		return -EINVAL;
 644
 645	if (unlikely(event->oncpu != cpu))
 646		return -EOPNOTSUPP;
 647
 648	perf_sample_save_raw_data(sd, event, raw);
 649
 650	return perf_event_output(event, sd, regs);
 651}
 652
 653/*
 654 * Support executing tracepoints in normal, irq, and nmi context that each call
 655 * bpf_perf_event_output
 656 */
 657struct bpf_trace_sample_data {
 658	struct perf_sample_data sds[3];
 659};
 660
 661static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
 662static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 663BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 664	   u64, flags, void *, data, u64, size)
 665{
 666	struct bpf_trace_sample_data *sds;
 
 667	struct perf_raw_record raw = {
 668		.frag = {
 669			.size = size,
 670			.data = data,
 671		},
 672	};
 673	struct perf_sample_data *sd;
 674	int nest_level, err;
 675
 676	preempt_disable();
 677	sds = this_cpu_ptr(&bpf_trace_sds);
 678	nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 679
 680	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
 681		err = -EBUSY;
 682		goto out;
 683	}
 684
 685	sd = &sds->sds[nest_level - 1];
 686
 687	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
 688		err = -EINVAL;
 689		goto out;
 690	}
 691
 692	perf_sample_data_init(sd, 0, 0);
 
 
 
 
 693
 694	err = __bpf_perf_event_output(regs, map, flags, &raw, sd);
 695out:
 696	this_cpu_dec(bpf_trace_nest_level);
 697	preempt_enable();
 698	return err;
 699}
 700
 701static const struct bpf_func_proto bpf_perf_event_output_proto = {
 702	.func		= bpf_perf_event_output,
 703	.gpl_only	= true,
 704	.ret_type	= RET_INTEGER,
 705	.arg1_type	= ARG_PTR_TO_CTX,
 706	.arg2_type	= ARG_CONST_MAP_PTR,
 707	.arg3_type	= ARG_ANYTHING,
 708	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 709	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 710};
 711
 712static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
 713struct bpf_nested_pt_regs {
 714	struct pt_regs regs[3];
 715};
 716static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
 717static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 718
 719u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 720		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 721{
 
 722	struct perf_raw_frag frag = {
 723		.copy		= ctx_copy,
 724		.size		= ctx_size,
 725		.data		= ctx,
 726	};
 727	struct perf_raw_record raw = {
 728		.frag = {
 729			{
 730				.next	= ctx_size ? &frag : NULL,
 731			},
 732			.size	= meta_size,
 733			.data	= meta,
 734		},
 735	};
 736	struct perf_sample_data *sd;
 737	struct pt_regs *regs;
 738	int nest_level;
 739	u64 ret;
 740
 741	preempt_disable();
 742	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 743
 744	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
 745		ret = -EBUSY;
 746		goto out;
 747	}
 748	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
 749	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 750
 751	perf_fetch_caller_regs(regs);
 752	perf_sample_data_init(sd, 0, 0);
 753
 754	ret = __bpf_perf_event_output(regs, map, flags, &raw, sd);
 755out:
 756	this_cpu_dec(bpf_event_output_nest_level);
 757	preempt_enable();
 758	return ret;
 759}
 760
 761BPF_CALL_0(bpf_get_current_task)
 762{
 763	return (long) current;
 764}
 765
 766const struct bpf_func_proto bpf_get_current_task_proto = {
 767	.func		= bpf_get_current_task,
 768	.gpl_only	= true,
 769	.ret_type	= RET_INTEGER,
 770};
 771
 772BPF_CALL_0(bpf_get_current_task_btf)
 773{
 774	return (unsigned long) current;
 775}
 776
 777const struct bpf_func_proto bpf_get_current_task_btf_proto = {
 778	.func		= bpf_get_current_task_btf,
 779	.gpl_only	= true,
 780	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
 781	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 782};
 783
 784BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
 785{
 786	return (unsigned long) task_pt_regs(task);
 787}
 788
 789BTF_ID_LIST(bpf_task_pt_regs_ids)
 790BTF_ID(struct, pt_regs)
 791
 792const struct bpf_func_proto bpf_task_pt_regs_proto = {
 793	.func		= bpf_task_pt_regs,
 794	.gpl_only	= true,
 795	.arg1_type	= ARG_PTR_TO_BTF_ID,
 796	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 797	.ret_type	= RET_PTR_TO_BTF_ID,
 798	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
 799};
 800
 801struct send_signal_irq_work {
 802	struct irq_work irq_work;
 803	struct task_struct *task;
 804	u32 sig;
 805	enum pid_type type;
 806	bool has_siginfo;
 807	struct kernel_siginfo info;
 808};
 809
 810static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
 811
 812static void do_bpf_send_signal(struct irq_work *entry)
 813{
 814	struct send_signal_irq_work *work;
 815	struct kernel_siginfo *siginfo;
 816
 817	work = container_of(entry, struct send_signal_irq_work, irq_work);
 818	siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;
 819
 820	group_send_sig_info(work->sig, siginfo, work->task, work->type);
 821	put_task_struct(work->task);
 822}
 823
 824static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
 825{
 826	struct send_signal_irq_work *work = NULL;
 827	struct kernel_siginfo info;
 828	struct kernel_siginfo *siginfo;
 829
 830	if (!task) {
 831		task = current;
 832		siginfo = SEND_SIG_PRIV;
 833	} else {
 834		clear_siginfo(&info);
 835		info.si_signo = sig;
 836		info.si_errno = 0;
 837		info.si_code = SI_KERNEL;
 838		info.si_pid = 0;
 839		info.si_uid = 0;
 840		info.si_value.sival_ptr = (void *)(unsigned long)value;
 841		siginfo = &info;
 842	}
 843
  844	/* Similar to bpf_probe_write_user(), the task needs to be
  845	 * in a sound condition and kernel memory access must be
  846	 * permitted in order to send a signal to the target
  847	 * task.
  848	 */
 849	if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
 850		return -EPERM;
 851	if (unlikely(!nmi_uaccess_okay()))
 852		return -EPERM;
  853	/* Refuse to signal global init (pid 1), since killing it would panic the kernel. */
 854	if (unlikely(is_global_init(task)))
 855		return -EPERM;
 856
 857	if (!preemptible()) {
 858		/* Do an early check on signal validity. Otherwise,
 859		 * the error is lost in deferred irq_work.
 860		 */
 861		if (unlikely(!valid_signal(sig)))
 862			return -EINVAL;
 863
 864		work = this_cpu_ptr(&send_signal_work);
 865		if (irq_work_is_busy(&work->irq_work))
 866			return -EBUSY;
 867
  868		/* Add the task, which is the target of the signal, to the
  869		 * irq_work. The current task may have changed by the time
  870		 * the queued irq_work is executed.
  871		 */
 872		work->task = get_task_struct(task);
 873		work->has_siginfo = siginfo == &info;
 874		if (work->has_siginfo)
 875			copy_siginfo(&work->info, &info);
 876		work->sig = sig;
 877		work->type = type;
 878		irq_work_queue(&work->irq_work);
 879		return 0;
 880	}
 881
 882	return group_send_sig_info(sig, siginfo, task, type);
 883}
 884
 885BPF_CALL_1(bpf_send_signal, u32, sig)
 886{
 887	return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
 888}
 889
 890static const struct bpf_func_proto bpf_send_signal_proto = {
 891	.func		= bpf_send_signal,
 892	.gpl_only	= false,
 893	.ret_type	= RET_INTEGER,
 894	.arg1_type	= ARG_ANYTHING,
 895};
 896
 897BPF_CALL_1(bpf_send_signal_thread, u32, sig)
 898{
 899	return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
 900}
 901
 902static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 903	.func		= bpf_send_signal_thread,
 904	.gpl_only	= false,
 905	.ret_type	= RET_INTEGER,
 906	.arg1_type	= ARG_ANYTHING,
 907};
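/*
 * Editor's note: a minimal BPF-program-side sketch (not part of this file)
 * for the two signalling helpers above.  The attach point is an arbitrary
 * example; as implemented in bpf_send_signal_common(), the signal goes to
 * the current task, either to its whole thread group (bpf_send_signal,
 * PIDTYPE_TGID) or only to the current thread (bpf_send_signal_thread,
 * PIDTYPE_PID):
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int notify(struct pt_regs *ctx)
 *	{
 *		bpf_send_signal(SIGUSR1);
 *		bpf_send_signal_thread(SIGUSR2);
 *		return 0;
 *	}
 */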
 908
 909BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 910{
 911	struct path copy;
 912	long len;
 913	char *p;
 914
 915	if (!sz)
 916		return 0;
 917
 918	/*
 919	 * The path pointer is verified as trusted and safe to use,
  920	 * but let's double-check that it's valid anyway to work around
  921	 * a potentially broken verifier.
 922	 */
 923	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
 924	if (len < 0)
 925		return len;
 926
 927	p = d_path(&copy, buf, sz);
 928	if (IS_ERR(p)) {
 929		len = PTR_ERR(p);
 930	} else {
 931		len = buf + sz - p;
 932		memmove(buf, p, len);
 933	}
 934
 935	return len;
 936}
 937
 938BTF_SET_START(btf_allowlist_d_path)
 939#ifdef CONFIG_SECURITY
 940BTF_ID(func, security_file_permission)
 941BTF_ID(func, security_inode_getattr)
 942BTF_ID(func, security_file_open)
 943#endif
 944#ifdef CONFIG_SECURITY_PATH
 945BTF_ID(func, security_path_truncate)
 946#endif
 947BTF_ID(func, vfs_truncate)
 948BTF_ID(func, vfs_fallocate)
 949BTF_ID(func, dentry_open)
 950BTF_ID(func, vfs_getattr)
 951BTF_ID(func, filp_close)
 952BTF_SET_END(btf_allowlist_d_path)
 953
 954static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 955{
 956	if (prog->type == BPF_PROG_TYPE_TRACING &&
 957	    prog->expected_attach_type == BPF_TRACE_ITER)
 958		return true;
 959
 960	if (prog->type == BPF_PROG_TYPE_LSM)
 961		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
 962
 963	return btf_id_set_contains(&btf_allowlist_d_path,
 964				   prog->aux->attach_btf_id);
 965}
 966
 967BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
 968
 969static const struct bpf_func_proto bpf_d_path_proto = {
 970	.func		= bpf_d_path,
 971	.gpl_only	= false,
 972	.ret_type	= RET_INTEGER,
 973	.arg1_type	= ARG_PTR_TO_BTF_ID,
 974	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
 975	.arg2_type	= ARG_PTR_TO_MEM,
 976	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 977	.allowed	= bpf_d_path_allowed,
 978};
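/*
 * Editor's note: an illustrative sketch (not part of this file) of calling
 * bpf_d_path() from one of the allowlisted attach points above.  The attach
 * point, the libbpf BPF_PROG() convenience macro and the buffer size are
 * assumptions; per bpf_d_path_allowed(), the call is only accepted from
 * BPF_TRACE_ITER programs, sleepable LSM hooks, or the functions in
 * btf_allowlist_d_path:
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(trace_truncate, const struct path *path)
 *	{
 *		char buf[256];
 *
 *		bpf_d_path((struct path *)path, buf, sizeof(buf));
 *		return 0;
 *	}
 */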
 979
 980#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
 981			 BTF_F_PTR_RAW | BTF_F_ZERO)
 982
 983static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
 984				  u64 flags, const struct btf **btf,
 985				  s32 *btf_id)
 986{
 987	const struct btf_type *t;
 988
 989	if (unlikely(flags & ~(BTF_F_ALL)))
 990		return -EINVAL;
 991
 992	if (btf_ptr_size != sizeof(struct btf_ptr))
 993		return -EINVAL;
 994
 995	*btf = bpf_get_btf_vmlinux();
 996
 997	if (IS_ERR_OR_NULL(*btf))
 998		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
 999
1000	if (ptr->type_id > 0)
1001		*btf_id = ptr->type_id;
1002	else
1003		return -EINVAL;
1004
1005	if (*btf_id > 0)
1006		t = btf_type_by_id(*btf, *btf_id);
1007	if (*btf_id <= 0 || !t)
1008		return -ENOENT;
1009
1010	return 0;
1011}
1012
1013BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1014	   u32, btf_ptr_size, u64, flags)
1015{
1016	const struct btf *btf;
1017	s32 btf_id;
1018	int ret;
1019
1020	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1021	if (ret)
1022		return ret;
1023
1024	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1025				      flags);
1026}
1027
1028const struct bpf_func_proto bpf_snprintf_btf_proto = {
1029	.func		= bpf_snprintf_btf,
1030	.gpl_only	= false,
1031	.ret_type	= RET_INTEGER,
1032	.arg1_type	= ARG_PTR_TO_MEM,
1033	.arg2_type	= ARG_CONST_SIZE,
1034	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1035	.arg4_type	= ARG_CONST_SIZE,
1036	.arg5_type	= ARG_ANYTHING,
1037};
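/*
 * Editor's note: an illustrative BPF-program-side sketch (not part of this
 * file).  bpf_snprintf_btf() renders a kernel object as text from its BTF
 * type id; struct btf_ptr is defined in uapi/linux/btf.h.  Obtaining the
 * type id via libbpf's bpf_core_type_id_kernel(), and "task" being e.g. the
 * pointer returned by bpf_get_current_task_btf(), are assumptions about the
 * caller, not something this file mandates:
 *
 *	char out[512];
 *	struct btf_ptr p = {
 *		.ptr	 = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
 */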
1038
1039BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1040{
1041	/* This helper call is inlined by verifier. */
1042	return ((u64 *)ctx)[-2];
1043}
1044
1045static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1046	.func		= bpf_get_func_ip_tracing,
1047	.gpl_only	= true,
1048	.ret_type	= RET_INTEGER,
1049	.arg1_type	= ARG_PTR_TO_CTX,
1050};
1051
1052#ifdef CONFIG_X86_KERNEL_IBT
1053static unsigned long get_entry_ip(unsigned long fentry_ip)
1054{
1055	u32 instr;
1056
1057	/* We want to be extra safe in case entry ip is on the page edge,
1058	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
1059	 */
1060	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
1061		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
1062			return fentry_ip;
1063	} else {
1064		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
1065	}
1066	if (is_endbr(instr))
1067		fentry_ip -= ENDBR_INSN_SIZE;
1068	return fentry_ip;
1069}
1070#else
1071#define get_entry_ip(fentry_ip) fentry_ip
1072#endif
1073
1074BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1075{
1076	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1077	struct kprobe *kp;
1078
1079#ifdef CONFIG_UPROBES
1080	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1081	if (run_ctx->is_uprobe)
1082		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1083#endif
1084
1085	kp = kprobe_running();
1086
1087	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1088		return 0;
1089
1090	return get_entry_ip((uintptr_t)kp->addr);
1091}
1092
1093static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1094	.func		= bpf_get_func_ip_kprobe,
1095	.gpl_only	= true,
1096	.ret_type	= RET_INTEGER,
1097	.arg1_type	= ARG_PTR_TO_CTX,
1098};
1099
1100BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1101{
1102	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1103}
1104
1105static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1106	.func		= bpf_get_func_ip_kprobe_multi,
1107	.gpl_only	= false,
1108	.ret_type	= RET_INTEGER,
1109	.arg1_type	= ARG_PTR_TO_CTX,
1110};
1111
1112BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1113{
1114	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1115}
1116
1117static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1118	.func		= bpf_get_attach_cookie_kprobe_multi,
1119	.gpl_only	= false,
1120	.ret_type	= RET_INTEGER,
1121	.arg1_type	= ARG_PTR_TO_CTX,
1122};
1123
1124BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1125{
1126	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1127}
1128
1129static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1130	.func		= bpf_get_func_ip_uprobe_multi,
1131	.gpl_only	= false,
1132	.ret_type	= RET_INTEGER,
1133	.arg1_type	= ARG_PTR_TO_CTX,
1134};
1135
1136BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1137{
1138	return bpf_uprobe_multi_cookie(current->bpf_ctx);
1139}
1140
1141static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1142	.func		= bpf_get_attach_cookie_uprobe_multi,
1143	.gpl_only	= false,
1144	.ret_type	= RET_INTEGER,
1145	.arg1_type	= ARG_PTR_TO_CTX,
1146};
1147
1148BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1149{
1150	struct bpf_trace_run_ctx *run_ctx;
1151
1152	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1153	return run_ctx->bpf_cookie;
1154}
1155
1156static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1157	.func		= bpf_get_attach_cookie_trace,
1158	.gpl_only	= false,
1159	.ret_type	= RET_INTEGER,
1160	.arg1_type	= ARG_PTR_TO_CTX,
1161};
1162
1163BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1164{
1165	return ctx->event->bpf_cookie;
1166}
1167
1168static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1169	.func		= bpf_get_attach_cookie_pe,
1170	.gpl_only	= false,
1171	.ret_type	= RET_INTEGER,
1172	.arg1_type	= ARG_PTR_TO_CTX,
1173};
1174
1175BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1176{
1177	struct bpf_trace_run_ctx *run_ctx;
1178
1179	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1180	return run_ctx->bpf_cookie;
1181}
1182
1183static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1184	.func		= bpf_get_attach_cookie_tracing,
1185	.gpl_only	= false,
1186	.ret_type	= RET_INTEGER,
1187	.arg1_type	= ARG_PTR_TO_CTX,
1188};
1189
1190BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1191{
1192	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1193	u32 entry_cnt = size / br_entry_size;
1194
1195	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1196
1197	if (unlikely(flags))
1198		return -EINVAL;
1199
1200	if (!entry_cnt)
1201		return -ENOENT;
1202
1203	return entry_cnt * br_entry_size;
1204}
1205
1206static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1207	.func		= bpf_get_branch_snapshot,
1208	.gpl_only	= true,
1209	.ret_type	= RET_INTEGER,
1210	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1211	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1212};
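/*
 * Editor's note: a minimal usage sketch (not part of this file).  The buffer
 * size is an assumption; the helper fills the buffer with as many
 * perf_branch_entry records as the hardware snapshot provides and returns
 * the number of bytes written (or -ENOENT when no branch data is available):
 *
 *	struct perf_branch_entry entries[16];
 *	long bytes, nr;
 *
 *	bytes = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
 *	if (bytes > 0)
 *		nr = bytes / sizeof(entries[0]);
 */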
1213
1214BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1215{
1216	/* This helper call is inlined by verifier. */
1217	u64 nr_args = ((u64 *)ctx)[-1];
1218
1219	if ((u64) n >= nr_args)
1220		return -EINVAL;
1221	*value = ((u64 *)ctx)[n];
1222	return 0;
1223}
1224
1225static const struct bpf_func_proto bpf_get_func_arg_proto = {
1226	.func		= get_func_arg,
1227	.ret_type	= RET_INTEGER,
1228	.arg1_type	= ARG_PTR_TO_CTX,
1229	.arg2_type	= ARG_ANYTHING,
1230	.arg3_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1231	.arg3_size	= sizeof(u64),
1232};
1233
1234BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1235{
1236	/* This helper call is inlined by verifier. */
1237	u64 nr_args = ((u64 *)ctx)[-1];
1238
1239	*value = ((u64 *)ctx)[nr_args];
1240	return 0;
1241}
1242
1243static const struct bpf_func_proto bpf_get_func_ret_proto = {
1244	.func		= get_func_ret,
1245	.ret_type	= RET_INTEGER,
1246	.arg1_type	= ARG_PTR_TO_CTX,
1247	.arg2_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1248	.arg2_size	= sizeof(u64),
1249};
1250
1251BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1252{
1253	/* This helper call is inlined by verifier. */
1254	return ((u64 *)ctx)[-1];
1255}
1256
1257static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1258	.func		= get_func_arg_cnt,
1259	.ret_type	= RET_INTEGER,
1260	.arg1_type	= ARG_PTR_TO_CTX,
1261};
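/*
 * Editor's note: the three helpers above rely on the trampoline-built
 * context, where ctx[-1] holds the argument count, ctx[0..n-1] the argument
 * values and ctx[n] the return value (meaningful on fexit).  A hedged
 * BPF-program-side sketch, with an arbitrarily chosen attach point:
 *
 *	SEC("fexit/vfs_truncate")
 *	int probe(void *ctx)
 *	{
 *		u64 nr_args, arg0, ret;
 *
 *		nr_args = bpf_get_func_arg_cnt(ctx);
 *		bpf_get_func_arg(ctx, 0, &arg0);
 *		bpf_get_func_ret(ctx, &ret);
 *		return 0;
 *	}
 */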
1262
1263#ifdef CONFIG_KEYS
1264__bpf_kfunc_start_defs();
1265
1266/**
1267 * bpf_lookup_user_key - lookup a key by its serial
1268 * @serial: key handle serial number
1269 * @flags: lookup-specific flags
1270 *
1271 * Search a key with a given *serial* and the provided *flags*.
1272 * If found, increment the reference count of the key by one, and
1273 * return it in the bpf_key structure.
1274 *
1275 * The bpf_key structure must be passed to bpf_key_put() when done
1276 * with it, so that the key reference count is decremented and the
1277 * bpf_key structure is freed.
1278 *
1279 * Permission checks are deferred to the time the key is used by
1280 * one of the available key-specific kfuncs.
1281 *
 1282 * Set KEY_LOOKUP_CREATE in *flags* to attempt creating a requested
 1283 * special keyring (e.g. the session keyring) if it doesn't yet exist.
 1284 * Set KEY_LOOKUP_PARTIAL in *flags* to look up a key without waiting
 1285 * for its construction, and to retrieve uninstantiated keys (keys
 1286 * without data attached to them).
1287 *
1288 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1289 *         NULL pointer otherwise.
1290 */
1291__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1292{
1293	key_ref_t key_ref;
1294	struct bpf_key *bkey;
1295
1296	if (flags & ~KEY_LOOKUP_ALL)
1297		return NULL;
1298
1299	/*
1300	 * Permission check is deferred until the key is used, as the
1301	 * intent of the caller is unknown here.
1302	 */
1303	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1304	if (IS_ERR(key_ref))
1305		return NULL;
1306
1307	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1308	if (!bkey) {
1309		key_put(key_ref_to_ptr(key_ref));
1310		return NULL;
1311	}
1312
1313	bkey->key = key_ref_to_ptr(key_ref);
1314	bkey->has_ref = true;
1315
1316	return bkey;
1317}
1318
1319/**
1320 * bpf_lookup_system_key - lookup a key by a system-defined ID
1321 * @id: key ID
1322 *
1323 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1324 * The key pointer is marked as invalid, to prevent bpf_key_put() from
1325 * attempting to decrement the key reference count on that pointer. The key
1326 * pointer set in such way is currently understood only by
1327 * verify_pkcs7_signature().
1328 *
1329 * Set *id* to one of the values defined in include/linux/verification.h:
1330 * 0 for the primary keyring (immutable keyring of system keys);
1331 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1332 * (where keys can be added only if they are vouched for by existing keys
1333 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1334 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 1335 * kernel image and, possibly, the initramfs signature).
1336 *
1337 * Return: a bpf_key pointer with an invalid key pointer set from the
1338 *         pre-determined ID on success, a NULL pointer otherwise
1339 */
1340__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1341{
1342	struct bpf_key *bkey;
1343
1344	if (system_keyring_id_check(id) < 0)
1345		return NULL;
1346
1347	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1348	if (!bkey)
1349		return NULL;
1350
1351	bkey->key = (struct key *)(unsigned long)id;
1352	bkey->has_ref = false;
1353
1354	return bkey;
1355}
1356
1357/**
1358 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1359 * @bkey: bpf_key structure
1360 *
1361 * Decrement the reference count of the key inside *bkey*, if the pointer
1362 * is valid, and free *bkey*.
1363 */
1364__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1365{
1366	if (bkey->has_ref)
1367		key_put(bkey->key);
1368
1369	kfree(bkey);
1370}
1371
1372#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1373/**
1374 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1375 * @data_p: data to verify
1376 * @sig_p: signature of the data
1377 * @trusted_keyring: keyring with keys trusted for signature verification
1378 *
 1379 * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
1380 * with keys in a keyring referenced by *trusted_keyring*.
1381 *
1382 * Return: 0 on success, a negative value on error.
1383 */
1384__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
1385			       struct bpf_dynptr *sig_p,
1386			       struct bpf_key *trusted_keyring)
1387{
1388	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
1389	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
1390	const void *data, *sig;
1391	u32 data_len, sig_len;
1392	int ret;
1393
1394	if (trusted_keyring->has_ref) {
1395		/*
1396		 * Do the permission check deferred in bpf_lookup_user_key().
1397		 * See bpf_lookup_user_key() for more details.
1398		 *
1399		 * A call to key_task_permission() here would be redundant, as
1400		 * it is already done by keyring_search() called by
1401		 * find_asymmetric_key().
1402		 */
1403		ret = key_validate(trusted_keyring->key);
1404		if (ret < 0)
1405			return ret;
1406	}
1407
1408	data_len = __bpf_dynptr_size(data_ptr);
1409	data = __bpf_dynptr_data(data_ptr, data_len);
1410	sig_len = __bpf_dynptr_size(sig_ptr);
1411	sig = __bpf_dynptr_data(sig_ptr, sig_len);
1412
1413	return verify_pkcs7_signature(data, data_len, sig, sig_len,
1414				      trusted_keyring->key,
1415				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1416				      NULL);
1417}
1418#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1419
1420__bpf_kfunc_end_defs();
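/*
 * Editor's note: an illustrative sketch (not part of this file) of the
 * intended kfunc sequence from a sleepable tracing program.  How the data
 * and signature dynptrs get populated is omitted (an assumption about the
 * caller); the reference discipline around bpf_key_put() is the point:
 *
 *	struct bpf_key *kr;
 *	int err = -ENOKEY;
 *
 *	kr = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *	if (kr) {
 *		err = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, kr);
 *		bpf_key_put(kr);
 *	}
 */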
1421
1422BTF_KFUNCS_START(key_sig_kfunc_set)
1423BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1424BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1425BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1426#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1427BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1428#endif
1429BTF_KFUNCS_END(key_sig_kfunc_set)
1430
1431static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1432	.owner = THIS_MODULE,
1433	.set = &key_sig_kfunc_set,
1434};
1435
1436static int __init bpf_key_sig_kfuncs_init(void)
1437{
1438	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1439					 &bpf_key_sig_kfunc_set);
1440}
1441
1442late_initcall(bpf_key_sig_kfuncs_init);
1443#endif /* CONFIG_KEYS */
1444
1445static const struct bpf_func_proto *
1446bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1447{
1448	switch (func_id) {
1449	case BPF_FUNC_map_lookup_elem:
1450		return &bpf_map_lookup_elem_proto;
1451	case BPF_FUNC_map_update_elem:
1452		return &bpf_map_update_elem_proto;
1453	case BPF_FUNC_map_delete_elem:
1454		return &bpf_map_delete_elem_proto;
1455	case BPF_FUNC_map_push_elem:
1456		return &bpf_map_push_elem_proto;
1457	case BPF_FUNC_map_pop_elem:
1458		return &bpf_map_pop_elem_proto;
1459	case BPF_FUNC_map_peek_elem:
1460		return &bpf_map_peek_elem_proto;
1461	case BPF_FUNC_map_lookup_percpu_elem:
1462		return &bpf_map_lookup_percpu_elem_proto;
1463	case BPF_FUNC_ktime_get_ns:
1464		return &bpf_ktime_get_ns_proto;
1465	case BPF_FUNC_ktime_get_boot_ns:
1466		return &bpf_ktime_get_boot_ns_proto;
1467	case BPF_FUNC_tail_call:
1468		return &bpf_tail_call_proto;
1469	case BPF_FUNC_get_current_task:
1470		return &bpf_get_current_task_proto;
1471	case BPF_FUNC_get_current_task_btf:
1472		return &bpf_get_current_task_btf_proto;
1473	case BPF_FUNC_task_pt_regs:
1474		return &bpf_task_pt_regs_proto;
1475	case BPF_FUNC_get_current_uid_gid:
1476		return &bpf_get_current_uid_gid_proto;
1477	case BPF_FUNC_get_current_comm:
1478		return &bpf_get_current_comm_proto;
1479	case BPF_FUNC_trace_printk:
1480		return bpf_get_trace_printk_proto();
1481	case BPF_FUNC_get_smp_processor_id:
1482		return &bpf_get_smp_processor_id_proto;
1483	case BPF_FUNC_get_numa_node_id:
1484		return &bpf_get_numa_node_id_proto;
1485	case BPF_FUNC_perf_event_read:
1486		return &bpf_perf_event_read_proto;
1487	case BPF_FUNC_get_prandom_u32:
1488		return &bpf_get_prandom_u32_proto;
1489	case BPF_FUNC_probe_write_user:
1490		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1491		       NULL : bpf_get_probe_write_proto();
1492	case BPF_FUNC_probe_read_user:
1493		return &bpf_probe_read_user_proto;
1494	case BPF_FUNC_probe_read_kernel:
1495		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1496		       NULL : &bpf_probe_read_kernel_proto;
1497	case BPF_FUNC_probe_read_user_str:
1498		return &bpf_probe_read_user_str_proto;
1499	case BPF_FUNC_probe_read_kernel_str:
1500		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1501		       NULL : &bpf_probe_read_kernel_str_proto;
1502#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1503	case BPF_FUNC_probe_read:
1504		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1505		       NULL : &bpf_probe_read_compat_proto;
1506	case BPF_FUNC_probe_read_str:
1507		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1508		       NULL : &bpf_probe_read_compat_str_proto;
1509#endif
1510#ifdef CONFIG_CGROUPS
1511	case BPF_FUNC_cgrp_storage_get:
1512		return &bpf_cgrp_storage_get_proto;
1513	case BPF_FUNC_cgrp_storage_delete:
1514		return &bpf_cgrp_storage_delete_proto;
1515	case BPF_FUNC_current_task_under_cgroup:
1516		return &bpf_current_task_under_cgroup_proto;
1517#endif
1518	case BPF_FUNC_send_signal:
1519		return &bpf_send_signal_proto;
1520	case BPF_FUNC_send_signal_thread:
1521		return &bpf_send_signal_thread_proto;
1522	case BPF_FUNC_perf_event_read_value:
1523		return &bpf_perf_event_read_value_proto;
1524	case BPF_FUNC_ringbuf_output:
1525		return &bpf_ringbuf_output_proto;
1526	case BPF_FUNC_ringbuf_reserve:
1527		return &bpf_ringbuf_reserve_proto;
1528	case BPF_FUNC_ringbuf_submit:
1529		return &bpf_ringbuf_submit_proto;
1530	case BPF_FUNC_ringbuf_discard:
1531		return &bpf_ringbuf_discard_proto;
1532	case BPF_FUNC_ringbuf_query:
1533		return &bpf_ringbuf_query_proto;
1534	case BPF_FUNC_jiffies64:
1535		return &bpf_jiffies64_proto;
1536	case BPF_FUNC_get_task_stack:
1537		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
1538				       : &bpf_get_task_stack_proto;
1539	case BPF_FUNC_copy_from_user:
1540		return &bpf_copy_from_user_proto;
1541	case BPF_FUNC_copy_from_user_task:
1542		return &bpf_copy_from_user_task_proto;
1543	case BPF_FUNC_snprintf_btf:
1544		return &bpf_snprintf_btf_proto;
1545	case BPF_FUNC_per_cpu_ptr:
1546		return &bpf_per_cpu_ptr_proto;
1547	case BPF_FUNC_this_cpu_ptr:
1548		return &bpf_this_cpu_ptr_proto;
1549	case BPF_FUNC_task_storage_get:
1550		if (bpf_prog_check_recur(prog))
1551			return &bpf_task_storage_get_recur_proto;
1552		return &bpf_task_storage_get_proto;
1553	case BPF_FUNC_task_storage_delete:
1554		if (bpf_prog_check_recur(prog))
1555			return &bpf_task_storage_delete_recur_proto;
1556		return &bpf_task_storage_delete_proto;
1557	case BPF_FUNC_for_each_map_elem:
1558		return &bpf_for_each_map_elem_proto;
1559	case BPF_FUNC_snprintf:
1560		return &bpf_snprintf_proto;
1561	case BPF_FUNC_get_func_ip:
1562		return &bpf_get_func_ip_proto_tracing;
1563	case BPF_FUNC_get_branch_snapshot:
1564		return &bpf_get_branch_snapshot_proto;
1565	case BPF_FUNC_find_vma:
1566		return &bpf_find_vma_proto;
1567	case BPF_FUNC_trace_vprintk:
1568		return bpf_get_trace_vprintk_proto();
1569	default:
1570		return bpf_base_func_proto(func_id, prog);
1571	}
1572}
1573
1574static bool is_kprobe_multi(const struct bpf_prog *prog)
1575{
1576	return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
1577	       prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1578}
1579
1580static inline bool is_kprobe_session(const struct bpf_prog *prog)
1581{
1582	return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1583}
1584
1585static inline bool is_uprobe_multi(const struct bpf_prog *prog)
1586{
1587	return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
1588	       prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1589}
1590
1591static inline bool is_uprobe_session(const struct bpf_prog *prog)
1592{
1593	return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1594}
1595
1596static const struct bpf_func_proto *
1597kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1598{
1599	switch (func_id) {
1600	case BPF_FUNC_perf_event_output:
1601		return &bpf_perf_event_output_proto;
1602	case BPF_FUNC_get_stackid:
1603		return &bpf_get_stackid_proto;
1604	case BPF_FUNC_get_stack:
1605		return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto;
1606#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1607	case BPF_FUNC_override_return:
1608		return &bpf_override_return_proto;
1609#endif
1610	case BPF_FUNC_get_func_ip:
1611		if (is_kprobe_multi(prog))
1612			return &bpf_get_func_ip_proto_kprobe_multi;
1613		if (is_uprobe_multi(prog))
1614			return &bpf_get_func_ip_proto_uprobe_multi;
1615		return &bpf_get_func_ip_proto_kprobe;
1616	case BPF_FUNC_get_attach_cookie:
1617		if (is_kprobe_multi(prog))
1618			return &bpf_get_attach_cookie_proto_kmulti;
1619		if (is_uprobe_multi(prog))
1620			return &bpf_get_attach_cookie_proto_umulti;
1621		return &bpf_get_attach_cookie_proto_trace;
1622	default:
1623		return bpf_tracing_func_proto(func_id, prog);
1624	}
1625}
1626
1627/* bpf+kprobe programs can access fields of 'struct pt_regs' */
1628static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1629					const struct bpf_prog *prog,
1630					struct bpf_insn_access_aux *info)
1631{
1632	if (off < 0 || off >= sizeof(struct pt_regs))
1633		return false;
1634	if (type != BPF_READ)
1635		return false;
1636	if (off % size != 0)
1637		return false;
1638	/*
 1639	 * Assertion for 32 bit to make sure that the last 8-byte access
 1640	 * (BPF_DW) to the last 4-byte member is disallowed.
1641	 */
1642	if (off + size > sizeof(struct pt_regs))
1643		return false;
1644
1645	return true;
1646}
1647
1648const struct bpf_verifier_ops kprobe_verifier_ops = {
1649	.get_func_proto  = kprobe_prog_func_proto,
1650	.is_valid_access = kprobe_prog_is_valid_access,
1651};
1652
1653const struct bpf_prog_ops kprobe_prog_ops = {
1654};
1655
1656BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1657	   u64, flags, void *, data, u64, size)
1658{
1659	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1660
1661	/*
 1662	 * r1 points to the perf tracepoint buffer whose first 8 bytes are hidden
 1663	 * from the bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1664	 * from there and call the same bpf_perf_event_output() helper inline.
1665	 */
1666	return ____bpf_perf_event_output(regs, map, flags, data, size);
1667}
1668
1669static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1670	.func		= bpf_perf_event_output_tp,
1671	.gpl_only	= true,
1672	.ret_type	= RET_INTEGER,
1673	.arg1_type	= ARG_PTR_TO_CTX,
1674	.arg2_type	= ARG_CONST_MAP_PTR,
1675	.arg3_type	= ARG_ANYTHING,
1676	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1677	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1678};
1679
1680BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1681	   u64, flags)
1682{
1683	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1684
1685	/*
1686	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1687	 * the other helper's function body cannot be inlined due to being
 1688	 * external, thus we need to call the raw helper function.
1689	 */
1690	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1691			       flags, 0, 0);
1692}
1693
1694static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1695	.func		= bpf_get_stackid_tp,
1696	.gpl_only	= true,
1697	.ret_type	= RET_INTEGER,
1698	.arg1_type	= ARG_PTR_TO_CTX,
1699	.arg2_type	= ARG_CONST_MAP_PTR,
1700	.arg3_type	= ARG_ANYTHING,
1701};
1702
1703BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1704	   u64, flags)
1705{
1706	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1707
1708	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1709			     (unsigned long) size, flags, 0);
1710}
1711
1712static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1713	.func		= bpf_get_stack_tp,
1714	.gpl_only	= true,
1715	.ret_type	= RET_INTEGER,
1716	.arg1_type	= ARG_PTR_TO_CTX,
1717	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1718	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1719	.arg4_type	= ARG_ANYTHING,
1720};
1721
1722static const struct bpf_func_proto *
1723tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1724{
1725	switch (func_id) {
1726	case BPF_FUNC_perf_event_output:
1727		return &bpf_perf_event_output_proto_tp;
1728	case BPF_FUNC_get_stackid:
1729		return &bpf_get_stackid_proto_tp;
1730	case BPF_FUNC_get_stack:
1731		return &bpf_get_stack_proto_tp;
1732	case BPF_FUNC_get_attach_cookie:
1733		return &bpf_get_attach_cookie_proto_trace;
1734	default:
1735		return bpf_tracing_func_proto(func_id, prog);
1736	}
1737}
1738
1739static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1740				    const struct bpf_prog *prog,
1741				    struct bpf_insn_access_aux *info)
1742{
1743	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1744		return false;
1745	if (type != BPF_READ)
1746		return false;
1747	if (off % size != 0)
1748		return false;
1749
1750	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1751	return true;
1752}
1753
1754const struct bpf_verifier_ops tracepoint_verifier_ops = {
1755	.get_func_proto  = tp_prog_func_proto,
1756	.is_valid_access = tp_prog_is_valid_access,
1757};
1758
1759const struct bpf_prog_ops tracepoint_prog_ops = {
1760};
1761
1762BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1763	   struct bpf_perf_event_value *, buf, u32, size)
1764{
1765	int err = -EINVAL;
1766
1767	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1768		goto clear;
1769	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1770				    &buf->running);
1771	if (unlikely(err))
1772		goto clear;
1773	return 0;
1774clear:
1775	memset(buf, 0, size);
1776	return err;
1777}
1778
1779static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
 1780	.func		= bpf_perf_prog_read_value,
 1781	.gpl_only	= true,
 1782	.ret_type	= RET_INTEGER,
 1783	.arg1_type	= ARG_PTR_TO_CTX,
 1784	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
 1785	.arg3_type	= ARG_CONST_SIZE,
1786};
1787
1788BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1789	   void *, buf, u32, size, u64, flags)
1790{
1791	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1792	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1793	u32 to_copy;
1794
1795	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1796		return -EINVAL;
1797
1798	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1799		return -ENOENT;
1800
1801	if (unlikely(!br_stack))
1802		return -ENOENT;
1803
1804	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1805		return br_stack->nr * br_entry_size;
1806
1807	if (!buf || (size % br_entry_size != 0))
1808		return -EINVAL;
1809
1810	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1811	memcpy(buf, br_stack->entries, to_copy);
1812
1813	return to_copy;
1814}
1815
1816static const struct bpf_func_proto bpf_read_branch_records_proto = {
1817	.func           = bpf_read_branch_records,
1818	.gpl_only       = true,
1819	.ret_type       = RET_INTEGER,
1820	.arg1_type      = ARG_PTR_TO_CTX,
1821	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1822	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1823	.arg4_type      = ARG_ANYTHING,
1824};
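/*
 * Editor's note: a hedged BPF-program-side sketch (not part of this file).
 * A perf_event program can first query the required size with
 * BPF_F_GET_BRANCH_RECORDS_SIZE and then copy the records; the fixed-size
 * buffer here is an assumption:
 *
 *	SEC("perf_event")
 *	int lbr(struct bpf_perf_event_data *ctx)
 *	{
 *		struct perf_branch_entry entries[16];
 *		long sz;
 *
 *		sz = bpf_read_branch_records(ctx, NULL, 0,
 *					     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *		if (sz > 0)
 *			bpf_read_branch_records(ctx, entries,
 *						sizeof(entries), 0);
 *		return 0;
 *	}
 */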
1825
1826static const struct bpf_func_proto *
1827pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1828{
1829	switch (func_id) {
1830	case BPF_FUNC_perf_event_output:
1831		return &bpf_perf_event_output_proto_tp;
1832	case BPF_FUNC_get_stackid:
1833		return &bpf_get_stackid_proto_pe;
1834	case BPF_FUNC_get_stack:
1835		return &bpf_get_stack_proto_pe;
1836	case BPF_FUNC_perf_prog_read_value:
1837		return &bpf_perf_prog_read_value_proto;
1838	case BPF_FUNC_read_branch_records:
1839		return &bpf_read_branch_records_proto;
1840	case BPF_FUNC_get_attach_cookie:
1841		return &bpf_get_attach_cookie_proto_pe;
1842	default:
1843		return bpf_tracing_func_proto(func_id, prog);
1844	}
1845}
1846
1847/*
1848 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 1849 * to avoid a potential recursive reuse issue when/if tracepoints are added
1850 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1851 *
1852 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1853 * in normal, irq, and nmi context.
1854 */
1855struct bpf_raw_tp_regs {
1856	struct pt_regs regs[3];
1857};
1858static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1859static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1860static struct pt_regs *get_bpf_raw_tp_regs(void)
1861{
1862	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1863	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1864
1865	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1866		this_cpu_dec(bpf_raw_tp_nest_level);
1867		return ERR_PTR(-EBUSY);
1868	}
1869
1870	return &tp_regs->regs[nest_level - 1];
1871}
1872
1873static void put_bpf_raw_tp_regs(void)
1874{
1875	this_cpu_dec(bpf_raw_tp_nest_level);
1876}
1877
1878BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1879	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1880{
1881	struct pt_regs *regs = get_bpf_raw_tp_regs();
1882	int ret;
1883
1884	if (IS_ERR(regs))
1885		return PTR_ERR(regs);
1886
1887	perf_fetch_caller_regs(regs);
1888	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1889
1890	put_bpf_raw_tp_regs();
1891	return ret;
1892}
1893
1894static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1895	.func		= bpf_perf_event_output_raw_tp,
1896	.gpl_only	= true,
1897	.ret_type	= RET_INTEGER,
1898	.arg1_type	= ARG_PTR_TO_CTX,
1899	.arg2_type	= ARG_CONST_MAP_PTR,
1900	.arg3_type	= ARG_ANYTHING,
1901	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1902	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1903};
1904
1905extern const struct bpf_func_proto bpf_skb_output_proto;
1906extern const struct bpf_func_proto bpf_xdp_output_proto;
1907extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1908
1909BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1910	   struct bpf_map *, map, u64, flags)
1911{
1912	struct pt_regs *regs = get_bpf_raw_tp_regs();
1913	int ret;
1914
1915	if (IS_ERR(regs))
1916		return PTR_ERR(regs);
1917
1918	perf_fetch_caller_regs(regs);
1919	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1920	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1921			      flags, 0, 0);
1922	put_bpf_raw_tp_regs();
1923	return ret;
1924}
1925
1926static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1927	.func		= bpf_get_stackid_raw_tp,
1928	.gpl_only	= true,
1929	.ret_type	= RET_INTEGER,
1930	.arg1_type	= ARG_PTR_TO_CTX,
1931	.arg2_type	= ARG_CONST_MAP_PTR,
1932	.arg3_type	= ARG_ANYTHING,
1933};
1934
1935BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1936	   void *, buf, u32, size, u64, flags)
1937{
1938	struct pt_regs *regs = get_bpf_raw_tp_regs();
1939	int ret;
1940
1941	if (IS_ERR(regs))
1942		return PTR_ERR(regs);
1943
1944	perf_fetch_caller_regs(regs);
1945	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1946			    (unsigned long) size, flags, 0);
1947	put_bpf_raw_tp_regs();
1948	return ret;
1949}
1950
1951static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1952	.func		= bpf_get_stack_raw_tp,
1953	.gpl_only	= true,
1954	.ret_type	= RET_INTEGER,
1955	.arg1_type	= ARG_PTR_TO_CTX,
1956	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1957	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1958	.arg4_type	= ARG_ANYTHING,
1959};
1960
1961static const struct bpf_func_proto *
1962raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1963{
1964	switch (func_id) {
1965	case BPF_FUNC_perf_event_output:
1966		return &bpf_perf_event_output_proto_raw_tp;
1967	case BPF_FUNC_get_stackid:
1968		return &bpf_get_stackid_proto_raw_tp;
1969	case BPF_FUNC_get_stack:
1970		return &bpf_get_stack_proto_raw_tp;
1971	case BPF_FUNC_get_attach_cookie:
1972		return &bpf_get_attach_cookie_proto_tracing;
1973	default:
1974		return bpf_tracing_func_proto(func_id, prog);
1975	}
1976}
1977
1978const struct bpf_func_proto *
1979tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1980{
1981	const struct bpf_func_proto *fn;
1982
1983	switch (func_id) {
1984#ifdef CONFIG_NET
1985	case BPF_FUNC_skb_output:
1986		return &bpf_skb_output_proto;
1987	case BPF_FUNC_xdp_output:
1988		return &bpf_xdp_output_proto;
1989	case BPF_FUNC_skc_to_tcp6_sock:
1990		return &bpf_skc_to_tcp6_sock_proto;
1991	case BPF_FUNC_skc_to_tcp_sock:
1992		return &bpf_skc_to_tcp_sock_proto;
1993	case BPF_FUNC_skc_to_tcp_timewait_sock:
1994		return &bpf_skc_to_tcp_timewait_sock_proto;
1995	case BPF_FUNC_skc_to_tcp_request_sock:
1996		return &bpf_skc_to_tcp_request_sock_proto;
1997	case BPF_FUNC_skc_to_udp6_sock:
1998		return &bpf_skc_to_udp6_sock_proto;
1999	case BPF_FUNC_skc_to_unix_sock:
2000		return &bpf_skc_to_unix_sock_proto;
2001	case BPF_FUNC_skc_to_mptcp_sock:
2002		return &bpf_skc_to_mptcp_sock_proto;
2003	case BPF_FUNC_sk_storage_get:
2004		return &bpf_sk_storage_get_tracing_proto;
2005	case BPF_FUNC_sk_storage_delete:
2006		return &bpf_sk_storage_delete_tracing_proto;
2007	case BPF_FUNC_sock_from_file:
2008		return &bpf_sock_from_file_proto;
2009	case BPF_FUNC_get_socket_cookie:
2010		return &bpf_get_socket_ptr_cookie_proto;
2011	case BPF_FUNC_xdp_get_buff_len:
2012		return &bpf_xdp_get_buff_len_trace_proto;
2013#endif
2014	case BPF_FUNC_seq_printf:
2015		return prog->expected_attach_type == BPF_TRACE_ITER ?
2016		       &bpf_seq_printf_proto :
2017		       NULL;
2018	case BPF_FUNC_seq_write:
2019		return prog->expected_attach_type == BPF_TRACE_ITER ?
2020		       &bpf_seq_write_proto :
2021		       NULL;
2022	case BPF_FUNC_seq_printf_btf:
2023		return prog->expected_attach_type == BPF_TRACE_ITER ?
2024		       &bpf_seq_printf_btf_proto :
2025		       NULL;
2026	case BPF_FUNC_d_path:
2027		return &bpf_d_path_proto;
2028	case BPF_FUNC_get_func_arg:
2029		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
2030	case BPF_FUNC_get_func_ret:
2031		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
2032	case BPF_FUNC_get_func_arg_cnt:
2033		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
2034	case BPF_FUNC_get_attach_cookie:
2035		if (prog->type == BPF_PROG_TYPE_TRACING &&
2036		    prog->expected_attach_type == BPF_TRACE_RAW_TP)
2037			return &bpf_get_attach_cookie_proto_tracing;
2038		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2039	default:
2040		fn = raw_tp_prog_func_proto(func_id, prog);
2041		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2042			fn = bpf_iter_get_func_proto(func_id, prog);
2043		return fn;
2044	}
2045}
2046
2047static bool raw_tp_prog_is_valid_access(int off, int size,
2048					enum bpf_access_type type,
2049					const struct bpf_prog *prog,
2050					struct bpf_insn_access_aux *info)
2051{
2052	return bpf_tracing_ctx_access(off, size, type);
2053}
2054
2055static bool tracing_prog_is_valid_access(int off, int size,
2056					 enum bpf_access_type type,
2057					 const struct bpf_prog *prog,
2058					 struct bpf_insn_access_aux *info)
2059{
2060	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2061}
2062
2063int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2064				     const union bpf_attr *kattr,
2065				     union bpf_attr __user *uattr)
2066{
2067	return -ENOTSUPP;
2068}
2069
2070const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2071	.get_func_proto  = raw_tp_prog_func_proto,
2072	.is_valid_access = raw_tp_prog_is_valid_access,
2073};
2074
2075const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2076#ifdef CONFIG_NET
2077	.test_run = bpf_prog_test_run_raw_tp,
2078#endif
2079};
2080
2081const struct bpf_verifier_ops tracing_verifier_ops = {
2082	.get_func_proto  = tracing_prog_func_proto,
2083	.is_valid_access = tracing_prog_is_valid_access,
2084};
2085
2086const struct bpf_prog_ops tracing_prog_ops = {
2087	.test_run = bpf_prog_test_run_tracing,
2088};
2089
2090static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2091						 enum bpf_access_type type,
2092						 const struct bpf_prog *prog,
2093						 struct bpf_insn_access_aux *info)
2094{
2095	if (off == 0) {
2096		if (size != sizeof(u64) || type != BPF_READ)
2097			return false;
2098		info->reg_type = PTR_TO_TP_BUFFER;
2099	}
2100	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2101}
2102
2103const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2104	.get_func_proto  = raw_tp_prog_func_proto,
2105	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2106};
2107
2108const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2109};
2110
2111static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2112				    const struct bpf_prog *prog,
2113				    struct bpf_insn_access_aux *info)
2114{
2115	const int size_u64 = sizeof(u64);
2116
2117	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2118		return false;
2119	if (type != BPF_READ)
2120		return false;
2121	if (off % size != 0) {
2122		if (sizeof(unsigned long) != 4)
2123			return false;
2124		if (size != 8)
2125			return false;
2126		if (off % size != 4)
2127			return false;
2128	}
2129
2130	switch (off) {
2131	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2132		bpf_ctx_record_field_size(info, size_u64);
2133		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2134			return false;
2135		break;
2136	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2137		bpf_ctx_record_field_size(info, size_u64);
2138		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2139			return false;
2140		break;
2141	default:
2142		if (size != sizeof(long))
2143			return false;
2144	}
2145
2146	return true;
2147}
2148
2149static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2150				      const struct bpf_insn *si,
2151				      struct bpf_insn *insn_buf,
2152				      struct bpf_prog *prog, u32 *target_size)
2153{
2154	struct bpf_insn *insn = insn_buf;
2155
2156	switch (si->off) {
2157	case offsetof(struct bpf_perf_event_data, sample_period):
2158		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2159						       data), si->dst_reg, si->src_reg,
2160				      offsetof(struct bpf_perf_event_data_kern, data));
2161		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2162				      bpf_target_off(struct perf_sample_data, period, 8,
2163						     target_size));
2164		break;
2165	case offsetof(struct bpf_perf_event_data, addr):
2166		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2167						       data), si->dst_reg, si->src_reg,
2168				      offsetof(struct bpf_perf_event_data_kern, data));
2169		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2170				      bpf_target_off(struct perf_sample_data, addr, 8,
2171						     target_size));
2172		break;
2173	default:
2174		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2175						       regs), si->dst_reg, si->src_reg,
2176				      offsetof(struct bpf_perf_event_data_kern, regs));
2177		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2178				      si->off);
2179		break;
2180	}
2181
2182	return insn - insn_buf;
2183}
2184
2185const struct bpf_verifier_ops perf_event_verifier_ops = {
2186	.get_func_proto		= pe_prog_func_proto,
2187	.is_valid_access	= pe_prog_is_valid_access,
2188	.convert_ctx_access	= pe_prog_convert_ctx_access,
2189};
2190
2191const struct bpf_prog_ops perf_event_prog_ops = {
2192};
2193
2194static DEFINE_MUTEX(bpf_event_mutex);
2195
2196#define BPF_TRACE_MAX_PROGS 64
2197
2198int perf_event_attach_bpf_prog(struct perf_event *event,
2199			       struct bpf_prog *prog,
2200			       u64 bpf_cookie)
2201{
2202	struct bpf_prog_array *old_array;
2203	struct bpf_prog_array *new_array;
2204	int ret = -EEXIST;
2205
2206	/*
 2207	 * Kprobe override only works if the probe is on the function entry,
 2208	 * and only if the function is on the error-injection opt-in list.
2209	 */
2210	if (prog->kprobe_override &&
2211	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2212	     !trace_kprobe_error_injectable(event->tp_event)))
2213		return -EINVAL;
2214
2215	mutex_lock(&bpf_event_mutex);
2216
2217	if (event->prog)
2218		goto unlock;
2219
2220	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2221	if (old_array &&
2222	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2223		ret = -E2BIG;
2224		goto unlock;
2225	}
2226
2227	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2228	if (ret < 0)
2229		goto unlock;
2230
2231	/* set the new array to event->tp_event and set event->prog */
2232	event->prog = prog;
2233	event->bpf_cookie = bpf_cookie;
2234	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2235	bpf_prog_array_free_sleepable(old_array);
2236
2237unlock:
2238	mutex_unlock(&bpf_event_mutex);
2239	return ret;
2240}
2241
2242void perf_event_detach_bpf_prog(struct perf_event *event)
2243{
2244	struct bpf_prog_array *old_array;
2245	struct bpf_prog_array *new_array;
2246	int ret;
2247
2248	mutex_lock(&bpf_event_mutex);
2249
2250	if (!event->prog)
2251		goto unlock;
2252
2253	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2254	if (!old_array)
2255		goto put;
2256
2257	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2258	if (ret < 0) {
2259		bpf_prog_array_delete_safe(old_array, event->prog);
2260	} else {
2261		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2262		bpf_prog_array_free_sleepable(old_array);
2263	}
2264
2265put:
2266	/*
2267	 * It could be that the bpf_prog is not sleepable (and will be freed
2268	 * via normal RCU), but is called from a point that supports sleepable
2269	 * programs and uses tasks-trace-RCU.
2270	 */
2271	synchronize_rcu_tasks_trace();
2272
2273	bpf_prog_put(event->prog);
2274	event->prog = NULL;
2275
2276unlock:
2277	mutex_unlock(&bpf_event_mutex);
2278}
2279
2280int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2281{
2282	struct perf_event_query_bpf __user *uquery = info;
2283	struct perf_event_query_bpf query = {};
2284	struct bpf_prog_array *progs;
2285	u32 *ids, prog_cnt, ids_len;
2286	int ret;
2287
2288	if (!perfmon_capable())
2289		return -EPERM;
2290	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2291		return -EINVAL;
2292	if (copy_from_user(&query, uquery, sizeof(query)))
2293		return -EFAULT;
2294
2295	ids_len = query.ids_len;
2296	if (ids_len > BPF_TRACE_MAX_PROGS)
2297		return -E2BIG;
2298	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2299	if (!ids)
2300		return -ENOMEM;
2301	/*
2302	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2303	 * is required when user only wants to check for uquery->prog_cnt.
2304	 * There is no need to check for it since the case is handled
2305	 * gracefully in bpf_prog_array_copy_info.
2306	 */
2307
2308	mutex_lock(&bpf_event_mutex);
2309	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2310	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2311	mutex_unlock(&bpf_event_mutex);
2312
2313	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2314	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2315		ret = -EFAULT;
2316
2317	kfree(ids);
2318	return ret;
2319}
2320
2321extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2322extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2323
2324struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2325{
2326	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2327
2328	for (; btp < __stop__bpf_raw_tp; btp++) {
2329		if (!strcmp(btp->tp->name, name))
2330			return btp;
2331	}
2332
2333	return bpf_get_raw_tracepoint_module(name);
2334}
2335
2336void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2337{
2338	struct module *mod;
2339
2340	preempt_disable();
2341	mod = __module_address((unsigned long)btp);
2342	module_put(mod);
2343	preempt_enable();
2344}
2345
2346static __always_inline
2347void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
2348{
2349	struct bpf_prog *prog = link->link.prog;
2350	struct bpf_run_ctx *old_run_ctx;
2351	struct bpf_trace_run_ctx run_ctx;
2352
2353	cant_sleep();
2354	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2355		bpf_prog_inc_misses_counter(prog);
2356		goto out;
2357	}
2358
2359	run_ctx.bpf_cookie = link->cookie;
2360	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2361
2362	rcu_read_lock();
2363	(void) bpf_prog_run(prog, args);
2364	rcu_read_unlock();
2365
2366	bpf_reset_run_ctx(old_run_ctx);
2367out:
2368	this_cpu_dec(*(prog->active));
2369}
2370
2371#define UNPACK(...)			__VA_ARGS__
2372#define REPEAT_1(FN, DL, X, ...)	FN(X)
2373#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2374#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2375#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2376#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2377#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2378#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2379#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2380#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2381#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2382#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2383#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2384#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2385
2386#define SARG(X)		u64 arg##X
2387#define COPY(X)		args[X] = arg##X
2388
2389#define __DL_COM	(,)
2390#define __DL_SEM	(;)
2391
2392#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2393
2394#define BPF_TRACE_DEFN_x(x)						\
2395	void bpf_trace_run##x(struct bpf_raw_tp_link *link,		\
2396			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2397	{								\
2398		u64 args[x];						\
2399		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2400		__bpf_trace_run(link, args);				\
2401	}								\
2402	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2403BPF_TRACE_DEFN_x(1);
2404BPF_TRACE_DEFN_x(2);
2405BPF_TRACE_DEFN_x(3);
2406BPF_TRACE_DEFN_x(4);
2407BPF_TRACE_DEFN_x(5);
2408BPF_TRACE_DEFN_x(6);
2409BPF_TRACE_DEFN_x(7);
2410BPF_TRACE_DEFN_x(8);
2411BPF_TRACE_DEFN_x(9);
2412BPF_TRACE_DEFN_x(10);
2413BPF_TRACE_DEFN_x(11);
2414BPF_TRACE_DEFN_x(12);
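/*
 * Editor's note: the REPEAT()/SARG()/COPY() machinery above is dense, so for
 * reference this is (roughly) what BPF_TRACE_DEFN_x(2) expands to:
 *
 *	void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(link, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */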
2415
2416int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2417{
2418	struct tracepoint *tp = btp->tp;
2419	struct bpf_prog *prog = link->link.prog;
2420
2421	/*
 2422	 * Check that the program doesn't access arguments beyond what's
 2423	 * available in this tracepoint.
2424	 */
2425	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2426		return -EINVAL;
2427
2428	if (prog->aux->max_tp_access > btp->writable_size)
2429		return -EINVAL;
2430
2431	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
2432}
2433
2434int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2435{
2436	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
2437}
2438
2439int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2440			    u32 *fd_type, const char **buf,
2441			    u64 *probe_offset, u64 *probe_addr,
2442			    unsigned long *missed)
2443{
2444	bool is_tracepoint, is_syscall_tp;
2445	struct bpf_prog *prog;
2446	int flags, err = 0;
2447
2448	prog = event->prog;
2449	if (!prog)
2450		return -ENOENT;
2451
2452	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2453	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2454		return -EOPNOTSUPP;
2455
2456	*prog_id = prog->aux->id;
2457	flags = event->tp_event->flags;
2458	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2459	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2460
2461	if (is_tracepoint || is_syscall_tp) {
2462		*buf = is_tracepoint ? event->tp_event->tp->name
2463				     : event->tp_event->name;
2464		/* We allow NULL pointer for tracepoint */
2465		if (fd_type)
2466			*fd_type = BPF_FD_TYPE_TRACEPOINT;
2467		if (probe_offset)
2468			*probe_offset = 0x0;
2469		if (probe_addr)
2470			*probe_addr = 0x0;
2471	} else {
2472		/* kprobe/uprobe */
2473		err = -EOPNOTSUPP;
2474#ifdef CONFIG_KPROBE_EVENTS
2475		if (flags & TRACE_EVENT_FL_KPROBE)
2476			err = bpf_get_kprobe_info(event, fd_type, buf,
2477						  probe_offset, probe_addr, missed,
2478						  event->attr.type == PERF_TYPE_TRACEPOINT);
2479#endif
2480#ifdef CONFIG_UPROBE_EVENTS
2481		if (flags & TRACE_EVENT_FL_UPROBE)
2482			err = bpf_get_uprobe_info(event, fd_type, buf,
2483						  probe_offset, probe_addr,
2484						  event->attr.type == PERF_TYPE_TRACEPOINT);
2485#endif
2486	}
2487
2488	return err;
2489}
2490
2491static int __init send_signal_irq_work_init(void)
2492{
2493	int cpu;
2494	struct send_signal_irq_work *work;
2495
2496	for_each_possible_cpu(cpu) {
2497		work = per_cpu_ptr(&send_signal_work, cpu);
2498		init_irq_work(&work->irq_work, do_bpf_send_signal);
2499	}
2500	return 0;
2501}
2502
2503subsys_initcall(send_signal_irq_work_init);
2504
2505#ifdef CONFIG_MODULES
2506static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2507			    void *module)
2508{
2509	struct bpf_trace_module *btm, *tmp;
2510	struct module *mod = module;
2511	int ret = 0;
2512
2513	if (mod->num_bpf_raw_events == 0 ||
2514	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2515		goto out;
2516
2517	mutex_lock(&bpf_module_mutex);
2518
2519	switch (op) {
2520	case MODULE_STATE_COMING:
2521		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2522		if (btm) {
2523			btm->module = module;
2524			list_add(&btm->list, &bpf_trace_modules);
2525		} else {
2526			ret = -ENOMEM;
2527		}
2528		break;
2529	case MODULE_STATE_GOING:
2530		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2531			if (btm->module == module) {
2532				list_del(&btm->list);
2533				kfree(btm);
2534				break;
2535			}
2536		}
2537		break;
2538	}
2539
2540	mutex_unlock(&bpf_module_mutex);
2541
2542out:
2543	return notifier_from_errno(ret);
2544}
2545
2546static struct notifier_block bpf_module_nb = {
2547	.notifier_call = bpf_event_notify,
2548};
2549
2550static int __init bpf_event_init(void)
2551{
2552	register_module_notifier(&bpf_module_nb);
2553	return 0;
2554}
2555
2556fs_initcall(bpf_event_init);
2557#endif /* CONFIG_MODULES */
2558
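/*
 * Run context shared by kprobe and uprobe session programs: is_return tells
 * entry and exit invocations apart, data points at the per-invocation
 * session cookie returned by bpf_session_cookie().
 */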
2559struct bpf_session_run_ctx {
2560	struct bpf_run_ctx run_ctx;
2561	bool is_return;
2562	void *data;
2563};
2564
2565#ifdef CONFIG_FPROBE
2566struct bpf_kprobe_multi_link {
2567	struct bpf_link link;
2568	struct fprobe fp;
2569	unsigned long *addrs;
2570	u64 *cookies;
2571	u32 cnt;
2572	u32 mods_cnt;
2573	struct module **mods;
2574	u32 flags;
2575};
2576
2577struct bpf_kprobe_multi_run_ctx {
2578	struct bpf_session_run_ctx session_ctx;
2579	struct bpf_kprobe_multi_link *link;
2580	unsigned long entry_ip;
2581};
2582
2583struct user_syms {
2584	const char **syms;
2585	char *buf;
2586};
2587
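/*
 * Copy an array of user pointers to symbol name strings into a single kernel
 * buffer: us->syms[i] points into us->buf and each name is bounded by
 * KSYM_NAME_LEN. Released with free_user_syms().
 */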
2588static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2589{
2590	unsigned long __user usymbol;
2591	const char **syms = NULL;
2592	char *buf = NULL, *p;
2593	int err = -ENOMEM;
2594	unsigned int i;
2595
2596	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2597	if (!syms)
2598		goto error;
2599
2600	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2601	if (!buf)
2602		goto error;
2603
2604	for (p = buf, i = 0; i < cnt; i++) {
2605		if (__get_user(usymbol, usyms + i)) {
2606			err = -EFAULT;
2607			goto error;
2608		}
2609		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2610		if (err == KSYM_NAME_LEN)
2611			err = -E2BIG;
2612		if (err < 0)
2613			goto error;
2614		syms[i] = p;
2615		p += err + 1;
2616	}
2617
2618	us->syms = syms;
2619	us->buf = buf;
2620	return 0;
2621
2622error:
2623	if (err) {
2624		kvfree(syms);
2625		kvfree(buf);
2626	}
2627	return err;
2628}
2629
2630static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2631{
2632	u32 i;
2633
2634	for (i = 0; i < cnt; i++)
2635		module_put(mods[i]);
2636}
2637
2638static void free_user_syms(struct user_syms *us)
2639{
2640	kvfree(us->syms);
2641	kvfree(us->buf);
2642}
2643
2644static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2645{
2646	struct bpf_kprobe_multi_link *kmulti_link;
2647
2648	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2649	unregister_fprobe(&kmulti_link->fp);
2650	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2651}
2652
2653static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2654{
2655	struct bpf_kprobe_multi_link *kmulti_link;
2656
2657	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2658	kvfree(kmulti_link->addrs);
2659	kvfree(kmulti_link->cookies);
2660	kfree(kmulti_link->mods);
2661	kfree(kmulti_link);
2662}
2663
2664static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2665						struct bpf_link_info *info)
2666{
2667	u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2668	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2669	struct bpf_kprobe_multi_link *kmulti_link;
2670	u32 ucount = info->kprobe_multi.count;
2671	int err = 0, i;
2672
2673	if (!uaddrs ^ !ucount)
2674		return -EINVAL;
2675	if (ucookies && !ucount)
2676		return -EINVAL;
2677
2678	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2679	info->kprobe_multi.count = kmulti_link->cnt;
2680	info->kprobe_multi.flags = kmulti_link->flags;
2681	info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2682
2683	if (!uaddrs)
2684		return 0;
2685	if (ucount < kmulti_link->cnt)
2686		err = -ENOSPC;
2687	else
2688		ucount = kmulti_link->cnt;
2689
2690	if (ucookies) {
2691		if (kmulti_link->cookies) {
2692			if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2693				return -EFAULT;
2694		} else {
2695			for (i = 0; i < ucount; i++) {
2696				if (put_user(0, ucookies + i))
2697					return -EFAULT;
2698			}
2699		}
2700	}
2701
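	/* Expose real kernel addresses only to users allowed to see kallsyms values. */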
2702	if (kallsyms_show_value(current_cred())) {
2703		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2704			return -EFAULT;
2705	} else {
2706		for (i = 0; i < ucount; i++) {
2707			if (put_user(0, uaddrs + i))
2708				return -EFAULT;
2709		}
2710	}
2711	return err;
2712}
2713
2714static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2715	.release = bpf_kprobe_multi_link_release,
2716	.dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2717	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2718};
2719
2720static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2721{
2722	const struct bpf_kprobe_multi_link *link = priv;
2723	unsigned long *addr_a = a, *addr_b = b;
2724	u64 *cookie_a, *cookie_b;
2725
2726	cookie_a = link->cookies + (addr_a - link->addrs);
2727	cookie_b = link->cookies + (addr_b - link->addrs);
2728
2729	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2730	swap(*addr_a, *addr_b);
2731	swap(*cookie_a, *cookie_b);
2732}
2733
2734static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2735{
2736	const unsigned long *addr_a = a, *addr_b = b;
2737
2738	if (*addr_a == *addr_b)
2739		return 0;
2740	return *addr_a < *addr_b ? -1 : 1;
2741}
2742
2743static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2744{
2745	return bpf_kprobe_multi_addrs_cmp(a, b);
2746}
2747
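/*
 * Find the cookie for the address that triggered the current probe:
 * link->addrs was sorted at attach time together with link->cookies, so a
 * binary search on the entry IP yields the matching slot.
 */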
2748static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2749{
2750	struct bpf_kprobe_multi_run_ctx *run_ctx;
2751	struct bpf_kprobe_multi_link *link;
2752	u64 *cookie, entry_ip;
2753	unsigned long *addr;
2754
2755	if (WARN_ON_ONCE(!ctx))
2756		return 0;
2757	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2758			       session_ctx.run_ctx);
2759	link = run_ctx->link;
2760	if (!link->cookies)
2761		return 0;
2762	entry_ip = run_ctx->entry_ip;
2763	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2764		       bpf_kprobe_multi_addrs_cmp);
2765	if (!addr)
2766		return 0;
2767	cookie = link->cookies + (addr - link->addrs);
2768	return *cookie;
2769}
2770
2771static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2772{
2773	struct bpf_kprobe_multi_run_ctx *run_ctx;
2774
2775	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2776			       session_ctx.run_ctx);
2777	return run_ctx->entry_ip;
2778}
2779
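/*
 * Run the attached program for one fprobe entry/exit hit. bpf_prog_active
 * serves as a per-CPU recursion guard: if another BPF program is already
 * running on this CPU, the hit is only counted as a miss.
 */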
2780static int
2781kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2782			   unsigned long entry_ip, struct pt_regs *regs,
2783			   bool is_return, void *data)
2784{
2785	struct bpf_kprobe_multi_run_ctx run_ctx = {
2786		.session_ctx = {
2787			.is_return = is_return,
2788			.data = data,
2789		},
2790		.link = link,
2791		.entry_ip = entry_ip,
2792	};
2793	struct bpf_run_ctx *old_run_ctx;
2794	int err;
2795
2796	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2797		bpf_prog_inc_misses_counter(link->link.prog);
2798		err = 0;
2799		goto out;
2800	}
2801
2802	migrate_disable();
2803	rcu_read_lock();
2804	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
2805	err = bpf_prog_run(link->link.prog, regs);
2806	bpf_reset_run_ctx(old_run_ctx);
2807	rcu_read_unlock();
2808	migrate_enable();
2809
2810 out:
2811	__this_cpu_dec(bpf_prog_active);
2812	return err;
2813}
2814
2815static int
2816kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2817			  unsigned long ret_ip, struct pt_regs *regs,
2818			  void *data)
2819{
2820	struct bpf_kprobe_multi_link *link;
2821	int err;
2822
2823	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2824	err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false, data);
2825	return is_kprobe_session(link->link.prog) ? err : 0;
2826}
2827
2828static void
2829kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2830			       unsigned long ret_ip, struct pt_regs *regs,
2831			       void *data)
2832{
2833	struct bpf_kprobe_multi_link *link;
2834
2835	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2836	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true, data);
2837}
2838
2839static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2840{
2841	const char **str_a = (const char **) a;
2842	const char **str_b = (const char **) b;
2843
2844	return strcmp(*str_a, *str_b);
2845}
2846
2847struct multi_symbols_sort {
2848	const char **funcs;
2849	u64 *cookies;
2850};
2851
2852static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2853{
2854	const struct multi_symbols_sort *data = priv;
2855	const char **name_a = a, **name_b = b;
2856
2857	swap(*name_a, *name_b);
2858
2859	/* If defined, swap also related cookies. */
2860	if (data->cookies) {
2861		u64 *cookie_a, *cookie_b;
2862
2863		cookie_a = data->cookies + (name_a - data->funcs);
2864		cookie_b = data->cookies + (name_b - data->funcs);
2865		swap(*cookie_a, *cookie_b);
2866	}
2867}
2868
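/* Minimal growable array of module references, extended by add_module(). */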
2869struct modules_array {
2870	struct module **mods;
2871	int mods_cnt;
2872	int mods_cap;
2873};
2874
2875static int add_module(struct modules_array *arr, struct module *mod)
2876{
2877	struct module **mods;
2878
2879	if (arr->mods_cnt == arr->mods_cap) {
2880		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2881		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2882		if (!mods)
2883			return -ENOMEM;
2884		arr->mods = mods;
2885	}
2886
2887	arr->mods[arr->mods_cnt] = mod;
2888	arr->mods_cnt++;
2889	return 0;
2890}
2891
2892static bool has_module(struct modules_array *arr, struct module *mod)
2893{
2894	int i;
2895
2896	for (i = arr->mods_cnt - 1; i >= 0; i--) {
2897		if (arr->mods[i] == mod)
2898			return true;
2899	}
2900	return false;
2901}
2902
2903static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2904{
2905	struct modules_array arr = {};
2906	u32 i, err = 0;
2907
2908	for (i = 0; i < addrs_cnt; i++) {
2909		struct module *mod;
2910
2911		preempt_disable();
2912		mod = __module_address(addrs[i]);
2913		/* Either no module or it's already stored */
2914		if (!mod || has_module(&arr, mod)) {
2915			preempt_enable();
2916			continue;
2917		}
2918		if (!try_module_get(mod))
2919			err = -EINVAL;
2920		preempt_enable();
2921		if (err)
2922			break;
2923		err = add_module(&arr, mod);
2924		if (err) {
2925			module_put(mod);
2926			break;
2927		}
2928	}
2929
2930	/* We return either err < 0 in case of error, ... */
2931	if (err) {
2932		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2933		kfree(arr.mods);
2934		return err;
2935	}
2936
2937	/* or number of modules found if everything is ok. */
2938	*mods = arr.mods;
2939	return arr.mods_cnt;
2940}
2941
2942static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2943{
2944	u32 i;
2945
2946	for (i = 0; i < cnt; i++) {
2947		if (!within_error_injection_list(addrs[i]))
2948			return -EINVAL;
2949	}
2950	return 0;
2951}
2952
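/*
 * Userspace sketch (via libbpf, assuming a SEC("kprobe.multi") program and
 * the bpf_kprobe_multi_opts API):
 *
 *	const char *syms[] = { "do_unlinkat", "do_sys_openat2" };
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .syms = syms, .cnt = 2);
 *	link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
 *
 * libbpf then issues BPF_LINK_CREATE with either addrs or syms (never both)
 * in attr->link_create.kprobe_multi, which lands here.
 */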
2953int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2954{
2955	struct bpf_kprobe_multi_link *link = NULL;
2956	struct bpf_link_primer link_primer;
2957	void __user *ucookies;
2958	unsigned long *addrs;
2959	u32 flags, cnt, size;
2960	void __user *uaddrs;
2961	u64 *cookies = NULL;
2962	void __user *usyms;
2963	int err;
2964
2965	/* no support for 32bit archs yet */
2966	if (sizeof(u64) != sizeof(void *))
2967		return -EOPNOTSUPP;
2968
2969	if (!is_kprobe_multi(prog))
2970		return -EINVAL;
2971
2972	flags = attr->link_create.kprobe_multi.flags;
2973	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2974		return -EINVAL;
2975
2976	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2977	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2978	if (!!uaddrs == !!usyms)
2979		return -EINVAL;
2980
2981	cnt = attr->link_create.kprobe_multi.cnt;
2982	if (!cnt)
2983		return -EINVAL;
2984	if (cnt > MAX_KPROBE_MULTI_CNT)
2985		return -E2BIG;
2986
2987	size = cnt * sizeof(*addrs);
2988	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2989	if (!addrs)
2990		return -ENOMEM;
2991
2992	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2993	if (ucookies) {
2994		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2995		if (!cookies) {
2996			err = -ENOMEM;
2997			goto error;
2998		}
2999		if (copy_from_user(cookies, ucookies, size)) {
3000			err = -EFAULT;
3001			goto error;
3002		}
3003	}
3004
3005	if (uaddrs) {
3006		if (copy_from_user(addrs, uaddrs, size)) {
3007			err = -EFAULT;
3008			goto error;
3009		}
3010	} else {
3011		struct multi_symbols_sort data = {
3012			.cookies = cookies,
3013		};
3014		struct user_syms us;
3015
3016		err = copy_user_syms(&us, usyms, cnt);
3017		if (err)
3018			goto error;
3019
3020		if (cookies)
3021			data.funcs = us.syms;
3022
3023		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
3024		       symbols_swap_r, &data);
3025
3026		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
3027		free_user_syms(&us);
3028		if (err)
3029			goto error;
3030	}
3031
3032	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
3033		err = -EINVAL;
3034		goto error;
3035	}
3036
3037	link = kzalloc(sizeof(*link), GFP_KERNEL);
3038	if (!link) {
3039		err = -ENOMEM;
3040		goto error;
3041	}
3042
3043	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
3044		      &bpf_kprobe_multi_link_lops, prog);
3045
3046	err = bpf_link_prime(&link->link, &link_primer);
3047	if (err)
3048		goto error;
3049
3050	if (!(flags & BPF_F_KPROBE_MULTI_RETURN))
3051		link->fp.entry_handler = kprobe_multi_link_handler;
3052	if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog))
3053		link->fp.exit_handler = kprobe_multi_link_exit_handler;
3054	if (is_kprobe_session(prog))
3055		link->fp.entry_data_size = sizeof(u64);
3056
3057	link->addrs = addrs;
3058	link->cookies = cookies;
3059	link->cnt = cnt;
3060	link->flags = flags;
3061
3062	if (cookies) {
3063		/*
3064		 * Sorting addresses will trigger sorting cookies as well
3065		 * (check bpf_kprobe_multi_cookie_swap). This way we can
3066		 * find cookie based on the address in bpf_get_attach_cookie
3067		 * helper.
3068		 */
3069		sort_r(addrs, cnt, sizeof(*addrs),
3070		       bpf_kprobe_multi_cookie_cmp,
3071		       bpf_kprobe_multi_cookie_swap,
3072		       link);
3073	}
3074
3075	err = get_modules_for_addrs(&link->mods, addrs, cnt);
3076	if (err < 0) {
3077		bpf_link_cleanup(&link_primer);
3078		return err;
3079	}
3080	link->mods_cnt = err;
3081
3082	err = register_fprobe_ips(&link->fp, addrs, cnt);
3083	if (err) {
3084		kprobe_multi_put_modules(link->mods, link->mods_cnt);
3085		bpf_link_cleanup(&link_primer);
3086		return err;
3087	}
3088
3089	return bpf_link_settle(&link_primer);
3090
3091error:
3092	kfree(link);
3093	kvfree(addrs);
3094	kvfree(cookies);
3095	return err;
3096}
3097#else /* !CONFIG_FPROBE */
3098int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3099{
3100	return -EOPNOTSUPP;
3101}
3102static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3103{
3104	return 0;
3105}
3106static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3107{
3108	return 0;
3109}
3110#endif
3111
3112#ifdef CONFIG_UPROBES
3113struct bpf_uprobe_multi_link;
3114
3115struct bpf_uprobe {
3116	struct bpf_uprobe_multi_link *link;
3117	loff_t offset;
3118	unsigned long ref_ctr_offset;
3119	u64 cookie;
3120	struct uprobe *uprobe;
3121	struct uprobe_consumer consumer;
3122	bool session;
3123};
3124
3125struct bpf_uprobe_multi_link {
3126	struct path path;
3127	struct bpf_link link;
3128	u32 cnt;
3129	u32 flags;
3130	struct bpf_uprobe *uprobes;
3131	struct task_struct *task;
3132};
3133
3134struct bpf_uprobe_multi_run_ctx {
3135	struct bpf_session_run_ctx session_ctx;
3136	unsigned long entry_ip;
3137	struct bpf_uprobe *uprobe;
3138};
3139
3140static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
3141{
3142	u32 i;
3143
3144	for (i = 0; i < cnt; i++)
3145		uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);
3146
3147	if (cnt)
3148		uprobe_unregister_sync();
3149}
3150
3151static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3152{
3153	struct bpf_uprobe_multi_link *umulti_link;
3154
3155	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3156	bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt);
3157	if (umulti_link->task)
3158		put_task_struct(umulti_link->task);
3159	path_put(&umulti_link->path);
3160}
3161
3162static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3163{
3164	struct bpf_uprobe_multi_link *umulti_link;
3165
3166	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3167	kvfree(umulti_link->uprobes);
3168	kfree(umulti_link);
3169}
3170
3171static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3172						struct bpf_link_info *info)
3173{
3174	u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3175	u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3176	u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3177	u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3178	u32 upath_size = info->uprobe_multi.path_size;
3179	struct bpf_uprobe_multi_link *umulti_link;
3180	u32 ucount = info->uprobe_multi.count;
3181	int err = 0, i;
3182	char *p, *buf;
3183	long left = 0;
3184
3185	if (!upath ^ !upath_size)
3186		return -EINVAL;
3187
3188	if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3189		return -EINVAL;
3190
3191	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3192	info->uprobe_multi.count = umulti_link->cnt;
3193	info->uprobe_multi.flags = umulti_link->flags;
3194	info->uprobe_multi.pid = umulti_link->task ?
3195				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3196
3197	upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX;
3198	buf = kmalloc(upath_size, GFP_KERNEL);
3199	if (!buf)
3200		return -ENOMEM;
3201	p = d_path(&umulti_link->path, buf, upath_size);
3202	if (IS_ERR(p)) {
3203		kfree(buf);
3204		return PTR_ERR(p);
3205	}
3206	upath_size = buf + upath_size - p;
3207
3208	if (upath)
3209		left = copy_to_user(upath, p, upath_size);
3210	kfree(buf);
3211	if (left)
3212		return -EFAULT;
3213	info->uprobe_multi.path_size = upath_size;
3214
3215	if (!uoffsets && !ucookies && !uref_ctr_offsets)
3216		return 0;
3217
3218	if (ucount < umulti_link->cnt)
3219		err = -ENOSPC;
3220	else
3221		ucount = umulti_link->cnt;
3222
3223	for (i = 0; i < ucount; i++) {
3224		if (uoffsets &&
3225		    put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3226			return -EFAULT;
3227		if (uref_ctr_offsets &&
3228		    put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3229			return -EFAULT;
3230		if (ucookies &&
3231		    put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3232			return -EFAULT;
3233	}
3234
3235	return err;
3236}
3237
3238static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3239	.release = bpf_uprobe_multi_link_release,
3240	.dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3241	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3242};
3243
3244static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3245			   unsigned long entry_ip,
3246			   struct pt_regs *regs,
3247			   bool is_return, void *data)
3248{
3249	struct bpf_uprobe_multi_link *link = uprobe->link;
3250	struct bpf_uprobe_multi_run_ctx run_ctx = {
3251		.session_ctx = {
3252			.is_return = is_return,
3253			.data = data,
3254		},
3255		.entry_ip = entry_ip,
3256		.uprobe = uprobe,
3257	};
3258	struct bpf_prog *prog = link->link.prog;
3259	bool sleepable = prog->sleepable;
3260	struct bpf_run_ctx *old_run_ctx;
3261	int err;
3262
3263	if (link->task && !same_thread_group(current, link->task))
3264		return 0;
3265
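	/*
	 * Sleepable programs may block (e.g. when copying from user memory),
	 * so they are protected by RCU Tasks Trace rather than plain RCU.
	 */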
3266	if (sleepable)
3267		rcu_read_lock_trace();
3268	else
3269		rcu_read_lock();
3270
3271	migrate_disable();
3272
3273	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
3274	err = bpf_prog_run(link->link.prog, regs);
3275	bpf_reset_run_ctx(old_run_ctx);
3276
3277	migrate_enable();
3278
3279	if (sleepable)
3280		rcu_read_unlock_trace();
3281	else
3282		rcu_read_unlock();
3283	return err;
3284}
3285
3286static bool
3287uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
3288{
3289	struct bpf_uprobe *uprobe;
3290
3291	uprobe = container_of(con, struct bpf_uprobe, consumer);
3292	return uprobe->link->task->mm == mm;
3293}
3294
3295static int
3296uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
3297			  __u64 *data)
3298{
3299	struct bpf_uprobe *uprobe;
3300	int ret;
3301
3302	uprobe = container_of(con, struct bpf_uprobe, consumer);
3303	ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data);
3304	if (uprobe->session)
3305		return ret ? UPROBE_HANDLER_IGNORE : 0;
3306	return 0;
3307}
3308
3309static int
3310uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
3311			      __u64 *data)
3312{
3313	struct bpf_uprobe *uprobe;
3314
3315	uprobe = container_of(con, struct bpf_uprobe, consumer);
3316	uprobe_prog_run(uprobe, func, regs, true, data);
3317	return 0;
3318}
3319
3320static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3321{
3322	struct bpf_uprobe_multi_run_ctx *run_ctx;
3323
3324	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3325			       session_ctx.run_ctx);
3326	return run_ctx->entry_ip;
3327}
3328
3329static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3330{
3331	struct bpf_uprobe_multi_run_ctx *run_ctx;
3332
3333	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3334			       session_ctx.run_ctx);
3335	return run_ctx->uprobe->cookie;
3336}
3337
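/*
 * Attach one BPF program to many uprobes in a single binary. The
 * BPF_LINK_CREATE attributes carry the path, an array of file offsets and
 * their count (mandatory), plus optional ref_ctr_offsets, cookies and a pid
 * filter; libbpf's uprobe_multi attach helpers are the usual front end
 * (an assumption about the userspace side, not enforced here).
 */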
3338int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3339{
3340	struct bpf_uprobe_multi_link *link = NULL;
3341	unsigned long __user *uref_ctr_offsets;
3342	struct bpf_link_primer link_primer;
3343	struct bpf_uprobe *uprobes = NULL;
3344	struct task_struct *task = NULL;
3345	unsigned long __user *uoffsets;
3346	u64 __user *ucookies;
3347	void __user *upath;
3348	u32 flags, cnt, i;
3349	struct path path;
3350	char *name;
3351	pid_t pid;
3352	int err;
3353
3354	/* no support for 32bit archs yet */
3355	if (sizeof(u64) != sizeof(void *))
3356		return -EOPNOTSUPP;
3357
3358	if (!is_uprobe_multi(prog))
3359		return -EINVAL;
3360
3361	flags = attr->link_create.uprobe_multi.flags;
3362	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3363		return -EINVAL;
3364
3365	/*
3366	 * path, offsets and cnt are mandatory,
3367	 * ref_ctr_offsets and cookies are optional
3368	 */
3369	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3370	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3371	cnt = attr->link_create.uprobe_multi.cnt;
3372	pid = attr->link_create.uprobe_multi.pid;
3373
3374	if (!upath || !uoffsets || !cnt || pid < 0)
3375		return -EINVAL;
3376	if (cnt > MAX_UPROBE_MULTI_CNT)
3377		return -E2BIG;
3378
3379	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3380	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3381
3382	name = strndup_user(upath, PATH_MAX);
3383	if (IS_ERR(name)) {
3384		err = PTR_ERR(name);
3385		return err;
3386	}
3387
3388	err = kern_path(name, LOOKUP_FOLLOW, &path);
3389	kfree(name);
3390	if (err)
3391		return err;
3392
3393	if (!d_is_reg(path.dentry)) {
3394		err = -EBADF;
3395		goto error_path_put;
3396	}
3397
3398	if (pid) {
3399		task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
3400		if (!task) {
3401			err = -ESRCH;
3402			goto error_path_put;
3403		}
3404	}
3405
3406	err = -ENOMEM;
3407
3408	link = kzalloc(sizeof(*link), GFP_KERNEL);
3409	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3410
3411	if (!uprobes || !link)
3412		goto error_free;
3413
3414	for (i = 0; i < cnt; i++) {
3415		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3416			err = -EFAULT;
3417			goto error_free;
3418		}
3419		if (uprobes[i].offset < 0) {
3420			err = -EINVAL;
3421			goto error_free;
3422		}
3423		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3424			err = -EFAULT;
3425			goto error_free;
3426		}
3427		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3428			err = -EFAULT;
3429			goto error_free;
3430		}
3431
3432		uprobes[i].link = link;
3433
3434		if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
3435			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3436		if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
3437			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3438		if (is_uprobe_session(prog))
3439			uprobes[i].session = true;
3440		if (pid)
3441			uprobes[i].consumer.filter = uprobe_multi_link_filter;
3442	}
3443
3444	link->cnt = cnt;
3445	link->uprobes = uprobes;
3446	link->path = path;
3447	link->task = task;
3448	link->flags = flags;
3449
3450	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3451		      &bpf_uprobe_multi_link_lops, prog);
3452
3453	for (i = 0; i < cnt; i++) {
3454		uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry),
3455						    uprobes[i].offset,
3456						    uprobes[i].ref_ctr_offset,
3457						    &uprobes[i].consumer);
3458		if (IS_ERR(uprobes[i].uprobe)) {
3459			err = PTR_ERR(uprobes[i].uprobe);
3460			link->cnt = i;
3461			goto error_unregister;
3462		}
3463	}
3464
3465	err = bpf_link_prime(&link->link, &link_primer);
3466	if (err)
3467		goto error_unregister;
3468
3469	return bpf_link_settle(&link_primer);
3470
3471error_unregister:
3472	bpf_uprobe_unregister(uprobes, link->cnt);
3473
3474error_free:
3475	kvfree(uprobes);
3476	kfree(link);
3477	if (task)
3478		put_task_struct(task);
3479error_path_put:
3480	path_put(&path);
3481	return err;
3482}
3483#else /* !CONFIG_UPROBES */
3484int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3485{
3486	return -EOPNOTSUPP;
3487}
3488static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3489{
3490	return 0;
3491}
3492static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3493{
3494	return 0;
3495}
3496#endif /* CONFIG_UPROBES */
3497
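/*
 * kfuncs for kprobe/uprobe session programs. A sketch of typical use,
 * assuming libbpf's SEC("kprobe.session") convention:
 *
 *	SEC("kprobe.session/do_unlinkat")
 *	int prog(struct pt_regs *ctx)
 *	{
 *		__u64 *cookie = bpf_session_cookie();
 *
 *		if (!bpf_session_is_return())
 *			*cookie = bpf_ktime_get_ns();	// entry
 *		else
 *			bpf_printk("lat %llu", bpf_ktime_get_ns() - *cookie);
 *		return 0;
 *	}
 *
 * The cookie storage is the per-invocation entry data set up by the
 * fprobe/uprobe session handlers above.
 */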
3498__bpf_kfunc_start_defs();
3499
3500__bpf_kfunc bool bpf_session_is_return(void)
3501{
3502	struct bpf_session_run_ctx *session_ctx;
3503
3504	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3505	return session_ctx->is_return;
3506}
3507
3508__bpf_kfunc __u64 *bpf_session_cookie(void)
3509{
3510	struct bpf_session_run_ctx *session_ctx;
3511
3512	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3513	return session_ctx->data;
3514}
3515
3516__bpf_kfunc_end_defs();
3517
3518BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
3519BTF_ID_FLAGS(func, bpf_session_is_return)
3520BTF_ID_FLAGS(func, bpf_session_cookie)
3521BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
3522
3523static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
3524{
3525	if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
3526		return 0;
3527
3528	if (!is_kprobe_session(prog) && !is_uprobe_session(prog))
3529		return -EACCES;
3530
3531	return 0;
3532}
3533
3534static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
3535	.owner = THIS_MODULE,
3536	.set = &kprobe_multi_kfunc_set_ids,
3537	.filter = bpf_kprobe_multi_filter,
3538};
3539
3540static int __init bpf_kprobe_multi_kfuncs_init(void)
3541{
3542	return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
3543}
3544
3545late_initcall(bpf_kprobe_multi_kfuncs_init);
3546
3547__bpf_kfunc_start_defs();
3548
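/*
 * Send signal 'sig' to 'task' from a BPF program. Only PIDTYPE_PID (a single
 * thread) and PIDTYPE_TGID (the whole thread group) are accepted; 'value' is
 * forwarded to bpf_send_signal_common() together with the target task.
 */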
3549__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
3550				     u64 value)
3551{
3552	if (type != PIDTYPE_PID && type != PIDTYPE_TGID)
3553		return -EINVAL;
3554
3555	return bpf_send_signal_common(sig, type, task, value);
3556}
3557
3558__bpf_kfunc_end_defs();