v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/kernel.h>
   6#include <linux/types.h>
   7#include <linux/slab.h>
   8#include <linux/bpf.h>
   9#include <linux/bpf_perf_event.h>
  10#include <linux/filter.h>
  11#include <linux/uaccess.h>
  12#include <linux/ctype.h>
  13#include <linux/kprobes.h>
  14#include <linux/syscalls.h>
  15#include <linux/error-injection.h>
  16
  17#include <asm/tlb.h>
  18
  19#include "trace_probe.h"
  20#include "trace.h"
  21
  22#define bpf_event_rcu_dereference(p)					\
  23	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
  24
  25#ifdef CONFIG_MODULES
  26struct bpf_trace_module {
  27	struct module *module;
  28	struct list_head list;
  29};
  30
  31static LIST_HEAD(bpf_trace_modules);
  32static DEFINE_MUTEX(bpf_module_mutex);
  33
  34static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  35{
  36	struct bpf_raw_event_map *btp, *ret = NULL;
  37	struct bpf_trace_module *btm;
  38	unsigned int i;
  39
  40	mutex_lock(&bpf_module_mutex);
  41	list_for_each_entry(btm, &bpf_trace_modules, list) {
  42		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
  43			btp = &btm->module->bpf_raw_events[i];
  44			if (!strcmp(btp->tp->name, name)) {
  45				if (try_module_get(btm->module))
  46					ret = btp;
  47				goto out;
  48			}
  49		}
  50	}
  51out:
  52	mutex_unlock(&bpf_module_mutex);
  53	return ret;
  54}
  55#else
  56static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  57{
  58	return NULL;
  59}
  60#endif /* CONFIG_MODULES */
  61
  62u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  63u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  64
  65/**
  66 * trace_call_bpf - invoke BPF program
  67 * @call: tracepoint event
  68 * @ctx: opaque context pointer
  69 *
  70 * kprobe handlers execute BPF programs via this helper.
  71 * Can be used from static tracepoints in the future.
  72 *
  73 * Return: BPF programs always return an integer which is interpreted by
  74 * kprobe handler as:
  75 * 0 - return from kprobe (event is filtered out)
  76 * 1 - store kprobe event into ring buffer
  77 * Other values are reserved and currently alias to 1
  78 */
  79unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
  80{
  81	unsigned int ret;
  82
  83	if (in_nmi()) /* not supported yet */
  84		return 1;
  85
  86	preempt_disable();
  87
  88	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
  89		/*
  90		 * since some bpf program is already running on this cpu,
  91		 * don't call into another bpf program (same or different)
  92		 * and don't send kprobe event into ring-buffer,
  93		 * so return zero here
  94		 */
  95		ret = 0;
  96		goto out;
  97	}
  98
  99	/*
 100	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
 101	 * to all call sites, we did a bpf_prog_array_valid() there to check
 102	 * whether call->prog_array is empty or not, which is
  103	 * a heuristic to speed up execution.
 104	 *
 105	 * If bpf_prog_array_valid() fetched prog_array was
 106	 * non-NULL, we go into trace_call_bpf() and do the actual
 107	 * proper rcu_dereference() under RCU lock.
 108	 * If it turns out that prog_array is NULL then, we bail out.
 109	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
 110	 * was NULL, you'll skip the prog_array with the risk of missing
 111	 * out of events when it was updated in between this and the
 112	 * rcu_dereference() which is accepted risk.
 113	 */
 114	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
 115
 116 out:
 117	__this_cpu_dec(bpf_prog_active);
 118	preempt_enable();
 119
 120	return ret;
 121}
 122EXPORT_SYMBOL_GPL(trace_call_bpf);
 123
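/*
 * Illustrative sketch, not part of bpf_trace.c: a minimal kprobe BPF program
 * whose return value is interpreted by the kprobe handler exactly as the
 * comment above describes (0 = filter the event out, 1 = store it in the
 * ring buffer).  Assumes a libbpf-style build with bpf_helpers.h; the probed
 * symbol and program name are made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_sys_open")
int sample_filter(void *ctx)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();

	/* drop events from PID 1, record everything else */
	return (pid_tgid >> 32) == 1 ? 0 : 1;
}

char _license[] SEC("license") = "GPL";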
 124#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 125BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 126{
 127	regs_set_return_value(regs, rc);
 128	override_function_with_return(regs);
 129	return 0;
 130}
 131
 132static const struct bpf_func_proto bpf_override_return_proto = {
 133	.func		= bpf_override_return,
 134	.gpl_only	= true,
 135	.ret_type	= RET_INTEGER,
 136	.arg1_type	= ARG_PTR_TO_CTX,
 137	.arg2_type	= ARG_ANYTHING,
 138};
 139#endif
 140
 141BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
 142{
 143	int ret;
 144
 145	ret = security_locked_down(LOCKDOWN_BPF_READ);
 146	if (ret < 0)
 147		goto out;
 148
 149	ret = probe_kernel_read(dst, unsafe_ptr, size);
 150	if (unlikely(ret < 0))
 151out:
 152		memset(dst, 0, size);
 153
 154	return ret;
 155}
 156
 157static const struct bpf_func_proto bpf_probe_read_proto = {
 158	.func		= bpf_probe_read,
 159	.gpl_only	= true,
 160	.ret_type	= RET_INTEGER,
 161	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 162	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 163	.arg3_type	= ARG_ANYTHING,
 164};
 165
 166BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
 167	   u32, size)
 168{
 169	/*
 170	 * Ensure we're in user context which is safe for the helper to
 171	 * run. This helper has no business in a kthread.
 172	 *
 173	 * access_ok() should prevent writing to non-user memory, but in
 174	 * some situations (nommu, temporary switch, etc) access_ok() does
 175	 * not provide enough validation, hence the check on KERNEL_DS.
 176	 *
 177	 * nmi_uaccess_okay() ensures the probe is not run in an interim
 178	 * state, when the task or mm are switched. This is specifically
 179	 * required to prevent the use of temporary mm.
 180	 */
 181
 182	if (unlikely(in_interrupt() ||
 183		     current->flags & (PF_KTHREAD | PF_EXITING)))
 184		return -EPERM;
 185	if (unlikely(uaccess_kernel()))
 186		return -EPERM;
 187	if (unlikely(!nmi_uaccess_okay()))
 188		return -EPERM;
 189	if (!access_ok(unsafe_ptr, size))
 190		return -EPERM;
 191
 192	return probe_kernel_write(unsafe_ptr, src, size);
 193}
 194
 195static const struct bpf_func_proto bpf_probe_write_user_proto = {
 196	.func		= bpf_probe_write_user,
 197	.gpl_only	= true,
 198	.ret_type	= RET_INTEGER,
 199	.arg1_type	= ARG_ANYTHING,
 200	.arg2_type	= ARG_PTR_TO_MEM,
 201	.arg3_type	= ARG_CONST_SIZE,
 202};
 203
 204static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 205{
 206	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
 207			    current->comm, task_pid_nr(current));
 208
 209	return &bpf_probe_write_user_proto;
 210}
 211
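/*
 * Illustrative sketch, not part of bpf_trace.c: how a (GPL-only) program
 * might call bpf_probe_write_user(); loading it triggers the ratelimited
 * warning above.  Assumes libbpf's bpf_tracing.h with the target arch
 * defined; the probed symbol and the assumption that PT_REGS_PARM2() carries
 * a writable user pointer here are made up for the example.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_open")
int poke_user_buf(struct pt_regs *ctx)
{
	__u64 zero = 0;
	void *uptr = (void *)PT_REGS_PARM2(ctx);

	/* returns -EPERM unless called from sane process context, see above */
	bpf_probe_write_user(uptr, &zero, sizeof(zero));
	return 0;
}

char _license[] SEC("license") = "GPL";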
 212/*
 213 * Only limited trace_printk() conversion specifiers allowed:
 214 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 215 */
 216BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 217	   u64, arg2, u64, arg3)
 218{
 219	bool str_seen = false;
 220	int mod[3] = {};
 221	int fmt_cnt = 0;
 222	u64 unsafe_addr;
 223	char buf[64];
 224	int i;
 225
 226	/*
 227	 * bpf_check()->check_func_arg()->check_stack_boundary()
 228	 * guarantees that fmt points to bpf program stack,
 229	 * fmt_size bytes of it were initialized and fmt_size > 0
 230	 */
 231	if (fmt[--fmt_size] != 0)
 232		return -EINVAL;
 233
 234	/* check format string for allowed specifiers */
 235	for (i = 0; i < fmt_size; i++) {
 236		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
 237			return -EINVAL;
 238
 239		if (fmt[i] != '%')
 240			continue;
 241
 242		if (fmt_cnt >= 3)
 243			return -EINVAL;
 244
 245		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
 246		i++;
 247		if (fmt[i] == 'l') {
 248			mod[fmt_cnt]++;
 249			i++;
 250		} else if (fmt[i] == 'p' || fmt[i] == 's') {
 251			mod[fmt_cnt]++;
 252			/* disallow any further format extensions */
 253			if (fmt[i + 1] != 0 &&
 254			    !isspace(fmt[i + 1]) &&
 255			    !ispunct(fmt[i + 1]))
 256				return -EINVAL;
 257			fmt_cnt++;
 258			if (fmt[i] == 's') {
 259				if (str_seen)
 260					/* allow only one '%s' per fmt string */
 261					return -EINVAL;
 262				str_seen = true;
 263
 264				switch (fmt_cnt) {
 265				case 1:
 266					unsafe_addr = arg1;
 267					arg1 = (long) buf;
 268					break;
 269				case 2:
 270					unsafe_addr = arg2;
 271					arg2 = (long) buf;
 272					break;
 273				case 3:
 274					unsafe_addr = arg3;
 275					arg3 = (long) buf;
 276					break;
 277				}
 278				buf[0] = 0;
 279				strncpy_from_unsafe(buf,
 280						    (void *) (long) unsafe_addr,
 281						    sizeof(buf));
 282			}
 283			continue;
 284		}
 285
 286		if (fmt[i] == 'l') {
 287			mod[fmt_cnt]++;
 288			i++;
 289		}
 290
 291		if (fmt[i] != 'i' && fmt[i] != 'd' &&
 292		    fmt[i] != 'u' && fmt[i] != 'x')
 293			return -EINVAL;
 294		fmt_cnt++;
 295	}
 296
 297/* Horrid workaround for getting va_list handling working with different
 298 * argument type combinations generically for 32 and 64 bit archs.
 299 */
 300#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
 301#define __BPF_TP(...)							\
 302	__trace_printk(0 /* Fake ip */,					\
 303		       fmt, ##__VA_ARGS__)
 304
 305#define __BPF_ARG1_TP(...)						\
 306	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
 307	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
 308	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
 309	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
 310	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
 311
 312#define __BPF_ARG2_TP(...)						\
 313	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
 314	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
 315	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
 316	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
 317	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
 318
 319#define __BPF_ARG3_TP(...)						\
 320	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
 321	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
 322	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
 323	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
 324	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
 325
 326	return __BPF_TP_EMIT();
 327}
 328
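/*
 * Illustrative note, not part of bpf_trace.c: for a format like "%d %lu %llx"
 * the loop above records mod[] = {0, 1, 2}, and on a 64-bit kernel
 * __BPF_TP_EMIT() expands (through __BPF_ARG3_TP/__BPF_ARG2_TP/__BPF_ARG1_TP)
 * to roughly:
 *
 *	__trace_printk(0, fmt, (u32)arg1, arg2, arg3);
 *
 * i.e. each argument is cast according to how many 'l' modifiers its
 * conversion specifier carried.
 */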
 329static const struct bpf_func_proto bpf_trace_printk_proto = {
 330	.func		= bpf_trace_printk,
 331	.gpl_only	= true,
 332	.ret_type	= RET_INTEGER,
 333	.arg1_type	= ARG_PTR_TO_MEM,
 334	.arg2_type	= ARG_CONST_SIZE,
 335};
 336
 337const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 338{
 339	/*
 340	 * this program might be calling bpf_trace_printk,
 341	 * so allocate per-cpu printk buffers
 342	 */
 343	trace_printk_init_buffers();
 344
 345	return &bpf_trace_printk_proto;
 346}
 347
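/*
 * Illustrative sketch, not part of bpf_trace.c: calling bpf_trace_printk()
 * from a program while staying within the conversion specifiers permitted by
 * the checks above (at most three arguments, at most one "%s").  Assumes a
 * libbpf-style build; the probed symbol is made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_sys_open")
int trace_open(void *ctx)
{
	char fmt[] = "open by pid %d, cpu %u\n";

	bpf_trace_printk(fmt, sizeof(fmt),
			 (__u32)(bpf_get_current_pid_tgid() >> 32),
			 bpf_get_smp_processor_id());
	return 0;
}

char _license[] SEC("license") = "GPL";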
 348static __always_inline int
 349get_map_perf_counter(struct bpf_map *map, u64 flags,
 350		     u64 *value, u64 *enabled, u64 *running)
 351{
 352	struct bpf_array *array = container_of(map, struct bpf_array, map);
 353	unsigned int cpu = smp_processor_id();
 354	u64 index = flags & BPF_F_INDEX_MASK;
 355	struct bpf_event_entry *ee;
 356
 357	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 358		return -EINVAL;
 359	if (index == BPF_F_CURRENT_CPU)
 360		index = cpu;
 361	if (unlikely(index >= array->map.max_entries))
 362		return -E2BIG;
 363
 364	ee = READ_ONCE(array->ptrs[index]);
 365	if (!ee)
 366		return -ENOENT;
 367
 368	return perf_event_read_local(ee->event, value, enabled, running);
 369}
 370
 371BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 372{
 373	u64 value = 0;
 374	int err;
 375
 376	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
 377	/*
 378	 * this api is ugly since we miss [-22..-2] range of valid
 379	 * counter values, but that's uapi
 380	 */
 381	if (err)
 382		return err;
 383	return value;
 384}
 385
 386static const struct bpf_func_proto bpf_perf_event_read_proto = {
 387	.func		= bpf_perf_event_read,
 388	.gpl_only	= true,
 389	.ret_type	= RET_INTEGER,
 390	.arg1_type	= ARG_CONST_MAP_PTR,
 391	.arg2_type	= ARG_ANYTHING,
 392};
 393
 394BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
 395	   struct bpf_perf_event_value *, buf, u32, size)
 396{
 397	int err = -EINVAL;
 398
 399	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 400		goto clear;
 401	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
 402				   &buf->running);
 403	if (unlikely(err))
 404		goto clear;
 405	return 0;
 406clear:
 407	memset(buf, 0, size);
 408	return err;
 409}
 410
 411static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 412	.func		= bpf_perf_event_read_value,
 413	.gpl_only	= true,
 414	.ret_type	= RET_INTEGER,
 415	.arg1_type	= ARG_CONST_MAP_PTR,
 416	.arg2_type	= ARG_ANYTHING,
 417	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
 418	.arg4_type	= ARG_CONST_SIZE,
 419};
 420
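/*
 * Illustrative sketch, not part of bpf_trace.c: reading a counter through a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY with bpf_perf_event_read_value(), which lands
 * in the helper above.  User space is assumed to have stored a perf event fd
 * per CPU in "counters"; the map name and probed symbol are made up, and the
 * BTF-style map definition assumes a recent libbpf.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 128);
} counters SEC(".maps");

SEC("kprobe/finish_task_switch")
int on_sched_switch(void *ctx)
{
	struct bpf_perf_event_value v = {};

	/* BPF_F_CURRENT_CPU selects the counter slot for the current CPU */
	if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU, &v, sizeof(v)))
		return 0;
	/* v.counter / v.enabled / v.running are now usable, e.g. store them in a map */
	return 0;
}

char _license[] SEC("license") = "GPL";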
 421static __always_inline u64
 422__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 423			u64 flags, struct perf_sample_data *sd)
 424{
 425	struct bpf_array *array = container_of(map, struct bpf_array, map);
 426	unsigned int cpu = smp_processor_id();
 427	u64 index = flags & BPF_F_INDEX_MASK;
 428	struct bpf_event_entry *ee;
 429	struct perf_event *event;
 430
 431	if (index == BPF_F_CURRENT_CPU)
 432		index = cpu;
 433	if (unlikely(index >= array->map.max_entries))
 434		return -E2BIG;
 435
 436	ee = READ_ONCE(array->ptrs[index]);
 437	if (!ee)
 438		return -ENOENT;
 439
 440	event = ee->event;
 441	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 442		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 443		return -EINVAL;
 444
 445	if (unlikely(event->oncpu != cpu))
 446		return -EOPNOTSUPP;
 447
 448	return perf_event_output(event, sd, regs);
 449}
 450
 451/*
 452 * Support executing tracepoints in normal, irq, and nmi context that each call
 453 * bpf_perf_event_output
 454 */
 455struct bpf_trace_sample_data {
 456	struct perf_sample_data sds[3];
 457};
 458
 459static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
 460static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 461BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 462	   u64, flags, void *, data, u64, size)
 463{
 464	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
 465	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 466	struct perf_raw_record raw = {
 467		.frag = {
 468			.size = size,
 469			.data = data,
 470		},
 471	};
 472	struct perf_sample_data *sd;
 473	int err;
 474
 475	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
 476		err = -EBUSY;
 477		goto out;
 478	}
 479
 480	sd = &sds->sds[nest_level - 1];
 481
 482	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
 483		err = -EINVAL;
 484		goto out;
 485	}
 486
 487	perf_sample_data_init(sd, 0, 0);
 488	sd->raw = &raw;
 489
 490	err = __bpf_perf_event_output(regs, map, flags, sd);
 491
 492out:
 493	this_cpu_dec(bpf_trace_nest_level);
 494	return err;
 495}
 496
 497static const struct bpf_func_proto bpf_perf_event_output_proto = {
 498	.func		= bpf_perf_event_output,
 499	.gpl_only	= true,
 500	.ret_type	= RET_INTEGER,
 501	.arg1_type	= ARG_PTR_TO_CTX,
 502	.arg2_type	= ARG_CONST_MAP_PTR,
 503	.arg3_type	= ARG_ANYTHING,
 504	.arg4_type	= ARG_PTR_TO_MEM,
 505	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 506};
 507
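/*
 * Illustrative sketch, not part of bpf_trace.c: streaming a small record to
 * user space with bpf_perf_event_output(), which ends up in the helper above.
 * The map must be a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose entries are
 * PERF_COUNT_SW_BPF_OUTPUT events opened by user space; the map name, record
 * layout and probed symbol are made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 128);
} events SEC(".maps");

struct event_rec {
	__u32 pid;
	__u32 cpu;
};

SEC("kprobe/do_sys_open")
int emit_event(void *ctx)
{
	struct event_rec rec = {
		.pid = bpf_get_current_pid_tgid() >> 32,
		.cpu = bpf_get_smp_processor_id(),
	};

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &rec, sizeof(rec));
	return 0;
}

char _license[] SEC("license") = "GPL";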
 508static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
 509struct bpf_nested_pt_regs {
 510	struct pt_regs regs[3];
 511};
 512static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
 513static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 514
 515u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 516		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 517{
 518	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 519	struct perf_raw_frag frag = {
 520		.copy		= ctx_copy,
 521		.size		= ctx_size,
 522		.data		= ctx,
 523	};
 524	struct perf_raw_record raw = {
 525		.frag = {
 526			{
 527				.next	= ctx_size ? &frag : NULL,
 528			},
 529			.size	= meta_size,
 530			.data	= meta,
 531		},
 532	};
 533	struct perf_sample_data *sd;
 534	struct pt_regs *regs;
 535	u64 ret;
 536
 537	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
 538		ret = -EBUSY;
 539		goto out;
 540	}
 541	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
 542	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 543
 544	perf_fetch_caller_regs(regs);
 545	perf_sample_data_init(sd, 0, 0);
 546	sd->raw = &raw;
 547
 548	ret = __bpf_perf_event_output(regs, map, flags, sd);
 549out:
 550	this_cpu_dec(bpf_event_output_nest_level);
 551	return ret;
 552}
 553
 554BPF_CALL_0(bpf_get_current_task)
 555{
 556	return (long) current;
 557}
 558
 559static const struct bpf_func_proto bpf_get_current_task_proto = {
 560	.func		= bpf_get_current_task,
 561	.gpl_only	= true,
 562	.ret_type	= RET_INTEGER,
 563};
 564
 565BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
 566{
 567	struct bpf_array *array = container_of(map, struct bpf_array, map);
 568	struct cgroup *cgrp;
 569
 570	if (unlikely(idx >= array->map.max_entries))
 571		return -E2BIG;
 572
 573	cgrp = READ_ONCE(array->ptrs[idx]);
 574	if (unlikely(!cgrp))
 575		return -EAGAIN;
 576
 577	return task_under_cgroup_hierarchy(current, cgrp);
 578}
 579
 580static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
 581	.func           = bpf_current_task_under_cgroup,
 582	.gpl_only       = false,
 583	.ret_type       = RET_INTEGER,
 584	.arg1_type      = ARG_CONST_MAP_PTR,
 585	.arg2_type      = ARG_ANYTHING,
 586};
 587
 588BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
 589	   const void *, unsafe_ptr)
 590{
 591	int ret;
 592
 593	ret = security_locked_down(LOCKDOWN_BPF_READ);
 594	if (ret < 0)
 595		goto out;
 596
 597	/*
 598	 * The strncpy_from_unsafe() call will likely not fill the entire
 599	 * buffer, but that's okay in this circumstance as we're probing
 600	 * arbitrary memory anyway similar to bpf_probe_read() and might
 601	 * as well probe the stack. Thus, memory is explicitly cleared
 602	 * only in error case, so that improper users ignoring return
 603	 * code altogether don't copy garbage; otherwise length of string
 604	 * is returned that can be used for bpf_perf_event_output() et al.
 605	 */
 606	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
 607	if (unlikely(ret < 0))
 608out:
 609		memset(dst, 0, size);
 610
 611	return ret;
 612}
 613
 614static const struct bpf_func_proto bpf_probe_read_str_proto = {
 615	.func		= bpf_probe_read_str,
 616	.gpl_only	= true,
 617	.ret_type	= RET_INTEGER,
 618	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 619	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 620	.arg3_type	= ARG_ANYTHING,
 621};
 622
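/*
 * Illustrative sketch, not part of bpf_trace.c: pulling a NUL-terminated
 * string with bpf_probe_read_str() so that the returned length can be fed to
 * bpf_perf_event_output() as the comment above suggests.  Assumes libbpf's
 * bpf_tracing.h with the target arch defined; using do_sys_open()'s second
 * argument as the filename pointer is an assumption for the example.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_open")
int log_filename(struct pt_regs *ctx)
{
	char name[64];
	int len;

	len = bpf_probe_read_str(name, sizeof(name),
				 (const void *)PT_REGS_PARM2(ctx));
	if (len < 0)
		return 0;
	/* 'len' includes the trailing NUL and bounds the useful bytes in name[] */
	return 0;
}

char _license[] SEC("license") = "GPL";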
 623struct send_signal_irq_work {
 624	struct irq_work irq_work;
 625	struct task_struct *task;
 626	u32 sig;
 627};
 628
 629static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
 630
 631static void do_bpf_send_signal(struct irq_work *entry)
 632{
 633	struct send_signal_irq_work *work;
 634
 635	work = container_of(entry, struct send_signal_irq_work, irq_work);
 636	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
 637}
 638
 639BPF_CALL_1(bpf_send_signal, u32, sig)
 640{
 641	struct send_signal_irq_work *work = NULL;
 642
 643	/* Similar to bpf_probe_write_user, task needs to be
 644	 * in a sound condition and kernel memory access be
 645	 * permitted in order to send signal to the current
 646	 * task.
 647	 */
 648	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
 649		return -EPERM;
 650	if (unlikely(uaccess_kernel()))
 651		return -EPERM;
 652	if (unlikely(!nmi_uaccess_okay()))
 653		return -EPERM;
 654
 655	if (in_nmi()) {
 656		/* Do an early check on signal validity. Otherwise,
 657		 * the error is lost in deferred irq_work.
 658		 */
 659		if (unlikely(!valid_signal(sig)))
 660			return -EINVAL;
 661
 662		work = this_cpu_ptr(&send_signal_work);
 663		if (work->irq_work.flags & IRQ_WORK_BUSY)
 664			return -EBUSY;
 665
 666		/* Add the current task, which is the target of sending signal,
 667		 * to the irq_work. The current task may change when queued
 668		 * irq works get executed.
 669		 */
 670		work->task = current;
 671		work->sig = sig;
 672		irq_work_queue(&work->irq_work);
 673		return 0;
 674	}
 675
 676	return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
 677}
 678
 679static const struct bpf_func_proto bpf_send_signal_proto = {
 680	.func		= bpf_send_signal,
 681	.gpl_only	= false,
 682	.ret_type	= RET_INTEGER,
 683	.arg1_type	= ARG_ANYTHING,
 684};
 685
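/*
 * Illustrative sketch, not part of bpf_trace.c: asking the kernel to signal
 * the current task from a program via bpf_send_signal().  The victim pid,
 * signal number and probed symbol are made up for the example (10 is SIGUSR1
 * on x86-64).
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_sys_open")
int nudge_task(void *ctx)
{
	/* only signal one specific process, chosen arbitrarily here */
	if ((bpf_get_current_pid_tgid() >> 32) == 1234)
		bpf_send_signal(10);
	return 0;
}

char _license[] SEC("license") = "GPL";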
 686static const struct bpf_func_proto *
 687tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 688{
 689	switch (func_id) {
 690	case BPF_FUNC_map_lookup_elem:
 691		return &bpf_map_lookup_elem_proto;
 692	case BPF_FUNC_map_update_elem:
 693		return &bpf_map_update_elem_proto;
 694	case BPF_FUNC_map_delete_elem:
 695		return &bpf_map_delete_elem_proto;
 696	case BPF_FUNC_map_push_elem:
 697		return &bpf_map_push_elem_proto;
 698	case BPF_FUNC_map_pop_elem:
 699		return &bpf_map_pop_elem_proto;
 700	case BPF_FUNC_map_peek_elem:
 701		return &bpf_map_peek_elem_proto;
 702	case BPF_FUNC_probe_read:
 703		return &bpf_probe_read_proto;
 704	case BPF_FUNC_ktime_get_ns:
 705		return &bpf_ktime_get_ns_proto;
 706	case BPF_FUNC_tail_call:
 707		return &bpf_tail_call_proto;
 708	case BPF_FUNC_get_current_pid_tgid:
 709		return &bpf_get_current_pid_tgid_proto;
 710	case BPF_FUNC_get_current_task:
 711		return &bpf_get_current_task_proto;
 712	case BPF_FUNC_get_current_uid_gid:
 713		return &bpf_get_current_uid_gid_proto;
 714	case BPF_FUNC_get_current_comm:
 715		return &bpf_get_current_comm_proto;
 716	case BPF_FUNC_trace_printk:
 717		return bpf_get_trace_printk_proto();
 718	case BPF_FUNC_get_smp_processor_id:
 719		return &bpf_get_smp_processor_id_proto;
 720	case BPF_FUNC_get_numa_node_id:
 721		return &bpf_get_numa_node_id_proto;
 722	case BPF_FUNC_perf_event_read:
 723		return &bpf_perf_event_read_proto;
 724	case BPF_FUNC_probe_write_user:
 725		return bpf_get_probe_write_proto();
 726	case BPF_FUNC_current_task_under_cgroup:
 727		return &bpf_current_task_under_cgroup_proto;
 728	case BPF_FUNC_get_prandom_u32:
 729		return &bpf_get_prandom_u32_proto;
 730	case BPF_FUNC_probe_read_str:
 731		return &bpf_probe_read_str_proto;
 732#ifdef CONFIG_CGROUPS
 733	case BPF_FUNC_get_current_cgroup_id:
 734		return &bpf_get_current_cgroup_id_proto;
 735#endif
 736	case BPF_FUNC_send_signal:
 737		return &bpf_send_signal_proto;
 738	default:
 739		return NULL;
 740	}
 741}
 742
 743static const struct bpf_func_proto *
 744kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 745{
 746	switch (func_id) {
 747	case BPF_FUNC_perf_event_output:
 748		return &bpf_perf_event_output_proto;
 749	case BPF_FUNC_get_stackid:
 750		return &bpf_get_stackid_proto;
 751	case BPF_FUNC_get_stack:
 752		return &bpf_get_stack_proto;
 753	case BPF_FUNC_perf_event_read_value:
 754		return &bpf_perf_event_read_value_proto;
 755#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 756	case BPF_FUNC_override_return:
 757		return &bpf_override_return_proto;
 758#endif
 759	default:
 760		return tracing_func_proto(func_id, prog);
 761	}
 762}
 763
 764/* bpf+kprobe programs can access fields of 'struct pt_regs' */
 765static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 766					const struct bpf_prog *prog,
 767					struct bpf_insn_access_aux *info)
 768{
 769	if (off < 0 || off >= sizeof(struct pt_regs))
 770		return false;
 771	if (type != BPF_READ)
 772		return false;
 773	if (off % size != 0)
 774		return false;
 775	/*
 776	 * Assertion for 32 bit to make sure last 8 byte access
 777	 * (BPF_DW) to the last 4 byte member is disallowed.
 778	 */
 779	if (off + size > sizeof(struct pt_regs))
 780		return false;
 781
 782	return true;
 783}
 784
 785const struct bpf_verifier_ops kprobe_verifier_ops = {
 786	.get_func_proto  = kprobe_prog_func_proto,
 787	.is_valid_access = kprobe_prog_is_valid_access,
 788};
 789
 790const struct bpf_prog_ops kprobe_prog_ops = {
 791};
 792
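/*
 * Illustrative sketch, not part of bpf_trace.c: the context of a kprobe
 * program is the raw struct pt_regs, and kprobe_prog_is_valid_access() above
 * only allows aligned, in-bounds BPF_READ accesses to it.  PT_REGS_PARM1()
 * from libbpf's bpf_tracing.h is just such a read; the probed symbol is made
 * up for the example.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/kfree_skb")
int inspect_regs(struct pt_regs *ctx)
{
	/* first argument register, read straight out of the pt_regs context */
	unsigned long arg0 = PT_REGS_PARM1(ctx);

	return arg0 != 0;
}

char _license[] SEC("license") = "GPL";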
 793BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
 794	   u64, flags, void *, data, u64, size)
 795{
 796	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
 797
 798	/*
 799	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
 800	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
 801	 * from there and call the same bpf_perf_event_output() helper inline.
 802	 */
 803	return ____bpf_perf_event_output(regs, map, flags, data, size);
 804}
 805
 806static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
 807	.func		= bpf_perf_event_output_tp,
 808	.gpl_only	= true,
 809	.ret_type	= RET_INTEGER,
 810	.arg1_type	= ARG_PTR_TO_CTX,
 811	.arg2_type	= ARG_CONST_MAP_PTR,
 812	.arg3_type	= ARG_ANYTHING,
 813	.arg4_type	= ARG_PTR_TO_MEM,
 814	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 815};
 816
 817BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
 818	   u64, flags)
 819{
 820	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
 821
 822	/*
 823	 * Same comment as in bpf_perf_event_output_tp(), only that this time
 824	 * the other helper's function body cannot be inlined due to being
 825	 * external, thus we need to call raw helper function.
 826	 */
 827	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
 828			       flags, 0, 0);
 829}
 830
 831static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
 832	.func		= bpf_get_stackid_tp,
 833	.gpl_only	= true,
 834	.ret_type	= RET_INTEGER,
 835	.arg1_type	= ARG_PTR_TO_CTX,
 836	.arg2_type	= ARG_CONST_MAP_PTR,
 837	.arg3_type	= ARG_ANYTHING,
 838};
 839
 840BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
 841	   u64, flags)
 842{
 843	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
 844
 845	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
 846			     (unsigned long) size, flags, 0);
 847}
 848
 849static const struct bpf_func_proto bpf_get_stack_proto_tp = {
 850	.func		= bpf_get_stack_tp,
 851	.gpl_only	= true,
 852	.ret_type	= RET_INTEGER,
 853	.arg1_type	= ARG_PTR_TO_CTX,
 854	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
 855	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 856	.arg4_type	= ARG_ANYTHING,
 857};
 858
 859static const struct bpf_func_proto *
 860tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 861{
 862	switch (func_id) {
 863	case BPF_FUNC_perf_event_output:
 864		return &bpf_perf_event_output_proto_tp;
 865	case BPF_FUNC_get_stackid:
 866		return &bpf_get_stackid_proto_tp;
 867	case BPF_FUNC_get_stack:
 868		return &bpf_get_stack_proto_tp;
 869	default:
 870		return tracing_func_proto(func_id, prog);
 871	}
 872}
 873
 874static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 875				    const struct bpf_prog *prog,
 876				    struct bpf_insn_access_aux *info)
 877{
 878	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
 879		return false;
 880	if (type != BPF_READ)
 881		return false;
 882	if (off % size != 0)
 883		return false;
 884
 885	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
 886	return true;
 887}
 888
 889const struct bpf_verifier_ops tracepoint_verifier_ops = {
 890	.get_func_proto  = tp_prog_func_proto,
 891	.is_valid_access = tp_prog_is_valid_access,
 892};
 893
 894const struct bpf_prog_ops tracepoint_prog_ops = {
 895};
 896
 897BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
 898	   struct bpf_perf_event_value *, buf, u32, size)
 899{
 900	int err = -EINVAL;
 901
 902	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 903		goto clear;
 904	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
 905				    &buf->running);
 906	if (unlikely(err))
 907		goto clear;
 908	return 0;
 909clear:
 910	memset(buf, 0, size);
 911	return err;
 912}
 913
 914static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
 915         .func           = bpf_perf_prog_read_value,
 916         .gpl_only       = true,
 917         .ret_type       = RET_INTEGER,
 918         .arg1_type      = ARG_PTR_TO_CTX,
 919         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
 920         .arg3_type      = ARG_CONST_SIZE,
 921};
 922
 923static const struct bpf_func_proto *
 924pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 925{
 926	switch (func_id) {
 927	case BPF_FUNC_perf_event_output:
 928		return &bpf_perf_event_output_proto_tp;
 929	case BPF_FUNC_get_stackid:
 930		return &bpf_get_stackid_proto_tp;
 931	case BPF_FUNC_get_stack:
 932		return &bpf_get_stack_proto_tp;
 933	case BPF_FUNC_perf_prog_read_value:
 934		return &bpf_perf_prog_read_value_proto;
 935	default:
 936		return tracing_func_proto(func_id, prog);
 937	}
 938}
 939
 940/*
 941 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 942 * to avoid potential recursive reuse issue when/if tracepoints are added
 943 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 944 *
 945 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 946 * in normal, irq, and nmi context.
 947 */
 948struct bpf_raw_tp_regs {
 949	struct pt_regs regs[3];
 950};
 951static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
 952static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
 953static struct pt_regs *get_bpf_raw_tp_regs(void)
 954{
 955	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
 956	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
 957
 958	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
 959		this_cpu_dec(bpf_raw_tp_nest_level);
 960		return ERR_PTR(-EBUSY);
 961	}
 962
 963	return &tp_regs->regs[nest_level - 1];
 964}
 965
 966static void put_bpf_raw_tp_regs(void)
 967{
 968	this_cpu_dec(bpf_raw_tp_nest_level);
 969}
 970
 971BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
 972	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
 973{
 974	struct pt_regs *regs = get_bpf_raw_tp_regs();
 975	int ret;
 976
 977	if (IS_ERR(regs))
 978		return PTR_ERR(regs);
 979
 980	perf_fetch_caller_regs(regs);
 981	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
 982
 983	put_bpf_raw_tp_regs();
 984	return ret;
 985}
 986
 987static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
 988	.func		= bpf_perf_event_output_raw_tp,
 989	.gpl_only	= true,
 990	.ret_type	= RET_INTEGER,
 991	.arg1_type	= ARG_PTR_TO_CTX,
 992	.arg2_type	= ARG_CONST_MAP_PTR,
 993	.arg3_type	= ARG_ANYTHING,
 994	.arg4_type	= ARG_PTR_TO_MEM,
 995	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 996};
 997
 998BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
 999	   struct bpf_map *, map, u64, flags)
1000{
1001	struct pt_regs *regs = get_bpf_raw_tp_regs();
1002	int ret;
1003
1004	if (IS_ERR(regs))
1005		return PTR_ERR(regs);
1006
1007	perf_fetch_caller_regs(regs);
1008	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1009	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1010			      flags, 0, 0);
1011	put_bpf_raw_tp_regs();
1012	return ret;
1013}
1014
1015static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1016	.func		= bpf_get_stackid_raw_tp,
1017	.gpl_only	= true,
1018	.ret_type	= RET_INTEGER,
1019	.arg1_type	= ARG_PTR_TO_CTX,
1020	.arg2_type	= ARG_CONST_MAP_PTR,
1021	.arg3_type	= ARG_ANYTHING,
1022};
1023
1024BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1025	   void *, buf, u32, size, u64, flags)
1026{
1027	struct pt_regs *regs = get_bpf_raw_tp_regs();
1028	int ret;
1029
1030	if (IS_ERR(regs))
1031		return PTR_ERR(regs);
1032
1033	perf_fetch_caller_regs(regs);
1034	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1035			    (unsigned long) size, flags, 0);
1036	put_bpf_raw_tp_regs();
1037	return ret;
1038}
1039
1040static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1041	.func		= bpf_get_stack_raw_tp,
1042	.gpl_only	= true,
1043	.ret_type	= RET_INTEGER,
1044	.arg1_type	= ARG_PTR_TO_CTX,
1045	.arg2_type	= ARG_PTR_TO_MEM,
1046	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1047	.arg4_type	= ARG_ANYTHING,
1048};
1049
1050static const struct bpf_func_proto *
1051raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1052{
1053	switch (func_id) {
1054	case BPF_FUNC_perf_event_output:
1055		return &bpf_perf_event_output_proto_raw_tp;
1056	case BPF_FUNC_get_stackid:
1057		return &bpf_get_stackid_proto_raw_tp;
1058	case BPF_FUNC_get_stack:
1059		return &bpf_get_stack_proto_raw_tp;
1060	default:
1061		return tracing_func_proto(func_id, prog);
1062	}
1063}
1064
1065static bool raw_tp_prog_is_valid_access(int off, int size,
1066					enum bpf_access_type type,
1067					const struct bpf_prog *prog,
1068					struct bpf_insn_access_aux *info)
1069{
1070	/* largest tracepoint in the kernel has 12 args */
1071	if (off < 0 || off >= sizeof(__u64) * 12)
1072		return false;
1073	if (type != BPF_READ)
1074		return false;
1075	if (off % size != 0)
1076		return false;
1077	return true;
1078}
1079
1080const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1081	.get_func_proto  = raw_tp_prog_func_proto,
1082	.is_valid_access = raw_tp_prog_is_valid_access,
1083};
1084
1085const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1086};
1087
1088static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1089						 enum bpf_access_type type,
1090						 const struct bpf_prog *prog,
1091						 struct bpf_insn_access_aux *info)
1092{
1093	if (off == 0) {
1094		if (size != sizeof(u64) || type != BPF_READ)
1095			return false;
1096		info->reg_type = PTR_TO_TP_BUFFER;
1097	}
1098	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1099}
1100
1101const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1102	.get_func_proto  = raw_tp_prog_func_proto,
1103	.is_valid_access = raw_tp_writable_prog_is_valid_access,
1104};
1105
1106const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1107};
1108
1109static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1110				    const struct bpf_prog *prog,
1111				    struct bpf_insn_access_aux *info)
1112{
1113	const int size_u64 = sizeof(u64);
1114
1115	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1116		return false;
1117	if (type != BPF_READ)
1118		return false;
1119	if (off % size != 0) {
1120		if (sizeof(unsigned long) != 4)
1121			return false;
1122		if (size != 8)
1123			return false;
1124		if (off % size != 4)
1125			return false;
1126	}
1127
1128	switch (off) {
1129	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1130		bpf_ctx_record_field_size(info, size_u64);
1131		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1132			return false;
1133		break;
1134	case bpf_ctx_range(struct bpf_perf_event_data, addr):
1135		bpf_ctx_record_field_size(info, size_u64);
1136		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1137			return false;
1138		break;
1139	default:
1140		if (size != sizeof(long))
1141			return false;
1142	}
1143
1144	return true;
1145}
1146
1147static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1148				      const struct bpf_insn *si,
1149				      struct bpf_insn *insn_buf,
1150				      struct bpf_prog *prog, u32 *target_size)
1151{
1152	struct bpf_insn *insn = insn_buf;
1153
1154	switch (si->off) {
1155	case offsetof(struct bpf_perf_event_data, sample_period):
1156		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1157						       data), si->dst_reg, si->src_reg,
1158				      offsetof(struct bpf_perf_event_data_kern, data));
1159		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1160				      bpf_target_off(struct perf_sample_data, period, 8,
1161						     target_size));
1162		break;
1163	case offsetof(struct bpf_perf_event_data, addr):
1164		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1165						       data), si->dst_reg, si->src_reg,
1166				      offsetof(struct bpf_perf_event_data_kern, data));
1167		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1168				      bpf_target_off(struct perf_sample_data, addr, 8,
1169						     target_size));
1170		break;
1171	default:
1172		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1173						       regs), si->dst_reg, si->src_reg,
1174				      offsetof(struct bpf_perf_event_data_kern, regs));
1175		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1176				      si->off);
1177		break;
1178	}
1179
1180	return insn - insn_buf;
1181}
1182
1183const struct bpf_verifier_ops perf_event_verifier_ops = {
1184	.get_func_proto		= pe_prog_func_proto,
1185	.is_valid_access	= pe_prog_is_valid_access,
1186	.convert_ctx_access	= pe_prog_convert_ctx_access,
1187};
1188
1189const struct bpf_prog_ops perf_event_prog_ops = {
1190};
1191
1192static DEFINE_MUTEX(bpf_event_mutex);
1193
1194#define BPF_TRACE_MAX_PROGS 64
1195
1196int perf_event_attach_bpf_prog(struct perf_event *event,
1197			       struct bpf_prog *prog)
1198{
1199	struct bpf_prog_array *old_array;
1200	struct bpf_prog_array *new_array;
1201	int ret = -EEXIST;
1202
1203	/*
1204	 * Kprobe override only works if they are on the function entry,
1205	 * and only if they are on the opt-in list.
1206	 */
1207	if (prog->kprobe_override &&
1208	    (!trace_kprobe_on_func_entry(event->tp_event) ||
1209	     !trace_kprobe_error_injectable(event->tp_event)))
1210		return -EINVAL;
1211
1212	mutex_lock(&bpf_event_mutex);
1213
1214	if (event->prog)
1215		goto unlock;
1216
1217	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1218	if (old_array &&
1219	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1220		ret = -E2BIG;
1221		goto unlock;
1222	}
1223
1224	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1225	if (ret < 0)
1226		goto unlock;
1227
1228	/* set the new array to event->tp_event and set event->prog */
1229	event->prog = prog;
1230	rcu_assign_pointer(event->tp_event->prog_array, new_array);
1231	bpf_prog_array_free(old_array);
1232
1233unlock:
1234	mutex_unlock(&bpf_event_mutex);
1235	return ret;
1236}
1237
1238void perf_event_detach_bpf_prog(struct perf_event *event)
1239{
1240	struct bpf_prog_array *old_array;
1241	struct bpf_prog_array *new_array;
1242	int ret;
1243
1244	mutex_lock(&bpf_event_mutex);
1245
1246	if (!event->prog)
1247		goto unlock;
1248
1249	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1250	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1251	if (ret == -ENOENT)
1252		goto unlock;
1253	if (ret < 0) {
1254		bpf_prog_array_delete_safe(old_array, event->prog);
1255	} else {
1256		rcu_assign_pointer(event->tp_event->prog_array, new_array);
1257		bpf_prog_array_free(old_array);
1258	}
1259
1260	bpf_prog_put(event->prog);
1261	event->prog = NULL;
1262
1263unlock:
1264	mutex_unlock(&bpf_event_mutex);
1265}
1266
1267int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1268{
1269	struct perf_event_query_bpf __user *uquery = info;
1270	struct perf_event_query_bpf query = {};
1271	struct bpf_prog_array *progs;
1272	u32 *ids, prog_cnt, ids_len;
1273	int ret;
1274
1275	if (!capable(CAP_SYS_ADMIN))
1276		return -EPERM;
1277	if (event->attr.type != PERF_TYPE_TRACEPOINT)
1278		return -EINVAL;
1279	if (copy_from_user(&query, uquery, sizeof(query)))
1280		return -EFAULT;
1281
1282	ids_len = query.ids_len;
1283	if (ids_len > BPF_TRACE_MAX_PROGS)
1284		return -E2BIG;
1285	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1286	if (!ids)
1287		return -ENOMEM;
1288	/*
1289	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1290	 * is required when user only wants to check for uquery->prog_cnt.
1291	 * There is no need to check for it since the case is handled
1292	 * gracefully in bpf_prog_array_copy_info.
1293	 */
1294
1295	mutex_lock(&bpf_event_mutex);
1296	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1297	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1298	mutex_unlock(&bpf_event_mutex);
1299
1300	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1301	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1302		ret = -EFAULT;
1303
1304	kfree(ids);
1305	return ret;
1306}
1307
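/*
 * Illustrative user-space sketch, not part of bpf_trace.c: querying the
 * programs attached to a tracepoint perf event fd, which is serviced by
 * perf_event_query_prog_array() above (CAP_SYS_ADMIN is required).  Error
 * handling is minimal and the fd is assumed to come from perf_event_open()
 * with PERF_TYPE_TRACEPOINT.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void print_attached_progs(int perf_fd)
{
	unsigned int max_ids = 64;	/* matches BPF_TRACE_MAX_PROGS */
	struct perf_event_query_bpf *query;
	unsigned int i;

	query = calloc(1, sizeof(*query) + max_ids * sizeof(__u32));
	if (!query)
		return;
	query->ids_len = max_ids;

	if (!ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query))
		for (i = 0; i < query->prog_cnt && i < max_ids; i++)
			printf("attached prog id: %u\n", query->ids[i]);

	free(query);
}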
1308extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1309extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1310
1311struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1312{
1313	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1314
1315	for (; btp < __stop__bpf_raw_tp; btp++) {
1316		if (!strcmp(btp->tp->name, name))
1317			return btp;
1318	}
1319
1320	return bpf_get_raw_tracepoint_module(name);
1321}
1322
1323void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1324{
1325	struct module *mod = __module_address((unsigned long)btp);
1326
1327	if (mod)
1328		module_put(mod);
1329}
1330
1331static __always_inline
1332void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1333{
1334	rcu_read_lock();
1335	preempt_disable();
1336	(void) BPF_PROG_RUN(prog, args);
1337	preempt_enable();
1338	rcu_read_unlock();
1339}
1340
1341#define UNPACK(...)			__VA_ARGS__
1342#define REPEAT_1(FN, DL, X, ...)	FN(X)
1343#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1344#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1345#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1346#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1347#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1348#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1349#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1350#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1351#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1352#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1353#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1354#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
1355
1356#define SARG(X)		u64 arg##X
1357#define COPY(X)		args[X] = arg##X
1358
1359#define __DL_COM	(,)
1360#define __DL_SEM	(;)
1361
1362#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1363
1364#define BPF_TRACE_DEFN_x(x)						\
1365	void bpf_trace_run##x(struct bpf_prog *prog,			\
1366			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
1367	{								\
1368		u64 args[x];						\
1369		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
1370		__bpf_trace_run(prog, args);				\
1371	}								\
1372	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1373BPF_TRACE_DEFN_x(1);
1374BPF_TRACE_DEFN_x(2);
1375BPF_TRACE_DEFN_x(3);
1376BPF_TRACE_DEFN_x(4);
1377BPF_TRACE_DEFN_x(5);
1378BPF_TRACE_DEFN_x(6);
1379BPF_TRACE_DEFN_x(7);
1380BPF_TRACE_DEFN_x(8);
1381BPF_TRACE_DEFN_x(9);
1382BPF_TRACE_DEFN_x(10);
1383BPF_TRACE_DEFN_x(11);
1384BPF_TRACE_DEFN_x(12);
1385
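/*
 * Illustrative note, not part of bpf_trace.c: after the REPEAT()/SARG()/COPY()
 * machinery above, BPF_TRACE_DEFN_x(2) expands to roughly:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * which is what the per-tracepoint __bpf_trace_*() stubs generated from
 * include/trace/bpf_probe.h call with the tracepoint arguments packed into
 * args[].
 */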
1386static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1387{
1388	struct tracepoint *tp = btp->tp;
1389
1390	/*
1391	 * check that program doesn't access arguments beyond what's
1392	 * available in this tracepoint
1393	 */
1394	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1395		return -EINVAL;
1396
1397	if (prog->aux->max_tp_access > btp->writable_size)
1398		return -EINVAL;
1399
1400	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1401}
1402
1403int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1404{
1405	return __bpf_probe_register(btp, prog);
1406}
1407
1408int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1409{
1410	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1411}
1412
1413int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1414			    u32 *fd_type, const char **buf,
1415			    u64 *probe_offset, u64 *probe_addr)
1416{
1417	bool is_tracepoint, is_syscall_tp;
1418	struct bpf_prog *prog;
1419	int flags, err = 0;
1420
1421	prog = event->prog;
1422	if (!prog)
1423		return -ENOENT;
1424
1425	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1426	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1427		return -EOPNOTSUPP;
1428
1429	*prog_id = prog->aux->id;
1430	flags = event->tp_event->flags;
1431	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1432	is_syscall_tp = is_syscall_trace_event(event->tp_event);
1433
1434	if (is_tracepoint || is_syscall_tp) {
1435		*buf = is_tracepoint ? event->tp_event->tp->name
1436				     : event->tp_event->name;
1437		*fd_type = BPF_FD_TYPE_TRACEPOINT;
1438		*probe_offset = 0x0;
1439		*probe_addr = 0x0;
1440	} else {
1441		/* kprobe/uprobe */
1442		err = -EOPNOTSUPP;
1443#ifdef CONFIG_KPROBE_EVENTS
1444		if (flags & TRACE_EVENT_FL_KPROBE)
1445			err = bpf_get_kprobe_info(event, fd_type, buf,
1446						  probe_offset, probe_addr,
1447						  event->attr.type == PERF_TYPE_TRACEPOINT);
1448#endif
1449#ifdef CONFIG_UPROBE_EVENTS
1450		if (flags & TRACE_EVENT_FL_UPROBE)
1451			err = bpf_get_uprobe_info(event, fd_type, buf,
1452						  probe_offset,
1453						  event->attr.type == PERF_TYPE_TRACEPOINT);
1454#endif
1455	}
1456
1457	return err;
1458}
1459
1460static int __init send_signal_irq_work_init(void)
1461{
1462	int cpu;
1463	struct send_signal_irq_work *work;
1464
1465	for_each_possible_cpu(cpu) {
1466		work = per_cpu_ptr(&send_signal_work, cpu);
1467		init_irq_work(&work->irq_work, do_bpf_send_signal);
1468	}
1469	return 0;
1470}
1471
1472subsys_initcall(send_signal_irq_work_init);
1473
1474#ifdef CONFIG_MODULES
1475static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1476			    void *module)
1477{
1478	struct bpf_trace_module *btm, *tmp;
1479	struct module *mod = module;
1480
1481	if (mod->num_bpf_raw_events == 0 ||
1482	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1483		return 0;
1484
1485	mutex_lock(&bpf_module_mutex);
1486
1487	switch (op) {
1488	case MODULE_STATE_COMING:
1489		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1490		if (btm) {
1491			btm->module = module;
1492			list_add(&btm->list, &bpf_trace_modules);
1493		}
1494		break;
1495	case MODULE_STATE_GOING:
1496		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1497			if (btm->module == module) {
1498				list_del(&btm->list);
1499				kfree(btm);
1500				break;
1501			}
1502		}
1503		break;
1504	}
1505
1506	mutex_unlock(&bpf_module_mutex);
1507
1508	return 0;
1509}
1510
1511static struct notifier_block bpf_module_nb = {
1512	.notifier_call = bpf_event_notify,
1513};
1514
1515static int __init bpf_event_init(void)
1516{
1517	register_module_notifier(&bpf_module_nb);
1518	return 0;
1519}
1520
1521fs_initcall(bpf_event_init);
1522#endif /* CONFIG_MODULES */
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
   3 * Copyright (c) 2016 Facebook
   4 */
   5#include <linux/kernel.h>
   6#include <linux/types.h>
   7#include <linux/slab.h>
   8#include <linux/bpf.h>
   9#include <linux/bpf_verifier.h>
  10#include <linux/bpf_perf_event.h>
  11#include <linux/btf.h>
  12#include <linux/filter.h>
  13#include <linux/uaccess.h>
  14#include <linux/ctype.h>
  15#include <linux/kprobes.h>
  16#include <linux/spinlock.h>
  17#include <linux/syscalls.h>
  18#include <linux/error-injection.h>
  19#include <linux/btf_ids.h>
  20#include <linux/bpf_lsm.h>
  21#include <linux/fprobe.h>
  22#include <linux/bsearch.h>
  23#include <linux/sort.h>
  24#include <linux/key.h>
  25#include <linux/verification.h>
  26#include <linux/namei.h>
  27
  28#include <net/bpf_sk_storage.h>
  29
  30#include <uapi/linux/bpf.h>
  31#include <uapi/linux/btf.h>
  32
  33#include <asm/tlb.h>
  34
  35#include "trace_probe.h"
  36#include "trace.h"
  37
  38#define CREATE_TRACE_POINTS
  39#include "bpf_trace.h"
  40
  41#define bpf_event_rcu_dereference(p)					\
  42	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
  43
  44#define MAX_UPROBE_MULTI_CNT (1U << 20)
  45#define MAX_KPROBE_MULTI_CNT (1U << 20)
  46
  47#ifdef CONFIG_MODULES
  48struct bpf_trace_module {
  49	struct module *module;
  50	struct list_head list;
  51};
  52
  53static LIST_HEAD(bpf_trace_modules);
  54static DEFINE_MUTEX(bpf_module_mutex);
  55
  56static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  57{
  58	struct bpf_raw_event_map *btp, *ret = NULL;
  59	struct bpf_trace_module *btm;
  60	unsigned int i;
  61
  62	mutex_lock(&bpf_module_mutex);
  63	list_for_each_entry(btm, &bpf_trace_modules, list) {
  64		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
  65			btp = &btm->module->bpf_raw_events[i];
  66			if (!strcmp(btp->tp->name, name)) {
  67				if (try_module_get(btm->module))
  68					ret = btp;
  69				goto out;
  70			}
  71		}
  72	}
  73out:
  74	mutex_unlock(&bpf_module_mutex);
  75	return ret;
  76}
  77#else
  78static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  79{
  80	return NULL;
  81}
  82#endif /* CONFIG_MODULES */
  83
  84u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  85u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  86
  87static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
  88				  u64 flags, const struct btf **btf,
  89				  s32 *btf_id);
  90static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
  91static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  92
  93static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
  94static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  95
  96/**
  97 * trace_call_bpf - invoke BPF program
  98 * @call: tracepoint event
  99 * @ctx: opaque context pointer
 100 *
 101 * kprobe handlers execute BPF programs via this helper.
 102 * Can be used from static tracepoints in the future.
 103 *
 104 * Return: BPF programs always return an integer which is interpreted by
 105 * kprobe handler as:
 106 * 0 - return from kprobe (event is filtered out)
 107 * 1 - store kprobe event into ring buffer
 108 * Other values are reserved and currently alias to 1
 109 */
 110unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 111{
 112	unsigned int ret;
 113
 114	cant_sleep();
 115
 116	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 117		/*
 118		 * since some bpf program is already running on this cpu,
 119		 * don't call into another bpf program (same or different)
 120		 * and don't send kprobe event into ring-buffer,
 121		 * so return zero here
 122		 */
 123		rcu_read_lock();
 124		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
 125		rcu_read_unlock();
 126		ret = 0;
 127		goto out;
 128	}
 129
 130	/*
 131	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
 132	 * to all call sites, we did a bpf_prog_array_valid() there to check
 133	 * whether call->prog_array is empty or not, which is
 134	 * a heuristic to speed up execution.
 135	 *
 136	 * If bpf_prog_array_valid() fetched prog_array was
 137	 * non-NULL, we go into trace_call_bpf() and do the actual
 138	 * proper rcu_dereference() under RCU lock.
 139	 * If it turns out that prog_array is NULL then, we bail out.
 140	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
 141	 * was NULL, you'll skip the prog_array with the risk of missing
 142	 * out of events when it was updated in between this and the
 143	 * rcu_dereference() which is accepted risk.
 144	 */
 145	rcu_read_lock();
 146	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
 147				 ctx, bpf_prog_run);
 148	rcu_read_unlock();
 149
 150 out:
 151	__this_cpu_dec(bpf_prog_active);
 152
 153	return ret;
 154}
 155
 156#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 157BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 158{
 159	regs_set_return_value(regs, rc);
 160	override_function_with_return(regs);
 161	return 0;
 162}
 163
 164static const struct bpf_func_proto bpf_override_return_proto = {
 165	.func		= bpf_override_return,
 166	.gpl_only	= true,
 167	.ret_type	= RET_INTEGER,
 168	.arg1_type	= ARG_PTR_TO_CTX,
 169	.arg2_type	= ARG_ANYTHING,
 170};
 171#endif
 172
 173static __always_inline int
 174bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
 175{
 176	int ret;
 177
 178	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
 179	if (unlikely(ret < 0))
 180		memset(dst, 0, size);
 181	return ret;
 182}
 183
 184BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
 185	   const void __user *, unsafe_ptr)
 186{
 187	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
 188}
 189
 190const struct bpf_func_proto bpf_probe_read_user_proto = {
 191	.func		= bpf_probe_read_user,
 192	.gpl_only	= true,
 193	.ret_type	= RET_INTEGER,
 194	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 195	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 196	.arg3_type	= ARG_ANYTHING,
 197};
 198
 199static __always_inline int
 200bpf_probe_read_user_str_common(void *dst, u32 size,
 201			       const void __user *unsafe_ptr)
 202{
 203	int ret;
 204
 205	/*
 206	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
 207	 * terminator into `dst`.
 208	 *
 209	 * strncpy_from_user() does long-sized strides in the fast path. If the
 210	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
 211	 * then there could be junk after the NUL in `dst`. If user takes `dst`
 212	 * and keys a hash map with it, then semantically identical strings can
 213	 * occupy multiple entries in the map.
 214	 */
 215	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
 216	if (unlikely(ret < 0))
 217		memset(dst, 0, size);
 218	return ret;
 219}
 220
 221BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
 222	   const void __user *, unsafe_ptr)
 223{
 224	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
 225}
 226
 227const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 228	.func		= bpf_probe_read_user_str,
 229	.gpl_only	= true,
 230	.ret_type	= RET_INTEGER,
 231	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 232	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 233	.arg3_type	= ARG_ANYTHING,
 234};
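/*
 * Usage sketch (illustrative, map/variable names are hypothetical): on
 * success the helper returns the string length including the trailing NUL,
 * which can be fed directly to an output helper:
 *
 *	char name[256];
 *	long len = bpf_probe_read_user_str(name, sizeof(name), user_ptr);
 *
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      name, len);
 */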
 235
 236BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
 237	   const void *, unsafe_ptr)
 238{
 239	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 240}
 241
 242const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 243	.func		= bpf_probe_read_kernel,
 244	.gpl_only	= true,
 245	.ret_type	= RET_INTEGER,
 246	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 247	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 248	.arg3_type	= ARG_ANYTHING,
 249};
 250
 251static __always_inline int
 252bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 253{
 254	int ret;
 255
 256	/*
 257	 * The strncpy_from_kernel_nofault() call will likely not fill the
 258	 * entire buffer, but that's okay in this circumstance: we're probing
 259	 * arbitrary memory anyway, similar to bpf_probe_read_*(), and might
 260	 * as well be probing the stack. Thus, memory is explicitly cleared
 261	 * only in the error case, so that improper users who ignore the
 262	 * return code altogether don't copy garbage; otherwise the length of
 263	 * the string is returned and can be used for bpf_perf_event_output() et al.
 264	 */
 265	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
 266	if (unlikely(ret < 0))
 267		memset(dst, 0, size);
 268	return ret;
 269}
 270
 271BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
 272	   const void *, unsafe_ptr)
 273{
 274	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 275}
 276
 277const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
 278	.func		= bpf_probe_read_kernel_str,
 279	.gpl_only	= true,
 280	.ret_type	= RET_INTEGER,
 281	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 282	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 283	.arg3_type	= ARG_ANYTHING,
 284};
 285
 286#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 287BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
 288	   const void *, unsafe_ptr)
 289{
 290	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 291		return bpf_probe_read_user_common(dst, size,
 292				(__force void __user *)unsafe_ptr);
 293	}
 294	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
 295}
 296
 297static const struct bpf_func_proto bpf_probe_read_compat_proto = {
 298	.func		= bpf_probe_read_compat,
 299	.gpl_only	= true,
 300	.ret_type	= RET_INTEGER,
 301	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 302	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 303	.arg3_type	= ARG_ANYTHING,
 304};
 305
 306BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
 307	   const void *, unsafe_ptr)
 308{
 309	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
 310		return bpf_probe_read_user_str_common(dst, size,
 311				(__force void __user *)unsafe_ptr);
 312	}
 313	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
 314}
 315
 316static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
 317	.func		= bpf_probe_read_compat_str,
 318	.gpl_only	= true,
 319	.ret_type	= RET_INTEGER,
 320	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 321	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 322	.arg3_type	= ARG_ANYTHING,
 323};
 324#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 325
 326BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
 327	   u32, size)
 328{
 329	/*
 330	 * Ensure we're in user context which is safe for the helper to
 331	 * run. This helper has no business in a kthread.
 332	 *
 333	 * access_ok() should prevent writing to non-user memory, but in
 334	 * some situations (nommu, temporary switch, etc) access_ok() does
 335	 * not provide enough validation, hence the check on KERNEL_DS.
 336	 *
 337	 * nmi_uaccess_okay() ensures the probe is not run in an interim
 338	 * state, when the task or mm are switched. This is specifically
 339	 * required to prevent the use of temporary mm.
 340	 */
 341
 342	if (unlikely(in_interrupt() ||
 343		     current->flags & (PF_KTHREAD | PF_EXITING)))
 344		return -EPERM;
 345	if (unlikely(!nmi_uaccess_okay()))
 346		return -EPERM;
 347
 348	return copy_to_user_nofault(unsafe_ptr, src, size);
 349}
 350
 351static const struct bpf_func_proto bpf_probe_write_user_proto = {
 352	.func		= bpf_probe_write_user,
 353	.gpl_only	= true,
 354	.ret_type	= RET_INTEGER,
 355	.arg1_type	= ARG_ANYTHING,
 356	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 357	.arg3_type	= ARG_CONST_SIZE,
 358};
 359
 360static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 361{
 362	if (!capable(CAP_SYS_ADMIN))
 363		return NULL;
 364
 365	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
 366			    current->comm, task_pid_nr(current));
 367
 368	return &bpf_probe_write_user_proto;
 369}
 370
 371#define MAX_TRACE_PRINTK_VARARGS	3
 372#define BPF_TRACE_PRINTK_SIZE		1024
 373
 374BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 375	   u64, arg2, u64, arg3)
 376{
 377	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
 378	struct bpf_bprintf_data data = {
 379		.get_bin_args	= true,
 380		.get_buf	= true,
 381	};
 382	int ret;
 383
 384	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
 385				  MAX_TRACE_PRINTK_VARARGS, &data);
 386	if (ret < 0)
 387		return ret;
 388
 389	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 390
 391	trace_bpf_trace_printk(data.buf);
 392
 393	bpf_bprintf_cleanup(&data);
 394
 395	return ret;
 396}
 397
 398static const struct bpf_func_proto bpf_trace_printk_proto = {
 399	.func		= bpf_trace_printk,
 400	.gpl_only	= true,
 401	.ret_type	= RET_INTEGER,
 402	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 403	.arg2_type	= ARG_CONST_SIZE,
 404};
 405
 406static void __set_printk_clr_event(void)
 407{
 408	/*
 409	 * This program might be calling bpf_trace_printk,
 410	 * so enable the associated bpf_trace/bpf_trace_printk event.
 411	 * Repeat this each time, as it is possible a user has
 412	 * disabled bpf_trace_printk events.  By loading a program that
 413	 * calls bpf_trace_printk(), however, the user has expressed
 414	 * the intent to see such events.
 415	 */
 416	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
 417		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 418}
 419
 420const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 421{
 422	__set_printk_clr_event();
 423	return &bpf_trace_printk_proto;
 424}
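/*
 * Usage sketch (illustrative): bpf_trace_printk() takes at most
 * MAX_TRACE_PRINTK_VARARGS (3) arguments and emits to the
 * bpf_trace/bpf_trace_printk event, typically read via
 * /sys/kernel/tracing/trace_pipe:
 *
 *	char fmt[] = "pid %d opened fd %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, fd);
 */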
 425
 426BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
 427	   u32, data_len)
 428{
 429	struct bpf_bprintf_data data = {
 430		.get_bin_args	= true,
 431		.get_buf	= true,
 432	};
 433	int ret, num_args;
 434
 435	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 436	    (data_len && !args))
 437		return -EINVAL;
 438	num_args = data_len / 8;
 439
 440	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 441	if (ret < 0)
 442		return ret;
 443
 444	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 445
 446	trace_bpf_trace_printk(data.buf);
 447
 448	bpf_bprintf_cleanup(&data);
 449
 450	return ret;
 451}
 452
 453static const struct bpf_func_proto bpf_trace_vprintk_proto = {
 454	.func		= bpf_trace_vprintk,
 455	.gpl_only	= true,
 456	.ret_type	= RET_INTEGER,
 457	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 458	.arg2_type	= ARG_CONST_SIZE,
 459	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 460	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
 461};
 462
 463const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
 464{
 465	__set_printk_clr_event();
 466	return &bpf_trace_vprintk_proto;
 467}
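/*
 * Usage sketch (illustrative): bpf_trace_vprintk() takes its variadic
 * arguments as an array of u64, so data_len must be a multiple of 8;
 * libbpf's bpf_printk() macro is commonly built on top of it when more
 * than three arguments are needed (names below are only an example):
 *
 *	u64 args[] = { pid, uid, fd, ret };
 *
 *	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
 */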
 468
 469BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 470	   const void *, args, u32, data_len)
 471{
 472	struct bpf_bprintf_data data = {
 473		.get_bin_args	= true,
 474	};
 475	int err, num_args;
 476
 477	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 478	    (data_len && !args))
 479		return -EINVAL;
 480	num_args = data_len / 8;
 481
 482	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 483	if (err < 0)
 484		return err;
 485
 486	seq_bprintf(m, fmt, data.bin_args);
 487
 488	bpf_bprintf_cleanup(&data);
 489
 490	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 491}
 492
 493BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
 494
 495static const struct bpf_func_proto bpf_seq_printf_proto = {
 496	.func		= bpf_seq_printf,
 497	.gpl_only	= true,
 498	.ret_type	= RET_INTEGER,
 499	.arg1_type	= ARG_PTR_TO_BTF_ID,
 500	.arg1_btf_id	= &btf_seq_file_ids[0],
 501	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 502	.arg3_type	= ARG_CONST_SIZE,
 503	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
 504	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 505};
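/*
 * Usage sketch (illustrative): the seq_file helpers are intended for
 * BPF_TRACE_ITER programs, e.g. printing one line per task; the iterator
 * section and field access below follow the common libbpf pattern and are
 * only an example:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%d %s\n",
 *				       task->pid, task->comm);
 *		return 0;
 *	}
 */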
 506
 507BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
 508{
 509	return seq_write(m, data, len) ? -EOVERFLOW : 0;
 510}
 511
 512static const struct bpf_func_proto bpf_seq_write_proto = {
 513	.func		= bpf_seq_write,
 514	.gpl_only	= true,
 515	.ret_type	= RET_INTEGER,
 516	.arg1_type	= ARG_PTR_TO_BTF_ID,
 517	.arg1_btf_id	= &btf_seq_file_ids[0],
 518	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 519	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 520};
 521
 522BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
 523	   u32, btf_ptr_size, u64, flags)
 524{
 525	const struct btf *btf;
 526	s32 btf_id;
 527	int ret;
 528
 529	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
 530	if (ret)
 531		return ret;
 532
 533	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
 534}
 535
 536static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
 537	.func		= bpf_seq_printf_btf,
 538	.gpl_only	= true,
 539	.ret_type	= RET_INTEGER,
 540	.arg1_type	= ARG_PTR_TO_BTF_ID,
 541	.arg1_btf_id	= &btf_seq_file_ids[0],
 542	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 543	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 544	.arg4_type	= ARG_ANYTHING,
 545};
 546
 547static __always_inline int
 548get_map_perf_counter(struct bpf_map *map, u64 flags,
 549		     u64 *value, u64 *enabled, u64 *running)
 550{
 551	struct bpf_array *array = container_of(map, struct bpf_array, map);
 552	unsigned int cpu = smp_processor_id();
 553	u64 index = flags & BPF_F_INDEX_MASK;
 554	struct bpf_event_entry *ee;
 555
 556	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 557		return -EINVAL;
 558	if (index == BPF_F_CURRENT_CPU)
 559		index = cpu;
 560	if (unlikely(index >= array->map.max_entries))
 561		return -E2BIG;
 562
 563	ee = READ_ONCE(array->ptrs[index]);
 564	if (!ee)
 565		return -ENOENT;
 566
 567	return perf_event_read_local(ee->event, value, enabled, running);
 568}
 569
 570BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 571{
 572	u64 value = 0;
 573	int err;
 574
 575	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
 576	/*
 577	 * This API is ugly since we miss the [-22..-2] range of valid
 578	 * counter values, but that's UAPI.
 579	 */
 580	if (err)
 581		return err;
 582	return value;
 583}
 584
 585static const struct bpf_func_proto bpf_perf_event_read_proto = {
 586	.func		= bpf_perf_event_read,
 587	.gpl_only	= true,
 588	.ret_type	= RET_INTEGER,
 589	.arg1_type	= ARG_CONST_MAP_PTR,
 590	.arg2_type	= ARG_ANYTHING,
 591};
 592
 593BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
 594	   struct bpf_perf_event_value *, buf, u32, size)
 595{
 596	int err = -EINVAL;
 597
 598	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 599		goto clear;
 600	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
 601				   &buf->running);
 602	if (unlikely(err))
 603		goto clear;
 604	return 0;
 605clear:
 606	memset(buf, 0, size);
 607	return err;
 608}
 609
 610static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 611	.func		= bpf_perf_event_read_value,
 612	.gpl_only	= true,
 613	.ret_type	= RET_INTEGER,
 614	.arg1_type	= ARG_CONST_MAP_PTR,
 615	.arg2_type	= ARG_ANYTHING,
 616	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
 617	.arg4_type	= ARG_CONST_SIZE,
 618};
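/*
 * Usage sketch (illustrative, map name is hypothetical): both read helpers
 * index a BPF_MAP_TYPE_PERF_EVENT_ARRAY and accept BPF_F_CURRENT_CPU in
 * place of an explicit index:
 *
 *	struct bpf_perf_event_value v = {};
 *
 *	if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *				       &v, sizeof(v)))
 *		bpf_printk("count=%llu enabled=%llu", v.counter, v.enabled);
 */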
 619
 620static __always_inline u64
 621__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 622			u64 flags, struct perf_raw_record *raw,
 623			struct perf_sample_data *sd)
 624{
 625	struct bpf_array *array = container_of(map, struct bpf_array, map);
 626	unsigned int cpu = smp_processor_id();
 627	u64 index = flags & BPF_F_INDEX_MASK;
 628	struct bpf_event_entry *ee;
 629	struct perf_event *event;
 630
 631	if (index == BPF_F_CURRENT_CPU)
 632		index = cpu;
 633	if (unlikely(index >= array->map.max_entries))
 634		return -E2BIG;
 635
 636	ee = READ_ONCE(array->ptrs[index]);
 637	if (!ee)
 638		return -ENOENT;
 639
 640	event = ee->event;
 641	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 642		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 643		return -EINVAL;
 644
 645	if (unlikely(event->oncpu != cpu))
 646		return -EOPNOTSUPP;
 647
 648	perf_sample_save_raw_data(sd, event, raw);
 649
 650	return perf_event_output(event, sd, regs);
 651}
 652
 653/*
 654 * Support executing tracepoints in normal, irq, and nmi context that each call
 655 * bpf_perf_event_output
 656 */
 657struct bpf_trace_sample_data {
 658	struct perf_sample_data sds[3];
 659};
 660
 661static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
 662static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 663BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 664	   u64, flags, void *, data, u64, size)
 665{
 666	struct bpf_trace_sample_data *sds;
 667	struct perf_raw_record raw = {
 668		.frag = {
 669			.size = size,
 670			.data = data,
 671		},
 672	};
 673	struct perf_sample_data *sd;
 674	int nest_level, err;
 675
 676	preempt_disable();
 677	sds = this_cpu_ptr(&bpf_trace_sds);
 678	nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 679
 680	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
 681		err = -EBUSY;
 682		goto out;
 683	}
 684
 685	sd = &sds->sds[nest_level - 1];
 686
 687	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
 688		err = -EINVAL;
 689		goto out;
 690	}
 691
 692	perf_sample_data_init(sd, 0, 0);
 693
 694	err = __bpf_perf_event_output(regs, map, flags, &raw, sd);
 695out:
 696	this_cpu_dec(bpf_trace_nest_level);
 697	preempt_enable();
 698	return err;
 699}
 700
 701static const struct bpf_func_proto bpf_perf_event_output_proto = {
 702	.func		= bpf_perf_event_output,
 703	.gpl_only	= true,
 704	.ret_type	= RET_INTEGER,
 705	.arg1_type	= ARG_PTR_TO_CTX,
 706	.arg2_type	= ARG_CONST_MAP_PTR,
 707	.arg3_type	= ARG_ANYTHING,
 708	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 709	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 710};
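/*
 * Usage sketch (illustrative, struct/map names are hypothetical): programs
 * typically stream a small record to user space through a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY:
 *
 *	struct event e = {};
 *
 *	e.pid = bpf_get_current_pid_tgid() >> 32;
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */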
 711
 712static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
 713struct bpf_nested_pt_regs {
 714	struct pt_regs regs[3];
 715};
 716static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
 717static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 718
 719u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 720		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 721{
 722	struct perf_raw_frag frag = {
 723		.copy		= ctx_copy,
 724		.size		= ctx_size,
 725		.data		= ctx,
 726	};
 727	struct perf_raw_record raw = {
 728		.frag = {
 729			{
 730				.next	= ctx_size ? &frag : NULL,
 731			},
 732			.size	= meta_size,
 733			.data	= meta,
 734		},
 735	};
 736	struct perf_sample_data *sd;
 737	struct pt_regs *regs;
 738	int nest_level;
 739	u64 ret;
 740
 741	preempt_disable();
 742	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 743
 744	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
 745		ret = -EBUSY;
 746		goto out;
 747	}
 748	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
 749	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 750
 751	perf_fetch_caller_regs(regs);
 752	perf_sample_data_init(sd, 0, 0);
 753
 754	ret = __bpf_perf_event_output(regs, map, flags, &raw, sd);
 755out:
 756	this_cpu_dec(bpf_event_output_nest_level);
 757	preempt_enable();
 758	return ret;
 759}
 760
 761BPF_CALL_0(bpf_get_current_task)
 762{
 763	return (long) current;
 764}
 765
 766const struct bpf_func_proto bpf_get_current_task_proto = {
 767	.func		= bpf_get_current_task,
 768	.gpl_only	= true,
 769	.ret_type	= RET_INTEGER,
 770};
 771
 772BPF_CALL_0(bpf_get_current_task_btf)
 773{
 774	return (unsigned long) current;
 775}
 776
 777const struct bpf_func_proto bpf_get_current_task_btf_proto = {
 778	.func		= bpf_get_current_task_btf,
 779	.gpl_only	= true,
 780	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
 781	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 782};
 783
 784BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
 785{
 786	return (unsigned long) task_pt_regs(task);
 787}
 788
 789BTF_ID_LIST(bpf_task_pt_regs_ids)
 790BTF_ID(struct, pt_regs)
 791
 792const struct bpf_func_proto bpf_task_pt_regs_proto = {
 793	.func		= bpf_task_pt_regs,
 794	.gpl_only	= true,
 795	.arg1_type	= ARG_PTR_TO_BTF_ID,
 796	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 797	.ret_type	= RET_PTR_TO_BTF_ID,
 798	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
 799};
 800
 801struct send_signal_irq_work {
 802	struct irq_work irq_work;
 803	struct task_struct *task;
 804	u32 sig;
 805	enum pid_type type;
 806	bool has_siginfo;
 807	struct kernel_siginfo info;
 808};
 809
 810static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
 811
 812static void do_bpf_send_signal(struct irq_work *entry)
 813{
 814	struct send_signal_irq_work *work;
 815	struct kernel_siginfo *siginfo;
 816
 817	work = container_of(entry, struct send_signal_irq_work, irq_work);
 818	siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;
 819
 820	group_send_sig_info(work->sig, siginfo, work->task, work->type);
 821	put_task_struct(work->task);
 822}
 823
 824static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
 825{
 826	struct send_signal_irq_work *work = NULL;
 827	struct kernel_siginfo info;
 828	struct kernel_siginfo *siginfo;
 829
 830	if (!task) {
 831		task = current;
 832		siginfo = SEND_SIG_PRIV;
 833	} else {
 834		clear_siginfo(&info);
 835		info.si_signo = sig;
 836		info.si_errno = 0;
 837		info.si_code = SI_KERNEL;
 838		info.si_pid = 0;
 839		info.si_uid = 0;
 840		info.si_value.sival_ptr = (void *)(unsigned long)value;
 841		siginfo = &info;
 842	}
 843
 844	/* Similar to bpf_probe_write_user, the task needs to be
 845	 * in a sound condition and kernel memory access must be
 846	 * permitted in order to send a signal to the current
 847	 * task.
 848	 */
 849	if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
 850		return -EPERM;
 851	if (unlikely(!nmi_uaccess_okay()))
 852		return -EPERM;
 853	/* Task should not be pid=1 to avoid kernel panic. */
 854	if (unlikely(is_global_init(task)))
 855		return -EPERM;
 856
 857	if (!preemptible()) {
 858		/* Do an early check on signal validity. Otherwise,
 859		 * the error is lost in deferred irq_work.
 860		 */
 861		if (unlikely(!valid_signal(sig)))
 862			return -EINVAL;
 863
 864		work = this_cpu_ptr(&send_signal_work);
 865		if (irq_work_is_busy(&work->irq_work))
 866			return -EBUSY;
 867
 868		/* Add the task, which is the target of the signal,
 869		 * to the irq_work. The current task may change by the time
 870		 * the queued irq_work gets executed.
 871		 */
 872		work->task = get_task_struct(task);
 873		work->has_siginfo = siginfo == &info;
 874		if (work->has_siginfo)
 875			copy_siginfo(&work->info, &info);
 876		work->sig = sig;
 877		work->type = type;
 878		irq_work_queue(&work->irq_work);
 879		return 0;
 880	}
 881
 882	return group_send_sig_info(sig, siginfo, task, type);
 883}
 884
 885BPF_CALL_1(bpf_send_signal, u32, sig)
 886{
 887	return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
 888}
 889
 890static const struct bpf_func_proto bpf_send_signal_proto = {
 891	.func		= bpf_send_signal,
 892	.gpl_only	= false,
 893	.ret_type	= RET_INTEGER,
 894	.arg1_type	= ARG_ANYTHING,
 895};
 896
 897BPF_CALL_1(bpf_send_signal_thread, u32, sig)
 898{
 899	return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
 900}
 901
 902static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 903	.func		= bpf_send_signal_thread,
 904	.gpl_only	= false,
 905	.ret_type	= RET_INTEGER,
 906	.arg1_type	= ARG_ANYTHING,
 907};
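/*
 * Usage note (illustrative): bpf_send_signal() delivers to the whole thread
 * group of the current task (PIDTYPE_TGID), while bpf_send_signal_thread()
 * delivers only to the current thread (PIDTYPE_PID), e.g. (assuming SIGUSR1
 * is defined in the program):
 *
 *	if (violation)
 *		bpf_send_signal(SIGUSR1);
 */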
 908
 909BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 910{
 911	struct path copy;
 912	long len;
 913	char *p;
 914
 915	if (!sz)
 916		return 0;
 917
 918	/*
 919	 * The path pointer is verified as trusted and safe to use,
 920	 * but let's double-check that it's valid anyway to work around
 921	 * a potentially broken verifier.
 922	 */
 923	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
 924	if (len < 0)
 925		return len;
 926
 927	p = d_path(&copy, buf, sz);
 928	if (IS_ERR(p)) {
 929		len = PTR_ERR(p);
 930	} else {
 931		len = buf + sz - p;
 932		memmove(buf, p, len);
 933	}
 934
 935	return len;
 936}
 937
 938BTF_SET_START(btf_allowlist_d_path)
 939#ifdef CONFIG_SECURITY
 940BTF_ID(func, security_file_permission)
 941BTF_ID(func, security_inode_getattr)
 942BTF_ID(func, security_file_open)
 943#endif
 944#ifdef CONFIG_SECURITY_PATH
 945BTF_ID(func, security_path_truncate)
 946#endif
 947BTF_ID(func, vfs_truncate)
 948BTF_ID(func, vfs_fallocate)
 949BTF_ID(func, dentry_open)
 950BTF_ID(func, vfs_getattr)
 951BTF_ID(func, filp_close)
 952BTF_SET_END(btf_allowlist_d_path)
 953
 954static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 955{
 956	if (prog->type == BPF_PROG_TYPE_TRACING &&
 957	    prog->expected_attach_type == BPF_TRACE_ITER)
 958		return true;
 959
 960	if (prog->type == BPF_PROG_TYPE_LSM)
 961		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
 962
 963	return btf_id_set_contains(&btf_allowlist_d_path,
 964				   prog->aux->attach_btf_id);
 965}
 966
 967BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
 968
 969static const struct bpf_func_proto bpf_d_path_proto = {
 970	.func		= bpf_d_path,
 971	.gpl_only	= false,
 972	.ret_type	= RET_INTEGER,
 973	.arg1_type	= ARG_PTR_TO_BTF_ID,
 974	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
 975	.arg2_type	= ARG_PTR_TO_MEM,
 976	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 977	.allowed	= bpf_d_path_allowed,
 978};
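/*
 * Usage sketch (illustrative): bpf_d_path() is gated by bpf_d_path_allowed()
 * above; a sleepable LSM program is one permitted user. Hook choice and
 * names below are only an example:
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(log_open, struct file *file)
 *	{
 *		char buf[256];
 *
 *		if (bpf_d_path(&file->f_path, buf, sizeof(buf)) > 0)
 *			bpf_printk("open: %s", buf);
 *		return 0;
 *	}
 */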
 979
 980#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
 981			 BTF_F_PTR_RAW | BTF_F_ZERO)
 982
 983static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
 984				  u64 flags, const struct btf **btf,
 985				  s32 *btf_id)
 986{
 987	const struct btf_type *t;
 988
 989	if (unlikely(flags & ~(BTF_F_ALL)))
 990		return -EINVAL;
 991
 992	if (btf_ptr_size != sizeof(struct btf_ptr))
 993		return -EINVAL;
 994
 995	*btf = bpf_get_btf_vmlinux();
 996
 997	if (IS_ERR_OR_NULL(*btf))
 998		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
 999
1000	if (ptr->type_id > 0)
1001		*btf_id = ptr->type_id;
1002	else
1003		return -EINVAL;
1004
1005	if (*btf_id > 0)
1006		t = btf_type_by_id(*btf, *btf_id);
1007	if (*btf_id <= 0 || !t)
1008		return -ENOENT;
1009
1010	return 0;
1011}
1012
1013BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1014	   u32, btf_ptr_size, u64, flags)
1015{
1016	const struct btf *btf;
1017	s32 btf_id;
1018	int ret;
1019
1020	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1021	if (ret)
1022		return ret;
1023
1024	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1025				      flags);
1026}
1027
1028const struct bpf_func_proto bpf_snprintf_btf_proto = {
1029	.func		= bpf_snprintf_btf,
1030	.gpl_only	= false,
1031	.ret_type	= RET_INTEGER,
1032	.arg1_type	= ARG_PTR_TO_MEM,
1033	.arg2_type	= ARG_CONST_SIZE,
1034	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1035	.arg4_type	= ARG_CONST_SIZE,
1036	.arg5_type	= ARG_ANYTHING,
1037};
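/*
 * Usage sketch (illustrative, buffer/variable names are hypothetical):
 * bpf_snprintf_btf() renders a kernel object through its BTF type id,
 * commonly obtained with libbpf's bpf_core_type_id_kernel():
 *
 *	struct btf_ptr p = { .ptr = task };
 *
 *	p.type_id = bpf_core_type_id_kernel(struct task_struct);
 *	bpf_snprintf_btf(str, sizeof(str), &p, sizeof(p), 0);
 */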
1038
1039BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1040{
1041	/* This helper call is inlined by verifier. */
1042	return ((u64 *)ctx)[-2];
1043}
1044
1045static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1046	.func		= bpf_get_func_ip_tracing,
1047	.gpl_only	= true,
1048	.ret_type	= RET_INTEGER,
1049	.arg1_type	= ARG_PTR_TO_CTX,
1050};
1051
1052#ifdef CONFIG_X86_KERNEL_IBT
1053static unsigned long get_entry_ip(unsigned long fentry_ip)
1054{
1055	u32 instr;
1056
1057	/* We want to be extra safe in case entry ip is on the page edge,
1058	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
1059	 */
1060	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
1061		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
1062			return fentry_ip;
1063	} else {
1064		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
1065	}
1066	if (is_endbr(instr))
1067		fentry_ip -= ENDBR_INSN_SIZE;
1068	return fentry_ip;
1069}
1070#else
1071#define get_entry_ip(fentry_ip) fentry_ip
1072#endif
1073
1074BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1075{
1076	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1077	struct kprobe *kp;
1078
1079#ifdef CONFIG_UPROBES
1080	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1081	if (run_ctx->is_uprobe)
1082		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1083#endif
1084
1085	kp = kprobe_running();
1086
1087	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1088		return 0;
1089
1090	return get_entry_ip((uintptr_t)kp->addr);
1091}
1092
1093static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1094	.func		= bpf_get_func_ip_kprobe,
1095	.gpl_only	= true,
1096	.ret_type	= RET_INTEGER,
1097	.arg1_type	= ARG_PTR_TO_CTX,
1098};
1099
1100BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1101{
1102	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1103}
1104
1105static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1106	.func		= bpf_get_func_ip_kprobe_multi,
1107	.gpl_only	= false,
1108	.ret_type	= RET_INTEGER,
1109	.arg1_type	= ARG_PTR_TO_CTX,
1110};
1111
1112BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1113{
1114	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1115}
1116
1117static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1118	.func		= bpf_get_attach_cookie_kprobe_multi,
1119	.gpl_only	= false,
1120	.ret_type	= RET_INTEGER,
1121	.arg1_type	= ARG_PTR_TO_CTX,
1122};
1123
1124BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1125{
1126	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1127}
1128
1129static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1130	.func		= bpf_get_func_ip_uprobe_multi,
1131	.gpl_only	= false,
1132	.ret_type	= RET_INTEGER,
1133	.arg1_type	= ARG_PTR_TO_CTX,
1134};
1135
1136BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1137{
1138	return bpf_uprobe_multi_cookie(current->bpf_ctx);
1139}
1140
1141static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1142	.func		= bpf_get_attach_cookie_uprobe_multi,
1143	.gpl_only	= false,
1144	.ret_type	= RET_INTEGER,
1145	.arg1_type	= ARG_PTR_TO_CTX,
1146};
1147
1148BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1149{
1150	struct bpf_trace_run_ctx *run_ctx;
1151
1152	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1153	return run_ctx->bpf_cookie;
1154}
1155
1156static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1157	.func		= bpf_get_attach_cookie_trace,
1158	.gpl_only	= false,
1159	.ret_type	= RET_INTEGER,
1160	.arg1_type	= ARG_PTR_TO_CTX,
1161};
1162
1163BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1164{
1165	return ctx->event->bpf_cookie;
1166}
1167
1168static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1169	.func		= bpf_get_attach_cookie_pe,
1170	.gpl_only	= false,
1171	.ret_type	= RET_INTEGER,
1172	.arg1_type	= ARG_PTR_TO_CTX,
1173};
1174
1175BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1176{
1177	struct bpf_trace_run_ctx *run_ctx;
1178
1179	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1180	return run_ctx->bpf_cookie;
1181}
1182
1183static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1184	.func		= bpf_get_attach_cookie_tracing,
1185	.gpl_only	= false,
1186	.ret_type	= RET_INTEGER,
1187	.arg1_type	= ARG_PTR_TO_CTX,
1188};
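/*
 * Usage note (illustrative): the attach cookie is a u64 chosen by user
 * space at attach/link-create time and read back here, which lets a single
 * program attached to many sites tell those sites apart:
 *
 *	u64 cookie = bpf_get_attach_cookie(ctx);
 *
 *	bpf_printk("hit site %llu", cookie);
 */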
1189
1190BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1191{
1192	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1193	u32 entry_cnt = size / br_entry_size;
1194
1195	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1196
1197	if (unlikely(flags))
1198		return -EINVAL;
1199
1200	if (!entry_cnt)
1201		return -ENOENT;
1202
1203	return entry_cnt * br_entry_size;
1204}
1205
1206static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1207	.func		= bpf_get_branch_snapshot,
1208	.gpl_only	= true,
1209	.ret_type	= RET_INTEGER,
1210	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1211	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1212};
1213
1214BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1215{
1216	/* This helper call is inlined by verifier. */
1217	u64 nr_args = ((u64 *)ctx)[-1];
1218
1219	if ((u64) n >= nr_args)
1220		return -EINVAL;
1221	*value = ((u64 *)ctx)[n];
1222	return 0;
1223}
1224
1225static const struct bpf_func_proto bpf_get_func_arg_proto = {
1226	.func		= get_func_arg,
1227	.ret_type	= RET_INTEGER,
1228	.arg1_type	= ARG_PTR_TO_CTX,
1229	.arg2_type	= ARG_ANYTHING,
1230	.arg3_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1231	.arg3_size	= sizeof(u64),
1232};
1233
1234BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1235{
1236	/* This helper call is inlined by verifier. */
1237	u64 nr_args = ((u64 *)ctx)[-1];
1238
1239	*value = ((u64 *)ctx)[nr_args];
1240	return 0;
1241}
1242
1243static const struct bpf_func_proto bpf_get_func_ret_proto = {
1244	.func		= get_func_ret,
1245	.ret_type	= RET_INTEGER,
1246	.arg1_type	= ARG_PTR_TO_CTX,
1247	.arg2_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1248	.arg2_size	= sizeof(u64),
1249};
1250
1251BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1252{
1253	/* This helper call is inlined by verifier. */
1254	return ((u64 *)ctx)[-1];
1255}
1256
1257static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1258	.func		= get_func_arg_cnt,
1259	.ret_type	= RET_INTEGER,
1260	.arg1_type	= ARG_PTR_TO_CTX,
1261};
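/*
 * Usage sketch (illustrative): these three helpers are only offered to
 * trampoline-based programs (see tracing_prog_func_proto() below); an fexit
 * program could inspect its traced function generically:
 *
 *	u64 nr_args = bpf_get_func_arg_cnt(ctx);
 *	u64 arg0, retval;
 *
 *	if (nr_args > 0 &&
 *	    !bpf_get_func_arg(ctx, 0, &arg0) &&
 *	    !bpf_get_func_ret(ctx, &retval))
 *		bpf_printk("arg0=%llu ret=%llu", arg0, retval);
 */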
1262
1263#ifdef CONFIG_KEYS
1264__bpf_kfunc_start_defs();
1265
1266/**
1267 * bpf_lookup_user_key - lookup a key by its serial
1268 * @serial: key handle serial number
1269 * @flags: lookup-specific flags
1270 *
1271 * Search a key with a given *serial* and the provided *flags*.
1272 * If found, increment the reference count of the key by one, and
1273 * return it in the bpf_key structure.
1274 *
1275 * The bpf_key structure must be passed to bpf_key_put() when done
1276 * with it, so that the key reference count is decremented and the
1277 * bpf_key structure is freed.
1278 *
1279 * Permission checks are deferred to the time the key is used by
1280 * one of the available key-specific kfuncs.
1281 *
1282 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1283 * special keyring (e.g. session keyring), if it doesn't yet exist.
1284 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1285 * for the key construction, and to retrieve uninstantiated keys (keys
1286 * without data attached to them).
1287 *
1288 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1289 *         NULL pointer otherwise.
1290 */
1291__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1292{
1293	key_ref_t key_ref;
1294	struct bpf_key *bkey;
1295
1296	if (flags & ~KEY_LOOKUP_ALL)
1297		return NULL;
1298
1299	/*
1300	 * Permission check is deferred until the key is used, as the
1301	 * intent of the caller is unknown here.
1302	 */
1303	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1304	if (IS_ERR(key_ref))
1305		return NULL;
1306
1307	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1308	if (!bkey) {
1309		key_put(key_ref_to_ptr(key_ref));
1310		return NULL;
1311	}
1312
1313	bkey->key = key_ref_to_ptr(key_ref);
1314	bkey->has_ref = true;
1315
1316	return bkey;
1317}
1318
1319/**
1320 * bpf_lookup_system_key - lookup a key by a system-defined ID
1321 * @id: key ID
1322 *
1323 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1324 * The key pointer is marked as invalid, to prevent bpf_key_put() from
1325 * attempting to decrement the key reference count on that pointer. The key
1326 * pointer set in such way is currently understood only by
1327 * verify_pkcs7_signature().
1328 *
1329 * Set *id* to one of the values defined in include/linux/verification.h:
1330 * 0 for the primary keyring (immutable keyring of system keys);
1331 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1332 * (where keys can be added only if they are vouched for by existing keys
1333 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1334 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1335 * kernel image and, possibly, the initramfs signature).
1336 *
1337 * Return: a bpf_key pointer with an invalid key pointer set from the
1338 *         pre-determined ID on success, a NULL pointer otherwise
1339 */
1340__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1341{
1342	struct bpf_key *bkey;
1343
1344	if (system_keyring_id_check(id) < 0)
1345		return NULL;
1346
1347	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1348	if (!bkey)
1349		return NULL;
1350
1351	bkey->key = (struct key *)(unsigned long)id;
1352	bkey->has_ref = false;
1353
1354	return bkey;
1355}
1356
1357/**
1358 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1359 * @bkey: bpf_key structure
1360 *
1361 * Decrement the reference count of the key inside *bkey*, if the pointer
1362 * is valid, and free *bkey*.
1363 */
1364__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1365{
1366	if (bkey->has_ref)
1367		key_put(bkey->key);
1368
1369	kfree(bkey);
1370}
1371
1372#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1373/**
1374 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1375 * @data_p: data to verify
1376 * @sig_p: signature of the data
1377 * @trusted_keyring: keyring with keys trusted for signature verification
1378 *
1379 * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
1380 * with keys in a keyring referenced by *trusted_keyring*.
1381 *
1382 * Return: 0 on success, a negative value on error.
1383 */
1384__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
1385			       struct bpf_dynptr *sig_p,
1386			       struct bpf_key *trusted_keyring)
1387{
1388	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
1389	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
1390	const void *data, *sig;
1391	u32 data_len, sig_len;
1392	int ret;
1393
1394	if (trusted_keyring->has_ref) {
1395		/*
1396		 * Do the permission check deferred in bpf_lookup_user_key().
1397		 * See bpf_lookup_user_key() for more details.
1398		 *
1399		 * A call to key_task_permission() here would be redundant, as
1400		 * it is already done by keyring_search() called by
1401		 * find_asymmetric_key().
1402		 */
1403		ret = key_validate(trusted_keyring->key);
1404		if (ret < 0)
1405			return ret;
1406	}
1407
1408	data_len = __bpf_dynptr_size(data_ptr);
1409	data = __bpf_dynptr_data(data_ptr, data_len);
1410	sig_len = __bpf_dynptr_size(sig_ptr);
1411	sig = __bpf_dynptr_data(sig_ptr, sig_len);
1412
1413	return verify_pkcs7_signature(data, data_len, sig, sig_len,
1414				      trusted_keyring->key,
1415				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1416				      NULL);
1417}
1418#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1419
1420__bpf_kfunc_end_defs();
1421
1422BTF_KFUNCS_START(key_sig_kfunc_set)
1423BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1424BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1425BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1426#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1427BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1428#endif
1429BTF_KFUNCS_END(key_sig_kfunc_set)
1430
1431static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1432	.owner = THIS_MODULE,
1433	.set = &key_sig_kfunc_set,
1434};
1435
1436static int __init bpf_key_sig_kfuncs_init(void)
1437{
1438	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1439					 &bpf_key_sig_kfunc_set);
1440}
1441
1442late_initcall(bpf_key_sig_kfuncs_init);
1443#endif /* CONFIG_KEYS */
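/*
 * Usage sketch (illustrative, dynptr setup omitted and names hypothetical):
 * a sleepable program could verify a PKCS#7 signature against a keyring
 * looked up by serial, then drop the reference:
 *
 *	struct bpf_key *k = bpf_lookup_user_key(keyring_serial,
 *						KEY_LOOKUP_CREATE);
 *
 *	if (k) {
 *		err = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, k);
 *		bpf_key_put(k);
 *	}
 */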
1444
1445static const struct bpf_func_proto *
1446bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1447{
1448	switch (func_id) {
1449	case BPF_FUNC_map_lookup_elem:
1450		return &bpf_map_lookup_elem_proto;
1451	case BPF_FUNC_map_update_elem:
1452		return &bpf_map_update_elem_proto;
1453	case BPF_FUNC_map_delete_elem:
1454		return &bpf_map_delete_elem_proto;
1455	case BPF_FUNC_map_push_elem:
1456		return &bpf_map_push_elem_proto;
1457	case BPF_FUNC_map_pop_elem:
1458		return &bpf_map_pop_elem_proto;
1459	case BPF_FUNC_map_peek_elem:
1460		return &bpf_map_peek_elem_proto;
1461	case BPF_FUNC_map_lookup_percpu_elem:
1462		return &bpf_map_lookup_percpu_elem_proto;
1463	case BPF_FUNC_ktime_get_ns:
1464		return &bpf_ktime_get_ns_proto;
1465	case BPF_FUNC_ktime_get_boot_ns:
1466		return &bpf_ktime_get_boot_ns_proto;
1467	case BPF_FUNC_tail_call:
1468		return &bpf_tail_call_proto;
1469	case BPF_FUNC_get_current_task:
1470		return &bpf_get_current_task_proto;
1471	case BPF_FUNC_get_current_task_btf:
1472		return &bpf_get_current_task_btf_proto;
1473	case BPF_FUNC_task_pt_regs:
1474		return &bpf_task_pt_regs_proto;
1475	case BPF_FUNC_get_current_uid_gid:
1476		return &bpf_get_current_uid_gid_proto;
1477	case BPF_FUNC_get_current_comm:
1478		return &bpf_get_current_comm_proto;
1479	case BPF_FUNC_trace_printk:
1480		return bpf_get_trace_printk_proto();
1481	case BPF_FUNC_get_smp_processor_id:
1482		return &bpf_get_smp_processor_id_proto;
1483	case BPF_FUNC_get_numa_node_id:
1484		return &bpf_get_numa_node_id_proto;
1485	case BPF_FUNC_perf_event_read:
1486		return &bpf_perf_event_read_proto;
1487	case BPF_FUNC_get_prandom_u32:
1488		return &bpf_get_prandom_u32_proto;
1489	case BPF_FUNC_probe_write_user:
1490		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1491		       NULL : bpf_get_probe_write_proto();
1492	case BPF_FUNC_probe_read_user:
1493		return &bpf_probe_read_user_proto;
1494	case BPF_FUNC_probe_read_kernel:
1495		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1496		       NULL : &bpf_probe_read_kernel_proto;
1497	case BPF_FUNC_probe_read_user_str:
1498		return &bpf_probe_read_user_str_proto;
1499	case BPF_FUNC_probe_read_kernel_str:
1500		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1501		       NULL : &bpf_probe_read_kernel_str_proto;
1502#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1503	case BPF_FUNC_probe_read:
1504		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1505		       NULL : &bpf_probe_read_compat_proto;
1506	case BPF_FUNC_probe_read_str:
1507		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1508		       NULL : &bpf_probe_read_compat_str_proto;
1509#endif
1510#ifdef CONFIG_CGROUPS
1511	case BPF_FUNC_cgrp_storage_get:
1512		return &bpf_cgrp_storage_get_proto;
1513	case BPF_FUNC_cgrp_storage_delete:
1514		return &bpf_cgrp_storage_delete_proto;
1515	case BPF_FUNC_current_task_under_cgroup:
1516		return &bpf_current_task_under_cgroup_proto;
1517#endif
1518	case BPF_FUNC_send_signal:
1519		return &bpf_send_signal_proto;
1520	case BPF_FUNC_send_signal_thread:
1521		return &bpf_send_signal_thread_proto;
1522	case BPF_FUNC_perf_event_read_value:
1523		return &bpf_perf_event_read_value_proto;
1524	case BPF_FUNC_ringbuf_output:
1525		return &bpf_ringbuf_output_proto;
1526	case BPF_FUNC_ringbuf_reserve:
1527		return &bpf_ringbuf_reserve_proto;
1528	case BPF_FUNC_ringbuf_submit:
1529		return &bpf_ringbuf_submit_proto;
1530	case BPF_FUNC_ringbuf_discard:
1531		return &bpf_ringbuf_discard_proto;
1532	case BPF_FUNC_ringbuf_query:
1533		return &bpf_ringbuf_query_proto;
1534	case BPF_FUNC_jiffies64:
1535		return &bpf_jiffies64_proto;
1536	case BPF_FUNC_get_task_stack:
1537		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
1538				       : &bpf_get_task_stack_proto;
1539	case BPF_FUNC_copy_from_user:
1540		return &bpf_copy_from_user_proto;
1541	case BPF_FUNC_copy_from_user_task:
1542		return &bpf_copy_from_user_task_proto;
1543	case BPF_FUNC_snprintf_btf:
1544		return &bpf_snprintf_btf_proto;
1545	case BPF_FUNC_per_cpu_ptr:
1546		return &bpf_per_cpu_ptr_proto;
1547	case BPF_FUNC_this_cpu_ptr:
1548		return &bpf_this_cpu_ptr_proto;
1549	case BPF_FUNC_task_storage_get:
1550		if (bpf_prog_check_recur(prog))
1551			return &bpf_task_storage_get_recur_proto;
1552		return &bpf_task_storage_get_proto;
1553	case BPF_FUNC_task_storage_delete:
1554		if (bpf_prog_check_recur(prog))
1555			return &bpf_task_storage_delete_recur_proto;
1556		return &bpf_task_storage_delete_proto;
1557	case BPF_FUNC_for_each_map_elem:
1558		return &bpf_for_each_map_elem_proto;
1559	case BPF_FUNC_snprintf:
1560		return &bpf_snprintf_proto;
1561	case BPF_FUNC_get_func_ip:
1562		return &bpf_get_func_ip_proto_tracing;
1563	case BPF_FUNC_get_branch_snapshot:
1564		return &bpf_get_branch_snapshot_proto;
1565	case BPF_FUNC_find_vma:
1566		return &bpf_find_vma_proto;
1567	case BPF_FUNC_trace_vprintk:
1568		return bpf_get_trace_vprintk_proto();
1569	default:
1570		return bpf_base_func_proto(func_id, prog);
1571	}
1572}
1573
1574static bool is_kprobe_multi(const struct bpf_prog *prog)
1575{
1576	return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
1577	       prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1578}
1579
1580static inline bool is_kprobe_session(const struct bpf_prog *prog)
1581{
1582	return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1583}
1584
1585static inline bool is_uprobe_multi(const struct bpf_prog *prog)
1586{
1587	return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
1588	       prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1589}
1590
1591static inline bool is_uprobe_session(const struct bpf_prog *prog)
1592{
1593	return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1594}
1595
1596static const struct bpf_func_proto *
1597kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1598{
1599	switch (func_id) {
1600	case BPF_FUNC_perf_event_output:
1601		return &bpf_perf_event_output_proto;
1602	case BPF_FUNC_get_stackid:
1603		return &bpf_get_stackid_proto;
1604	case BPF_FUNC_get_stack:
1605		return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto;
1606#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1607	case BPF_FUNC_override_return:
1608		return &bpf_override_return_proto;
1609#endif
1610	case BPF_FUNC_get_func_ip:
1611		if (is_kprobe_multi(prog))
1612			return &bpf_get_func_ip_proto_kprobe_multi;
1613		if (is_uprobe_multi(prog))
1614			return &bpf_get_func_ip_proto_uprobe_multi;
1615		return &bpf_get_func_ip_proto_kprobe;
1616	case BPF_FUNC_get_attach_cookie:
1617		if (is_kprobe_multi(prog))
1618			return &bpf_get_attach_cookie_proto_kmulti;
1619		if (is_uprobe_multi(prog))
1620			return &bpf_get_attach_cookie_proto_umulti;
1621		return &bpf_get_attach_cookie_proto_trace;
1622	default:
1623		return bpf_tracing_func_proto(func_id, prog);
1624	}
1625}
1626
1627/* bpf+kprobe programs can access fields of 'struct pt_regs' */
1628static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1629					const struct bpf_prog *prog,
1630					struct bpf_insn_access_aux *info)
1631{
1632	if (off < 0 || off >= sizeof(struct pt_regs))
1633		return false;
1634	if (type != BPF_READ)
1635		return false;
1636	if (off % size != 0)
1637		return false;
1638	/*
1639	 * Assertion for 32 bit to make sure last 8 byte access
1640	 * (BPF_DW) to the last 4 byte member is disallowed.
1641	 */
1642	if (off + size > sizeof(struct pt_regs))
1643		return false;
1644
1645	return true;
1646}
1647
1648const struct bpf_verifier_ops kprobe_verifier_ops = {
1649	.get_func_proto  = kprobe_prog_func_proto,
1650	.is_valid_access = kprobe_prog_is_valid_access,
1651};
1652
1653const struct bpf_prog_ops kprobe_prog_ops = {
1654};
1655
1656BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1657	   u64, flags, void *, data, u64, size)
1658{
1659	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1660
1661	/*
1662	 * r1 points to the perf tracepoint buffer where the first 8 bytes are
1663	 * hidden from the bpf program and contain a pointer to 'struct pt_regs'.
1664	 * Fetch it from there and call the same bpf_perf_event_output() helper inline.
1665	 */
1666	return ____bpf_perf_event_output(regs, map, flags, data, size);
1667}
1668
1669static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1670	.func		= bpf_perf_event_output_tp,
1671	.gpl_only	= true,
1672	.ret_type	= RET_INTEGER,
1673	.arg1_type	= ARG_PTR_TO_CTX,
1674	.arg2_type	= ARG_CONST_MAP_PTR,
1675	.arg3_type	= ARG_ANYTHING,
1676	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1677	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1678};
1679
1680BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1681	   u64, flags)
1682{
1683	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1684
1685	/*
1686	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1687	 * the other helper's function body cannot be inlined due to being
1688	 * external, thus we need to call the raw helper function.
1689	 */
1690	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1691			       flags, 0, 0);
1692}
1693
1694static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1695	.func		= bpf_get_stackid_tp,
1696	.gpl_only	= true,
1697	.ret_type	= RET_INTEGER,
1698	.arg1_type	= ARG_PTR_TO_CTX,
1699	.arg2_type	= ARG_CONST_MAP_PTR,
1700	.arg3_type	= ARG_ANYTHING,
1701};
1702
1703BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1704	   u64, flags)
1705{
1706	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1707
1708	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1709			     (unsigned long) size, flags, 0);
1710}
1711
1712static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1713	.func		= bpf_get_stack_tp,
1714	.gpl_only	= true,
1715	.ret_type	= RET_INTEGER,
1716	.arg1_type	= ARG_PTR_TO_CTX,
1717	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1718	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1719	.arg4_type	= ARG_ANYTHING,
1720};
1721
1722static const struct bpf_func_proto *
1723tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1724{
1725	switch (func_id) {
1726	case BPF_FUNC_perf_event_output:
1727		return &bpf_perf_event_output_proto_tp;
1728	case BPF_FUNC_get_stackid:
1729		return &bpf_get_stackid_proto_tp;
1730	case BPF_FUNC_get_stack:
1731		return &bpf_get_stack_proto_tp;
1732	case BPF_FUNC_get_attach_cookie:
1733		return &bpf_get_attach_cookie_proto_trace;
1734	default:
1735		return bpf_tracing_func_proto(func_id, prog);
1736	}
1737}
1738
1739static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1740				    const struct bpf_prog *prog,
1741				    struct bpf_insn_access_aux *info)
1742{
1743	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1744		return false;
1745	if (type != BPF_READ)
1746		return false;
1747	if (off % size != 0)
1748		return false;
1749
1750	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1751	return true;
1752}
1753
1754const struct bpf_verifier_ops tracepoint_verifier_ops = {
1755	.get_func_proto  = tp_prog_func_proto,
1756	.is_valid_access = tp_prog_is_valid_access,
1757};
1758
1759const struct bpf_prog_ops tracepoint_prog_ops = {
1760};
1761
1762BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1763	   struct bpf_perf_event_value *, buf, u32, size)
1764{
1765	int err = -EINVAL;
1766
1767	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1768		goto clear;
1769	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1770				    &buf->running);
1771	if (unlikely(err))
1772		goto clear;
1773	return 0;
1774clear:
1775	memset(buf, 0, size);
1776	return err;
1777}
1778
1779static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1780	.func		= bpf_perf_prog_read_value,
1781	.gpl_only	= true,
1782	.ret_type	= RET_INTEGER,
1783	.arg1_type	= ARG_PTR_TO_CTX,
1784	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1785	.arg3_type	= ARG_CONST_SIZE,
1786};
1787
1788BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1789	   void *, buf, u32, size, u64, flags)
1790{
1791	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1792	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1793	u32 to_copy;
1794
1795	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1796		return -EINVAL;
1797
1798	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1799		return -ENOENT;
1800
1801	if (unlikely(!br_stack))
1802		return -ENOENT;
1803
1804	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1805		return br_stack->nr * br_entry_size;
1806
1807	if (!buf || (size % br_entry_size != 0))
1808		return -EINVAL;
1809
1810	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1811	memcpy(buf, br_stack->entries, to_copy);
1812
1813	return to_copy;
1814}
1815
1816static const struct bpf_func_proto bpf_read_branch_records_proto = {
1817	.func           = bpf_read_branch_records,
1818	.gpl_only       = true,
1819	.ret_type       = RET_INTEGER,
1820	.arg1_type      = ARG_PTR_TO_CTX,
1821	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1822	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1823	.arg4_type      = ARG_ANYTHING,
1824};
1825
1826static const struct bpf_func_proto *
1827pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1828{
1829	switch (func_id) {
1830	case BPF_FUNC_perf_event_output:
1831		return &bpf_perf_event_output_proto_tp;
1832	case BPF_FUNC_get_stackid:
1833		return &bpf_get_stackid_proto_pe;
1834	case BPF_FUNC_get_stack:
1835		return &bpf_get_stack_proto_pe;
1836	case BPF_FUNC_perf_prog_read_value:
1837		return &bpf_perf_prog_read_value_proto;
1838	case BPF_FUNC_read_branch_records:
1839		return &bpf_read_branch_records_proto;
1840	case BPF_FUNC_get_attach_cookie:
1841		return &bpf_get_attach_cookie_proto_pe;
1842	default:
1843		return bpf_tracing_func_proto(func_id, prog);
1844	}
1845}
1846
1847/*
1848 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1849 * to avoid potential recursive reuse issue when/if tracepoints are added
1850 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1851 *
1852 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1853 * in normal, irq, and nmi context.
1854 */
1855struct bpf_raw_tp_regs {
1856	struct pt_regs regs[3];
1857};
1858static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1859static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1860static struct pt_regs *get_bpf_raw_tp_regs(void)
1861{
1862	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1863	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1864
1865	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1866		this_cpu_dec(bpf_raw_tp_nest_level);
1867		return ERR_PTR(-EBUSY);
1868	}
1869
1870	return &tp_regs->regs[nest_level - 1];
1871}
1872
1873static void put_bpf_raw_tp_regs(void)
1874{
1875	this_cpu_dec(bpf_raw_tp_nest_level);
1876}
1877
1878BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1879	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1880{
1881	struct pt_regs *regs = get_bpf_raw_tp_regs();
1882	int ret;
1883
1884	if (IS_ERR(regs))
1885		return PTR_ERR(regs);
1886
1887	perf_fetch_caller_regs(regs);
1888	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1889
1890	put_bpf_raw_tp_regs();
1891	return ret;
1892}
1893
1894static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1895	.func		= bpf_perf_event_output_raw_tp,
1896	.gpl_only	= true,
1897	.ret_type	= RET_INTEGER,
1898	.arg1_type	= ARG_PTR_TO_CTX,
1899	.arg2_type	= ARG_CONST_MAP_PTR,
1900	.arg3_type	= ARG_ANYTHING,
1901	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1902	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1903};
1904
1905extern const struct bpf_func_proto bpf_skb_output_proto;
1906extern const struct bpf_func_proto bpf_xdp_output_proto;
1907extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1908
1909BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1910	   struct bpf_map *, map, u64, flags)
1911{
1912	struct pt_regs *regs = get_bpf_raw_tp_regs();
1913	int ret;
1914
1915	if (IS_ERR(regs))
1916		return PTR_ERR(regs);
1917
1918	perf_fetch_caller_regs(regs);
1919	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1920	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1921			      flags, 0, 0);
1922	put_bpf_raw_tp_regs();
1923	return ret;
1924}
1925
1926static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1927	.func		= bpf_get_stackid_raw_tp,
1928	.gpl_only	= true,
1929	.ret_type	= RET_INTEGER,
1930	.arg1_type	= ARG_PTR_TO_CTX,
1931	.arg2_type	= ARG_CONST_MAP_PTR,
1932	.arg3_type	= ARG_ANYTHING,
1933};
1934
1935BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1936	   void *, buf, u32, size, u64, flags)
1937{
1938	struct pt_regs *regs = get_bpf_raw_tp_regs();
1939	int ret;
1940
1941	if (IS_ERR(regs))
1942		return PTR_ERR(regs);
1943
1944	perf_fetch_caller_regs(regs);
1945	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1946			    (unsigned long) size, flags, 0);
1947	put_bpf_raw_tp_regs();
1948	return ret;
1949}
1950
1951static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1952	.func		= bpf_get_stack_raw_tp,
1953	.gpl_only	= true,
1954	.ret_type	= RET_INTEGER,
1955	.arg1_type	= ARG_PTR_TO_CTX,
1956	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1957	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1958	.arg4_type	= ARG_ANYTHING,
1959};
1960
1961static const struct bpf_func_proto *
1962raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1963{
1964	switch (func_id) {
1965	case BPF_FUNC_perf_event_output:
1966		return &bpf_perf_event_output_proto_raw_tp;
1967	case BPF_FUNC_get_stackid:
1968		return &bpf_get_stackid_proto_raw_tp;
1969	case BPF_FUNC_get_stack:
1970		return &bpf_get_stack_proto_raw_tp;
1971	case BPF_FUNC_get_attach_cookie:
1972		return &bpf_get_attach_cookie_proto_tracing;
1973	default:
1974		return bpf_tracing_func_proto(func_id, prog);
1975	}
1976}
1977
1978const struct bpf_func_proto *
1979tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1980{
1981	const struct bpf_func_proto *fn;
1982
1983	switch (func_id) {
1984#ifdef CONFIG_NET
1985	case BPF_FUNC_skb_output:
1986		return &bpf_skb_output_proto;
1987	case BPF_FUNC_xdp_output:
1988		return &bpf_xdp_output_proto;
1989	case BPF_FUNC_skc_to_tcp6_sock:
1990		return &bpf_skc_to_tcp6_sock_proto;
1991	case BPF_FUNC_skc_to_tcp_sock:
1992		return &bpf_skc_to_tcp_sock_proto;
1993	case BPF_FUNC_skc_to_tcp_timewait_sock:
1994		return &bpf_skc_to_tcp_timewait_sock_proto;
1995	case BPF_FUNC_skc_to_tcp_request_sock:
1996		return &bpf_skc_to_tcp_request_sock_proto;
1997	case BPF_FUNC_skc_to_udp6_sock:
1998		return &bpf_skc_to_udp6_sock_proto;
1999	case BPF_FUNC_skc_to_unix_sock:
2000		return &bpf_skc_to_unix_sock_proto;
2001	case BPF_FUNC_skc_to_mptcp_sock:
2002		return &bpf_skc_to_mptcp_sock_proto;
2003	case BPF_FUNC_sk_storage_get:
2004		return &bpf_sk_storage_get_tracing_proto;
2005	case BPF_FUNC_sk_storage_delete:
2006		return &bpf_sk_storage_delete_tracing_proto;
2007	case BPF_FUNC_sock_from_file:
2008		return &bpf_sock_from_file_proto;
2009	case BPF_FUNC_get_socket_cookie:
2010		return &bpf_get_socket_ptr_cookie_proto;
2011	case BPF_FUNC_xdp_get_buff_len:
2012		return &bpf_xdp_get_buff_len_trace_proto;
2013#endif
2014	case BPF_FUNC_seq_printf:
2015		return prog->expected_attach_type == BPF_TRACE_ITER ?
2016		       &bpf_seq_printf_proto :
2017		       NULL;
2018	case BPF_FUNC_seq_write:
2019		return prog->expected_attach_type == BPF_TRACE_ITER ?
2020		       &bpf_seq_write_proto :
2021		       NULL;
2022	case BPF_FUNC_seq_printf_btf:
2023		return prog->expected_attach_type == BPF_TRACE_ITER ?
2024		       &bpf_seq_printf_btf_proto :
2025		       NULL;
2026	case BPF_FUNC_d_path:
2027		return &bpf_d_path_proto;
2028	case BPF_FUNC_get_func_arg:
2029		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
2030	case BPF_FUNC_get_func_ret:
2031		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
2032	case BPF_FUNC_get_func_arg_cnt:
2033		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
2034	case BPF_FUNC_get_attach_cookie:
2035		if (prog->type == BPF_PROG_TYPE_TRACING &&
2036		    prog->expected_attach_type == BPF_TRACE_RAW_TP)
2037			return &bpf_get_attach_cookie_proto_tracing;
2038		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2039	default:
2040		fn = raw_tp_prog_func_proto(func_id, prog);
2041		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2042			fn = bpf_iter_get_func_proto(func_id, prog);
2043		return fn;
2044	}
2045}
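/*
 * Illustrative only (not part of this file): the seq_* helpers above are
 * restricted to BPF_TRACE_ITER programs. On the user side an iterator
 * program might look roughly like the sketch below; the SEC() name, the
 * ctx layout and the BPF_SEQ_PRINTF convenience macro come from
 * libbpf/vmlinux.h and are assumptions here, not defined in this file.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%d %s\n",
 *				       task->pid, task->comm);
 *		return 0;
 *	}
 */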
2046
2047static bool raw_tp_prog_is_valid_access(int off, int size,
2048					enum bpf_access_type type,
2049					const struct bpf_prog *prog,
2050					struct bpf_insn_access_aux *info)
2051{
2052	return bpf_tracing_ctx_access(off, size, type);
2053}
2054
2055static bool tracing_prog_is_valid_access(int off, int size,
2056					 enum bpf_access_type type,
2057					 const struct bpf_prog *prog,
2058					 struct bpf_insn_access_aux *info)
2059{
2060	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2061}
2062
2063int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2064				     const union bpf_attr *kattr,
2065				     union bpf_attr __user *uattr)
2066{
2067	return -ENOTSUPP;
2068}
2069
2070const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2071	.get_func_proto  = raw_tp_prog_func_proto,
2072	.is_valid_access = raw_tp_prog_is_valid_access,
2073};
2074
2075const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2076#ifdef CONFIG_NET
2077	.test_run = bpf_prog_test_run_raw_tp,
2078#endif
2079};
2080
2081const struct bpf_verifier_ops tracing_verifier_ops = {
2082	.get_func_proto  = tracing_prog_func_proto,
2083	.is_valid_access = tracing_prog_is_valid_access,
2084};
2085
2086const struct bpf_prog_ops tracing_prog_ops = {
2087	.test_run = bpf_prog_test_run_tracing,
2088};
2089
2090static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2091						 enum bpf_access_type type,
2092						 const struct bpf_prog *prog,
2093						 struct bpf_insn_access_aux *info)
2094{
2095	if (off == 0) {
2096		if (size != sizeof(u64) || type != BPF_READ)
2097			return false;
2098		info->reg_type = PTR_TO_TP_BUFFER;
2099	}
2100	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2101}
2102
2103const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2104	.get_func_proto  = raw_tp_prog_func_proto,
2105	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2106};
2107
2108const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2109};
2110
2111static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2112				    const struct bpf_prog *prog,
2113				    struct bpf_insn_access_aux *info)
2114{
2115	const int size_u64 = sizeof(u64);
2116
2117	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2118		return false;
2119	if (type != BPF_READ)
2120		return false;
2121	if (off % size != 0) {
2122		if (sizeof(unsigned long) != 4)
2123			return false;
2124		if (size != 8)
2125			return false;
2126		if (off % size != 4)
2127			return false;
2128	}
2129
2130	switch (off) {
2131	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2132		bpf_ctx_record_field_size(info, size_u64);
2133		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2134			return false;
2135		break;
2136	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2137		bpf_ctx_record_field_size(info, size_u64);
2138		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2139			return false;
2140		break;
2141	default:
2142		if (size != sizeof(long))
2143			return false;
2144	}
2145
2146	return true;
2147}
2148
2149static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2150				      const struct bpf_insn *si,
2151				      struct bpf_insn *insn_buf,
2152				      struct bpf_prog *prog, u32 *target_size)
2153{
2154	struct bpf_insn *insn = insn_buf;
2155
2156	switch (si->off) {
2157	case offsetof(struct bpf_perf_event_data, sample_period):
2158		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2159						       data), si->dst_reg, si->src_reg,
2160				      offsetof(struct bpf_perf_event_data_kern, data));
2161		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2162				      bpf_target_off(struct perf_sample_data, period, 8,
2163						     target_size));
2164		break;
2165	case offsetof(struct bpf_perf_event_data, addr):
2166		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2167						       data), si->dst_reg, si->src_reg,
2168				      offsetof(struct bpf_perf_event_data_kern, data));
2169		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2170				      bpf_target_off(struct perf_sample_data, addr, 8,
2171						     target_size));
2172		break;
2173	default:
2174		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2175						       regs), si->dst_reg, si->src_reg,
2176				      offsetof(struct bpf_perf_event_data_kern, regs));
2177		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2178				      si->off);
2179		break;
2180	}
2181
2182	return insn - insn_buf;
2183}
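/*
 * Illustrative only: for a perf_event program such as
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		__u64 period = ctx->sample_period;
 *		...
 *	}
 *
 * the single ctx load is rewritten by pe_prog_convert_ctx_access() into two
 * loads: the data pointer from struct bpf_perf_event_data_kern, then
 * perf_sample_data::period. sample_period and addr are handled that way;
 * any other offset is redirected through the regs pointer instead.
 */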
2184
2185const struct bpf_verifier_ops perf_event_verifier_ops = {
2186	.get_func_proto		= pe_prog_func_proto,
2187	.is_valid_access	= pe_prog_is_valid_access,
2188	.convert_ctx_access	= pe_prog_convert_ctx_access,
2189};
2190
2191const struct bpf_prog_ops perf_event_prog_ops = {
2192};
2193
2194static DEFINE_MUTEX(bpf_event_mutex);
2195
2196#define BPF_TRACE_MAX_PROGS 64
2197
2198int perf_event_attach_bpf_prog(struct perf_event *event,
2199			       struct bpf_prog *prog,
2200			       u64 bpf_cookie)
2201{
2202	struct bpf_prog_array *old_array;
2203	struct bpf_prog_array *new_array;
2204	int ret = -EEXIST;
2205
2206	/*
2207	 * Kprobe override only works if the kprobe is on the function entry,
2208	 * and only if the function is on the error-injection opt-in list.
2209	 */
2210	if (prog->kprobe_override &&
2211	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2212	     !trace_kprobe_error_injectable(event->tp_event)))
2213		return -EINVAL;
2214
2215	mutex_lock(&bpf_event_mutex);
2216
2217	if (event->prog)
2218		goto unlock;
2219
2220	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2221	if (old_array &&
2222	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2223		ret = -E2BIG;
2224		goto unlock;
2225	}
2226
2227	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2228	if (ret < 0)
2229		goto unlock;
2230
2231	/* set the new array to event->tp_event and set event->prog */
2232	event->prog = prog;
2233	event->bpf_cookie = bpf_cookie;
2234	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2235	bpf_prog_array_free_sleepable(old_array);
2236
2237unlock:
2238	mutex_unlock(&bpf_event_mutex);
2239	return ret;
2240}
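/*
 * Hedged note (not part of this file): this attach path is normally reached
 * from user space via the PERF_EVENT_IOC_SET_BPF ioctl on a
 * kprobe/uprobe/tracepoint perf event fd, roughly:
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd);
 *
 * with the bpf_cookie-carrying variant going through BPF_LINK_CREATE instead.
 */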
2241
2242void perf_event_detach_bpf_prog(struct perf_event *event)
2243{
2244	struct bpf_prog_array *old_array;
2245	struct bpf_prog_array *new_array;
2246	int ret;
2247
2248	mutex_lock(&bpf_event_mutex);
2249
2250	if (!event->prog)
2251		goto unlock;
2252
2253	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2254	if (!old_array)
2255		goto put;
2256
2257	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2258	if (ret < 0) {
2259		bpf_prog_array_delete_safe(old_array, event->prog);
2260	} else {
2261		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2262		bpf_prog_array_free_sleepable(old_array);
2263	}
2264
2265put:
2266	/*
2267	 * It could be that the bpf_prog is not sleepable (and will be freed
2268	 * via normal RCU), but is called from a point that supports sleepable
2269	 * programs and uses tasks-trace-RCU.
2270	 */
2271	synchronize_rcu_tasks_trace();
2272
2273	bpf_prog_put(event->prog);
2274	event->prog = NULL;
2275
2276unlock:
2277	mutex_unlock(&bpf_event_mutex);
2278}
2279
2280int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2281{
2282	struct perf_event_query_bpf __user *uquery = info;
2283	struct perf_event_query_bpf query = {};
2284	struct bpf_prog_array *progs;
2285	u32 *ids, prog_cnt, ids_len;
2286	int ret;
2287
2288	if (!perfmon_capable())
2289		return -EPERM;
2290	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2291		return -EINVAL;
2292	if (copy_from_user(&query, uquery, sizeof(query)))
2293		return -EFAULT;
2294
2295	ids_len = query.ids_len;
2296	if (ids_len > BPF_TRACE_MAX_PROGS)
2297		return -E2BIG;
2298	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2299	if (!ids)
2300		return -ENOMEM;
2301	/*
2302	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2303	 * is required when the user only wants to check uquery->prog_cnt.
2304	 * There is no need to check for it here, since that case is handled
2305	 * gracefully in bpf_prog_array_copy_info().
2306	 */
2307
2308	mutex_lock(&bpf_event_mutex);
2309	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2310	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2311	mutex_unlock(&bpf_event_mutex);
2312
2313	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2314	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2315		ret = -EFAULT;
2316
2317	kfree(ids);
2318	return ret;
2319}
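/*
 * Hedged userspace sketch (not part of this file): the query above backs the
 * PERF_EVENT_IOC_QUERY_BPF ioctl. Assuming "perf_fd" is a tracepoint perf
 * event fd, user space might drive it roughly like this:
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 64 * sizeof(__u32));
 *	query->ids_len = 64;
 *	if (!ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query))
 *		printf("%u BPF program(s) attached\n", query->prog_cnt);
 *	free(query);
 */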
2320
2321extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2322extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2323
2324struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2325{
2326	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2327
2328	for (; btp < __stop__bpf_raw_tp; btp++) {
2329		if (!strcmp(btp->tp->name, name))
2330			return btp;
2331	}
2332
2333	return bpf_get_raw_tracepoint_module(name);
2334}
2335
2336void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2337{
2338	struct module *mod;
2339
2340	preempt_disable();
2341	mod = __module_address((unsigned long)btp);
2342	module_put(mod);
2343	preempt_enable();
2344}
2345
2346static __always_inline
2347void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
2348{
2349	struct bpf_prog *prog = link->link.prog;
2350	struct bpf_run_ctx *old_run_ctx;
2351	struct bpf_trace_run_ctx run_ctx;
2352
2353	cant_sleep();
2354	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2355		bpf_prog_inc_misses_counter(prog);
2356		goto out;
2357	}
2358
2359	run_ctx.bpf_cookie = link->cookie;
2360	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2361
2362	rcu_read_lock();
2363	(void) bpf_prog_run(prog, args);
2364	rcu_read_unlock();
2365
2366	bpf_reset_run_ctx(old_run_ctx);
2367out:
2368	this_cpu_dec(*(prog->active));
2369}
2370
2371#define UNPACK(...)			__VA_ARGS__
2372#define REPEAT_1(FN, DL, X, ...)	FN(X)
2373#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2374#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2375#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2376#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2377#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2378#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2379#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2380#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2381#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2382#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2383#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2384#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2385
2386#define SARG(X)		u64 arg##X
2387#define COPY(X)		args[X] = arg##X
2388
2389#define __DL_COM	(,)
2390#define __DL_SEM	(;)
2391
2392#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2393
2394#define BPF_TRACE_DEFN_x(x)						\
2395	void bpf_trace_run##x(struct bpf_raw_tp_link *link,		\
2396			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2397	{								\
2398		u64 args[x];						\
2399		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2400		__bpf_trace_run(link, args);				\
2401	}								\
2402	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2403BPF_TRACE_DEFN_x(1);
2404BPF_TRACE_DEFN_x(2);
2405BPF_TRACE_DEFN_x(3);
2406BPF_TRACE_DEFN_x(4);
2407BPF_TRACE_DEFN_x(5);
2408BPF_TRACE_DEFN_x(6);
2409BPF_TRACE_DEFN_x(7);
2410BPF_TRACE_DEFN_x(8);
2411BPF_TRACE_DEFN_x(9);
2412BPF_TRACE_DEFN_x(10);
2413BPF_TRACE_DEFN_x(11);
2414BPF_TRACE_DEFN_x(12);
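/*
 * For illustration, BPF_TRACE_DEFN_x(2) above expands (modulo whitespace) to:
 *
 *	void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(link, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * i.e. one exported runner per supported argument count, each packing its
 * u64 arguments into an on-stack array for __bpf_trace_run().
 */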
2415
2416int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2417{
2418	struct tracepoint *tp = btp->tp;
2419	struct bpf_prog *prog = link->link.prog;
2420
2421	/*
2422	 * check that program doesn't access arguments beyond what's
2423	 * available in this tracepoint
2424	 */
2425	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2426		return -EINVAL;
2427
2428	if (prog->aux->max_tp_access > btp->writable_size)
2429		return -EINVAL;
2430
2431	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
2432}
2433
2434int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2435{
2436	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
2437}
2438
2439int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2440			    u32 *fd_type, const char **buf,
2441			    u64 *probe_offset, u64 *probe_addr,
2442			    unsigned long *missed)
2443{
2444	bool is_tracepoint, is_syscall_tp;
2445	struct bpf_prog *prog;
2446	int flags, err = 0;
2447
2448	prog = event->prog;
2449	if (!prog)
2450		return -ENOENT;
2451
2452	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2453	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2454		return -EOPNOTSUPP;
2455
2456	*prog_id = prog->aux->id;
2457	flags = event->tp_event->flags;
2458	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2459	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2460
2461	if (is_tracepoint || is_syscall_tp) {
2462		*buf = is_tracepoint ? event->tp_event->tp->name
2463				     : event->tp_event->name;
2464		/* We allow NULL pointer for tracepoint */
2465		if (fd_type)
2466			*fd_type = BPF_FD_TYPE_TRACEPOINT;
2467		if (probe_offset)
2468			*probe_offset = 0x0;
2469		if (probe_addr)
2470			*probe_addr = 0x0;
2471	} else {
2472		/* kprobe/uprobe */
2473		err = -EOPNOTSUPP;
2474#ifdef CONFIG_KPROBE_EVENTS
2475		if (flags & TRACE_EVENT_FL_KPROBE)
2476			err = bpf_get_kprobe_info(event, fd_type, buf,
2477						  probe_offset, probe_addr, missed,
2478						  event->attr.type == PERF_TYPE_TRACEPOINT);
2479#endif
2480#ifdef CONFIG_UPROBE_EVENTS
2481		if (flags & TRACE_EVENT_FL_UPROBE)
2482			err = bpf_get_uprobe_info(event, fd_type, buf,
2483						  probe_offset, probe_addr,
2484						  event->attr.type == PERF_TYPE_TRACEPOINT);
2485#endif
2486	}
2487
2488	return err;
2489}
2490
2491static int __init send_signal_irq_work_init(void)
2492{
2493	int cpu;
2494	struct send_signal_irq_work *work;
2495
2496	for_each_possible_cpu(cpu) {
2497		work = per_cpu_ptr(&send_signal_work, cpu);
2498		init_irq_work(&work->irq_work, do_bpf_send_signal);
2499	}
2500	return 0;
2501}
2502
2503subsys_initcall(send_signal_irq_work_init);
2504
2505#ifdef CONFIG_MODULES
2506static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2507			    void *module)
2508{
2509	struct bpf_trace_module *btm, *tmp;
2510	struct module *mod = module;
2511	int ret = 0;
2512
2513	if (mod->num_bpf_raw_events == 0 ||
2514	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2515		goto out;
2516
2517	mutex_lock(&bpf_module_mutex);
2518
2519	switch (op) {
2520	case MODULE_STATE_COMING:
2521		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2522		if (btm) {
2523			btm->module = module;
2524			list_add(&btm->list, &bpf_trace_modules);
2525		} else {
2526			ret = -ENOMEM;
2527		}
2528		break;
2529	case MODULE_STATE_GOING:
2530		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2531			if (btm->module == module) {
2532				list_del(&btm->list);
2533				kfree(btm);
2534				break;
2535			}
2536		}
2537		break;
2538	}
2539
2540	mutex_unlock(&bpf_module_mutex);
2541
2542out:
2543	return notifier_from_errno(ret);
2544}
2545
2546static struct notifier_block bpf_module_nb = {
2547	.notifier_call = bpf_event_notify,
2548};
2549
2550static int __init bpf_event_init(void)
2551{
2552	register_module_notifier(&bpf_module_nb);
2553	return 0;
2554}
2555
2556fs_initcall(bpf_event_init);
2557#endif /* CONFIG_MODULES */
2558
2559struct bpf_session_run_ctx {
2560	struct bpf_run_ctx run_ctx;
2561	bool is_return;
2562	void *data;
2563};
2564
2565#ifdef CONFIG_FPROBE
2566struct bpf_kprobe_multi_link {
2567	struct bpf_link link;
2568	struct fprobe fp;
2569	unsigned long *addrs;
2570	u64 *cookies;
2571	u32 cnt;
2572	u32 mods_cnt;
2573	struct module **mods;
2574	u32 flags;
2575};
2576
2577struct bpf_kprobe_multi_run_ctx {
2578	struct bpf_session_run_ctx session_ctx;
2579	struct bpf_kprobe_multi_link *link;
2580	unsigned long entry_ip;
2581};
2582
2583struct user_syms {
2584	const char **syms;
2585	char *buf;
2586};
2587
2588static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2589{
2590	unsigned long __user usymbol;
2591	const char **syms = NULL;
2592	char *buf = NULL, *p;
2593	int err = -ENOMEM;
2594	unsigned int i;
2595
2596	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2597	if (!syms)
2598		goto error;
2599
2600	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2601	if (!buf)
2602		goto error;
2603
2604	for (p = buf, i = 0; i < cnt; i++) {
2605		if (__get_user(usymbol, usyms + i)) {
2606			err = -EFAULT;
2607			goto error;
2608		}
2609		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2610		if (err == KSYM_NAME_LEN)
2611			err = -E2BIG;
2612		if (err < 0)
2613			goto error;
2614		syms[i] = p;
2615		p += err + 1;
2616	}
2617
2618	us->syms = syms;
2619	us->buf = buf;
2620	return 0;
2621
2622error:
2623	if (err) {
2624		kvfree(syms);
2625		kvfree(buf);
2626	}
2627	return err;
2628}
2629
2630static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2631{
2632	u32 i;
2633
2634	for (i = 0; i < cnt; i++)
2635		module_put(mods[i]);
2636}
2637
2638static void free_user_syms(struct user_syms *us)
2639{
2640	kvfree(us->syms);
2641	kvfree(us->buf);
2642}
2643
2644static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2645{
2646	struct bpf_kprobe_multi_link *kmulti_link;
2647
2648	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2649	unregister_fprobe(&kmulti_link->fp);
2650	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2651}
2652
2653static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2654{
2655	struct bpf_kprobe_multi_link *kmulti_link;
2656
2657	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2658	kvfree(kmulti_link->addrs);
2659	kvfree(kmulti_link->cookies);
2660	kfree(kmulti_link->mods);
2661	kfree(kmulti_link);
2662}
2663
2664static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2665						struct bpf_link_info *info)
2666{
2667	u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2668	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2669	struct bpf_kprobe_multi_link *kmulti_link;
2670	u32 ucount = info->kprobe_multi.count;
2671	int err = 0, i;
2672
2673	if (!uaddrs ^ !ucount)
2674		return -EINVAL;
2675	if (ucookies && !ucount)
2676		return -EINVAL;
2677
2678	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2679	info->kprobe_multi.count = kmulti_link->cnt;
2680	info->kprobe_multi.flags = kmulti_link->flags;
2681	info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2682
2683	if (!uaddrs)
2684		return 0;
2685	if (ucount < kmulti_link->cnt)
2686		err = -ENOSPC;
2687	else
2688		ucount = kmulti_link->cnt;
2689
2690	if (ucookies) {
2691		if (kmulti_link->cookies) {
2692			if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2693				return -EFAULT;
2694		} else {
2695			for (i = 0; i < ucount; i++) {
2696				if (put_user(0, ucookies + i))
2697					return -EFAULT;
2698			}
2699		}
2700	}
2701
2702	if (kallsyms_show_value(current_cred())) {
2703		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2704			return -EFAULT;
2705	} else {
2706		for (i = 0; i < ucount; i++) {
2707			if (put_user(0, uaddrs + i))
2708				return -EFAULT;
2709		}
2710	}
2711	return err;
2712}
2713
2714static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2715	.release = bpf_kprobe_multi_link_release,
2716	.dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2717	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2718};
2719
2720static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2721{
2722	const struct bpf_kprobe_multi_link *link = priv;
2723	unsigned long *addr_a = a, *addr_b = b;
2724	u64 *cookie_a, *cookie_b;
2725
2726	cookie_a = link->cookies + (addr_a - link->addrs);
2727	cookie_b = link->cookies + (addr_b - link->addrs);
2728
2729	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2730	swap(*addr_a, *addr_b);
2731	swap(*cookie_a, *cookie_b);
2732}
2733
2734static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2735{
2736	const unsigned long *addr_a = a, *addr_b = b;
2737
2738	if (*addr_a == *addr_b)
2739		return 0;
2740	return *addr_a < *addr_b ? -1 : 1;
2741}
2742
2743static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2744{
2745	return bpf_kprobe_multi_addrs_cmp(a, b);
2746}
2747
2748static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2749{
2750	struct bpf_kprobe_multi_run_ctx *run_ctx;
2751	struct bpf_kprobe_multi_link *link;
2752	u64 *cookie, entry_ip;
2753	unsigned long *addr;
2754
2755	if (WARN_ON_ONCE(!ctx))
2756		return 0;
2757	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2758			       session_ctx.run_ctx);
2759	link = run_ctx->link;
2760	if (!link->cookies)
2761		return 0;
2762	entry_ip = run_ctx->entry_ip;
2763	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2764		       bpf_kprobe_multi_addrs_cmp);
2765	if (!addr)
2766		return 0;
2767	cookie = link->cookies + (addr - link->addrs);
2768	return *cookie;
2769}
2770
2771static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2772{
2773	struct bpf_kprobe_multi_run_ctx *run_ctx;
2774
2775	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2776			       session_ctx.run_ctx);
2777	return run_ctx->entry_ip;
2778}
2779
2780static int
2781kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2782			   unsigned long entry_ip, struct pt_regs *regs,
2783			   bool is_return, void *data)
2784{
2785	struct bpf_kprobe_multi_run_ctx run_ctx = {
2786		.session_ctx = {
2787			.is_return = is_return,
2788			.data = data,
2789		},
2790		.link = link,
2791		.entry_ip = entry_ip,
2792	};
2793	struct bpf_run_ctx *old_run_ctx;
2794	int err;
2795
2796	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2797		bpf_prog_inc_misses_counter(link->link.prog);
2798		err = 0;
2799		goto out;
2800	}
2801
2802	migrate_disable();
2803	rcu_read_lock();
2804	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
2805	err = bpf_prog_run(link->link.prog, regs);
2806	bpf_reset_run_ctx(old_run_ctx);
2807	rcu_read_unlock();
2808	migrate_enable();
2809
2810 out:
2811	__this_cpu_dec(bpf_prog_active);
2812	return err;
2813}
2814
2815static int
2816kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2817			  unsigned long ret_ip, struct pt_regs *regs,
2818			  void *data)
2819{
2820	struct bpf_kprobe_multi_link *link;
2821	int err;
2822
2823	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2824	err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false, data);
2825	return is_kprobe_session(link->link.prog) ? err : 0;
2826}
2827
2828static void
2829kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2830			       unsigned long ret_ip, struct pt_regs *regs,
2831			       void *data)
2832{
2833	struct bpf_kprobe_multi_link *link;
2834
2835	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2836	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true, data);
2837}
2838
2839static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2840{
2841	const char **str_a = (const char **) a;
2842	const char **str_b = (const char **) b;
2843
2844	return strcmp(*str_a, *str_b);
2845}
2846
2847struct multi_symbols_sort {
2848	const char **funcs;
2849	u64 *cookies;
2850};
2851
2852static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2853{
2854	const struct multi_symbols_sort *data = priv;
2855	const char **name_a = a, **name_b = b;
2856
2857	swap(*name_a, *name_b);
2858
2859	/* If defined, also swap the related cookies. */
2860	if (data->cookies) {
2861		u64 *cookie_a, *cookie_b;
2862
2863		cookie_a = data->cookies + (name_a - data->funcs);
2864		cookie_b = data->cookies + (name_b - data->funcs);
2865		swap(*cookie_a, *cookie_b);
2866	}
2867}
2868
2869struct modules_array {
2870	struct module **mods;
2871	int mods_cnt;
2872	int mods_cap;
2873};
2874
2875static int add_module(struct modules_array *arr, struct module *mod)
2876{
2877	struct module **mods;
2878
2879	if (arr->mods_cnt == arr->mods_cap) {
2880		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2881		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2882		if (!mods)
2883			return -ENOMEM;
2884		arr->mods = mods;
2885	}
2886
2887	arr->mods[arr->mods_cnt] = mod;
2888	arr->mods_cnt++;
2889	return 0;
2890}
2891
2892static bool has_module(struct modules_array *arr, struct module *mod)
2893{
2894	int i;
2895
2896	for (i = arr->mods_cnt - 1; i >= 0; i--) {
2897		if (arr->mods[i] == mod)
2898			return true;
2899	}
2900	return false;
2901}
2902
2903static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2904{
2905	struct modules_array arr = {};
2906	u32 i, err = 0;
2907
2908	for (i = 0; i < addrs_cnt; i++) {
2909		struct module *mod;
2910
2911		preempt_disable();
2912		mod = __module_address(addrs[i]);
2913		/* Either no module or it's already stored */
2914		if (!mod || has_module(&arr, mod)) {
2915			preempt_enable();
2916			continue;
2917		}
2918		if (!try_module_get(mod))
2919			err = -EINVAL;
2920		preempt_enable();
2921		if (err)
2922			break;
2923		err = add_module(&arr, mod);
2924		if (err) {
2925			module_put(mod);
2926			break;
2927		}
2928	}
2929
2930	/* We return either err < 0 in case of error, ... */
2931	if (err) {
2932		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2933		kfree(arr.mods);
2934		return err;
2935	}
2936
2937	/* or number of modules found if everything is ok. */
2938	*mods = arr.mods;
2939	return arr.mods_cnt;
2940}
2941
2942static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2943{
2944	u32 i;
2945
2946	for (i = 0; i < cnt; i++) {
2947		if (!within_error_injection_list(addrs[i]))
2948			return -EINVAL;
2949	}
2950	return 0;
2951}
2952
2953int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2954{
2955	struct bpf_kprobe_multi_link *link = NULL;
2956	struct bpf_link_primer link_primer;
2957	void __user *ucookies;
2958	unsigned long *addrs;
2959	u32 flags, cnt, size;
2960	void __user *uaddrs;
2961	u64 *cookies = NULL;
2962	void __user *usyms;
2963	int err;
2964
2965	/* no support for 32bit archs yet */
2966	if (sizeof(u64) != sizeof(void *))
2967		return -EOPNOTSUPP;
2968
2969	if (!is_kprobe_multi(prog))
2970		return -EINVAL;
2971
2972	flags = attr->link_create.kprobe_multi.flags;
2973	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2974		return -EINVAL;
2975
2976	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2977	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2978	if (!!uaddrs == !!usyms)
2979		return -EINVAL;
2980
2981	cnt = attr->link_create.kprobe_multi.cnt;
2982	if (!cnt)
2983		return -EINVAL;
2984	if (cnt > MAX_KPROBE_MULTI_CNT)
2985		return -E2BIG;
2986
2987	size = cnt * sizeof(*addrs);
2988	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2989	if (!addrs)
2990		return -ENOMEM;
2991
2992	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2993	if (ucookies) {
2994		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2995		if (!cookies) {
2996			err = -ENOMEM;
2997			goto error;
2998		}
2999		if (copy_from_user(cookies, ucookies, size)) {
3000			err = -EFAULT;
3001			goto error;
3002		}
3003	}
3004
3005	if (uaddrs) {
3006		if (copy_from_user(addrs, uaddrs, size)) {
3007			err = -EFAULT;
3008			goto error;
3009		}
3010	} else {
3011		struct multi_symbols_sort data = {
3012			.cookies = cookies,
3013		};
3014		struct user_syms us;
3015
3016		err = copy_user_syms(&us, usyms, cnt);
3017		if (err)
3018			goto error;
3019
3020		if (cookies)
3021			data.funcs = us.syms;
3022
3023		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
3024		       symbols_swap_r, &data);
3025
3026		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
3027		free_user_syms(&us);
3028		if (err)
3029			goto error;
3030	}
3031
3032	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
3033		err = -EINVAL;
3034		goto error;
3035	}
3036
3037	link = kzalloc(sizeof(*link), GFP_KERNEL);
3038	if (!link) {
3039		err = -ENOMEM;
3040		goto error;
3041	}
3042
3043	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
3044		      &bpf_kprobe_multi_link_lops, prog);
3045
3046	err = bpf_link_prime(&link->link, &link_primer);
3047	if (err)
3048		goto error;
3049
3050	if (!(flags & BPF_F_KPROBE_MULTI_RETURN))
3051		link->fp.entry_handler = kprobe_multi_link_handler;
3052	if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog))
3053		link->fp.exit_handler = kprobe_multi_link_exit_handler;
3054	if (is_kprobe_session(prog))
3055		link->fp.entry_data_size = sizeof(u64);
3056
3057	link->addrs = addrs;
3058	link->cookies = cookies;
3059	link->cnt = cnt;
3060	link->flags = flags;
3061
3062	if (cookies) {
3063		/*
3064		 * Sorting the addresses will sort the cookies as well
3065		 * (check bpf_kprobe_multi_cookie_swap). This way we can
3066		 * find the cookie for a given address in the
3067		 * bpf_get_attach_cookie helper.
3068		 */
3069		sort_r(addrs, cnt, sizeof(*addrs),
3070		       bpf_kprobe_multi_cookie_cmp,
3071		       bpf_kprobe_multi_cookie_swap,
3072		       link);
3073	}
3074
3075	err = get_modules_for_addrs(&link->mods, addrs, cnt);
3076	if (err < 0) {
3077		bpf_link_cleanup(&link_primer);
3078		return err;
3079	}
3080	link->mods_cnt = err;
3081
3082	err = register_fprobe_ips(&link->fp, addrs, cnt);
3083	if (err) {
3084		kprobe_multi_put_modules(link->mods, link->mods_cnt);
3085		bpf_link_cleanup(&link_primer);
3086		return err;
3087	}
3088
3089	return bpf_link_settle(&link_primer);
3090
3091error:
3092	kfree(link);
3093	kvfree(addrs);
3094	kvfree(cookies);
3095	return err;
3096}
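/*
 * Hedged userspace sketch (not part of this file): this attach path is
 * usually driven through libbpf. A program such as
 *
 *	SEC("kprobe.multi/tcp_*")
 *	int on_tcp(struct pt_regs *ctx) { return 0; }
 *
 * can be attached with something roughly like
 *
 *	bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", NULL);
 *
 * which resolves the pattern to symbols/addresses and fills
 * link_create.kprobe_multi (syms or addrs, cnt, optional cookies, flags)
 * before ending up in the function above.
 */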
3097#else /* !CONFIG_FPROBE */
3098int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3099{
3100	return -EOPNOTSUPP;
3101}
3102static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3103{
3104	return 0;
3105}
3106static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3107{
3108	return 0;
3109}
3110#endif
3111
3112#ifdef CONFIG_UPROBES
3113struct bpf_uprobe_multi_link;
3114
3115struct bpf_uprobe {
3116	struct bpf_uprobe_multi_link *link;
3117	loff_t offset;
3118	unsigned long ref_ctr_offset;
3119	u64 cookie;
3120	struct uprobe *uprobe;
3121	struct uprobe_consumer consumer;
3122	bool session;
3123};
3124
3125struct bpf_uprobe_multi_link {
3126	struct path path;
3127	struct bpf_link link;
3128	u32 cnt;
3129	u32 flags;
3130	struct bpf_uprobe *uprobes;
3131	struct task_struct *task;
3132};
3133
3134struct bpf_uprobe_multi_run_ctx {
3135	struct bpf_session_run_ctx session_ctx;
3136	unsigned long entry_ip;
3137	struct bpf_uprobe *uprobe;
3138};
3139
3140static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
3141{
3142	u32 i;
3143
3144	for (i = 0; i < cnt; i++)
3145		uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);
3146
3147	if (cnt)
3148		uprobe_unregister_sync();
3149}
3150
3151static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3152{
3153	struct bpf_uprobe_multi_link *umulti_link;
3154
3155	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3156	bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt);
3157	if (umulti_link->task)
3158		put_task_struct(umulti_link->task);
3159	path_put(&umulti_link->path);
3160}
3161
3162static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3163{
3164	struct bpf_uprobe_multi_link *umulti_link;
3165
3166	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3167	kvfree(umulti_link->uprobes);
3168	kfree(umulti_link);
3169}
3170
3171static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3172						struct bpf_link_info *info)
3173{
3174	u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3175	u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3176	u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3177	u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3178	u32 upath_size = info->uprobe_multi.path_size;
3179	struct bpf_uprobe_multi_link *umulti_link;
3180	u32 ucount = info->uprobe_multi.count;
3181	int err = 0, i;
3182	char *p, *buf;
3183	long left = 0;
3184
3185	if (!upath ^ !upath_size)
3186		return -EINVAL;
3187
3188	if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3189		return -EINVAL;
3190
3191	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3192	info->uprobe_multi.count = umulti_link->cnt;
3193	info->uprobe_multi.flags = umulti_link->flags;
3194	info->uprobe_multi.pid = umulti_link->task ?
3195				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3196
3197	upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX;
3198	buf = kmalloc(upath_size, GFP_KERNEL);
3199	if (!buf)
3200		return -ENOMEM;
3201	p = d_path(&umulti_link->path, buf, upath_size);
3202	if (IS_ERR(p)) {
3203		kfree(buf);
3204		return PTR_ERR(p);
3205	}
3206	upath_size = buf + upath_size - p;
3207
3208	if (upath)
3209		left = copy_to_user(upath, p, upath_size);
3210	kfree(buf);
3211	if (left)
3212		return -EFAULT;
3213	info->uprobe_multi.path_size = upath_size;
3214
3215	if (!uoffsets && !ucookies && !uref_ctr_offsets)
3216		return 0;
3217
3218	if (ucount < umulti_link->cnt)
3219		err = -ENOSPC;
3220	else
3221		ucount = umulti_link->cnt;
3222
3223	for (i = 0; i < ucount; i++) {
3224		if (uoffsets &&
3225		    put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3226			return -EFAULT;
3227		if (uref_ctr_offsets &&
3228		    put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3229			return -EFAULT;
3230		if (ucookies &&
3231		    put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3232			return -EFAULT;
3233	}
3234
3235	return err;
3236}
3237
3238static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3239	.release = bpf_uprobe_multi_link_release,
3240	.dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3241	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3242};
3243
3244static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3245			   unsigned long entry_ip,
3246			   struct pt_regs *regs,
3247			   bool is_return, void *data)
3248{
3249	struct bpf_uprobe_multi_link *link = uprobe->link;
3250	struct bpf_uprobe_multi_run_ctx run_ctx = {
3251		.session_ctx = {
3252			.is_return = is_return,
3253			.data = data,
3254		},
3255		.entry_ip = entry_ip,
3256		.uprobe = uprobe,
3257	};
3258	struct bpf_prog *prog = link->link.prog;
3259	bool sleepable = prog->sleepable;
3260	struct bpf_run_ctx *old_run_ctx;
3261	int err;
3262
3263	if (link->task && !same_thread_group(current, link->task))
3264		return 0;
3265
3266	if (sleepable)
3267		rcu_read_lock_trace();
3268	else
3269		rcu_read_lock();
3270
3271	migrate_disable();
3272
3273	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
3274	err = bpf_prog_run(link->link.prog, regs);
3275	bpf_reset_run_ctx(old_run_ctx);
3276
3277	migrate_enable();
3278
3279	if (sleepable)
3280		rcu_read_unlock_trace();
3281	else
3282		rcu_read_unlock();
3283	return err;
3284}
3285
3286static bool
3287uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
3288{
3289	struct bpf_uprobe *uprobe;
3290
3291	uprobe = container_of(con, struct bpf_uprobe, consumer);
3292	return uprobe->link->task->mm == mm;
3293}
3294
3295static int
3296uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
3297			  __u64 *data)
3298{
3299	struct bpf_uprobe *uprobe;
3300	int ret;
3301
3302	uprobe = container_of(con, struct bpf_uprobe, consumer);
3303	ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data);
3304	if (uprobe->session)
3305		return ret ? UPROBE_HANDLER_IGNORE : 0;
3306	return 0;
3307}
3308
3309static int
3310uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
3311			      __u64 *data)
3312{
3313	struct bpf_uprobe *uprobe;
3314
3315	uprobe = container_of(con, struct bpf_uprobe, consumer);
3316	uprobe_prog_run(uprobe, func, regs, true, data);
3317	return 0;
3318}
3319
3320static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3321{
3322	struct bpf_uprobe_multi_run_ctx *run_ctx;
3323
3324	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3325			       session_ctx.run_ctx);
3326	return run_ctx->entry_ip;
3327}
3328
3329static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3330{
3331	struct bpf_uprobe_multi_run_ctx *run_ctx;
3332
3333	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3334			       session_ctx.run_ctx);
3335	return run_ctx->uprobe->cookie;
3336}
3337
3338int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3339{
3340	struct bpf_uprobe_multi_link *link = NULL;
3341	unsigned long __user *uref_ctr_offsets;
3342	struct bpf_link_primer link_primer;
3343	struct bpf_uprobe *uprobes = NULL;
3344	struct task_struct *task = NULL;
3345	unsigned long __user *uoffsets;
3346	u64 __user *ucookies;
3347	void __user *upath;
3348	u32 flags, cnt, i;
3349	struct path path;
3350	char *name;
3351	pid_t pid;
3352	int err;
3353
3354	/* no support for 32bit archs yet */
3355	if (sizeof(u64) != sizeof(void *))
3356		return -EOPNOTSUPP;
3357
3358	if (!is_uprobe_multi(prog))
3359		return -EINVAL;
3360
3361	flags = attr->link_create.uprobe_multi.flags;
3362	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3363		return -EINVAL;
3364
3365	/*
3366	 * path, offsets and cnt are mandatory,
3367	 * ref_ctr_offsets and cookies are optional
3368	 */
3369	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3370	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3371	cnt = attr->link_create.uprobe_multi.cnt;
3372	pid = attr->link_create.uprobe_multi.pid;
3373
3374	if (!upath || !uoffsets || !cnt || pid < 0)
3375		return -EINVAL;
3376	if (cnt > MAX_UPROBE_MULTI_CNT)
3377		return -E2BIG;
3378
3379	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3380	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3381
3382	name = strndup_user(upath, PATH_MAX);
3383	if (IS_ERR(name)) {
3384		err = PTR_ERR(name);
3385		return err;
3386	}
3387
3388	err = kern_path(name, LOOKUP_FOLLOW, &path);
3389	kfree(name);
3390	if (err)
3391		return err;
3392
3393	if (!d_is_reg(path.dentry)) {
3394		err = -EBADF;
3395		goto error_path_put;
3396	}
3397
3398	if (pid) {
3399		task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
3400		if (!task) {
3401			err = -ESRCH;
3402			goto error_path_put;
3403		}
3404	}
3405
3406	err = -ENOMEM;
3407
3408	link = kzalloc(sizeof(*link), GFP_KERNEL);
3409	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3410
3411	if (!uprobes || !link)
3412		goto error_free;
3413
3414	for (i = 0; i < cnt; i++) {
3415		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3416			err = -EFAULT;
3417			goto error_free;
3418		}
3419		if (uprobes[i].offset < 0) {
3420			err = -EINVAL;
3421			goto error_free;
3422		}
3423		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3424			err = -EFAULT;
3425			goto error_free;
3426		}
3427		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3428			err = -EFAULT;
3429			goto error_free;
3430		}
3431
3432		uprobes[i].link = link;
3433
3434		if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
3435			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3436		if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
3437			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3438		if (is_uprobe_session(prog))
3439			uprobes[i].session = true;
3440		if (pid)
3441			uprobes[i].consumer.filter = uprobe_multi_link_filter;
3442	}
3443
3444	link->cnt = cnt;
3445	link->uprobes = uprobes;
3446	link->path = path;
3447	link->task = task;
3448	link->flags = flags;
3449
3450	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3451		      &bpf_uprobe_multi_link_lops, prog);
3452
3453	for (i = 0; i < cnt; i++) {
3454		uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry),
3455						    uprobes[i].offset,
3456						    uprobes[i].ref_ctr_offset,
3457						    &uprobes[i].consumer);
3458		if (IS_ERR(uprobes[i].uprobe)) {
3459			err = PTR_ERR(uprobes[i].uprobe);
3460			link->cnt = i;
3461			goto error_unregister;
3462		}
3463	}
3464
3465	err = bpf_link_prime(&link->link, &link_primer);
3466	if (err)
3467		goto error_unregister;
3468
3469	return bpf_link_settle(&link_primer);
3470
3471error_unregister:
3472	bpf_uprobe_unregister(uprobes, link->cnt);
3473
3474error_free:
3475	kvfree(uprobes);
3476	kfree(link);
3477	if (task)
3478		put_task_struct(task);
3479error_path_put:
3480	path_put(&path);
3481	return err;
3482}
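/*
 * Hedged note (not part of this file): user space typically reaches this
 * through libbpf's uprobe-multi support (for example
 * bpf_program__attach_uprobe_multi() or a SEC("uprobe.multi/...") program),
 * which turns a binary path plus symbol pattern into the
 * link_create.uprobe_multi fields consumed above: path, offsets, cnt and the
 * optional ref_ctr_offsets, cookies and pid filter.
 */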
3483#else /* !CONFIG_UPROBES */
3484int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3485{
3486	return -EOPNOTSUPP;
3487}
3488static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3489{
3490	return 0;
3491}
3492static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3493{
3494	return 0;
3495}
3496#endif /* CONFIG_UPROBES */
3497
3498__bpf_kfunc_start_defs();
3499
3500__bpf_kfunc bool bpf_session_is_return(void)
3501{
3502	struct bpf_session_run_ctx *session_ctx;
3503
3504	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3505	return session_ctx->is_return;
3506}
3507
3508__bpf_kfunc __u64 *bpf_session_cookie(void)
3509{
3510	struct bpf_session_run_ctx *session_ctx;
3511
3512	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3513	return session_ctx->data;
3514}
3515
3516__bpf_kfunc_end_defs();
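/*
 * Hedged sketch (not part of this file): the two kfuncs above are meant for
 * kprobe/uprobe "session" programs, which fire on both entry and return and
 * get per-invocation cookie storage. A program might use them roughly like
 * this; the SEC() name follows libbpf conventions and is an assumption here:
 *
 *	SEC("kprobe.session/do_sys_openat2")
 *	int handle(struct pt_regs *ctx)
 *	{
 *		__u64 *cookie = bpf_session_cookie();
 *
 *		if (!bpf_session_is_return()) {
 *			*cookie = bpf_ktime_get_ns();	// entry: stash time
 *			return 0;
 *		}
 *		// return path: *cookie still holds the entry timestamp
 *		return 0;
 *	}
 */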
3517
3518BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
3519BTF_ID_FLAGS(func, bpf_session_is_return)
3520BTF_ID_FLAGS(func, bpf_session_cookie)
3521BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
3522
3523static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
3524{
3525	if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
3526		return 0;
3527
3528	if (!is_kprobe_session(prog) && !is_uprobe_session(prog))
3529		return -EACCES;
3530
3531	return 0;
3532}
3533
3534static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
3535	.owner = THIS_MODULE,
3536	.set = &kprobe_multi_kfunc_set_ids,
3537	.filter = bpf_kprobe_multi_filter,
3538};
3539
3540static int __init bpf_kprobe_multi_kfuncs_init(void)
3541{
3542	return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
3543}
3544
3545late_initcall(bpf_kprobe_multi_kfuncs_init);
3546
3547__bpf_kfunc_start_defs();
3548
3549__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
3550				     u64 value)
3551{
3552	if (type != PIDTYPE_PID && type != PIDTYPE_TGID)
3553		return -EINVAL;
3554
3555	return bpf_send_signal_common(sig, type, task, value);
3556}
3557
3558__bpf_kfunc_end_defs();