v4.6
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>

/* If a kernel subsystem allows eBPF programs to call this function, its
 * verifier_ops->get_func_proto() callback should return
 * bpf_map_lookup_elem_proto so that the verifier can properly check the
 * arguments.
 *
 * Different map implementations rely on RCU in the lookup/update/delete
 * map methods, so eBPF programs must run under the RCU read lock whenever
 * they are allowed to access maps; hence the rcu_read_lock_held() check in
 * all three functions.
 */
static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* the verifier checked that R1 contains a valid pointer to a bpf_map
	 * and that R2 points to the program stack with map->key_size bytes
	 * initialized
	 */
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;
	void *value;

	WARN_ON_ONCE(!rcu_read_lock_held());

	value = map->ops->map_lookup_elem(map, key);

	/* lookup() returns either a pointer to the element value or NULL,
	 * which is exactly what the PTR_TO_MAP_VALUE_OR_NULL return type
	 * expresses
	 */
	return (unsigned long) value;
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
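
/* Editor's note: a minimal sketch of how a subsystem would expose the map
 * helpers through its verifier_ops->get_func_proto() callback, as the
 * comment before bpf_map_lookup_elem() describes. The callback and ops
 * names are invented for illustration; only the proto symbols come from
 * this file.
 *
 *	static const struct bpf_func_proto *
 *	example_get_func_proto(enum bpf_func_id func_id)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 *
 *	static const struct bpf_verifier_ops example_verifier_ops = {
 *		.get_func_proto	= example_get_func_proto,
 *	};
 */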

static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;
	void *value = (void *) (unsigned long) r3;

	WARN_ON_ONCE(!rcu_read_lock_held());

	return map->ops->map_update_elem(map, key, value, r4);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;

	WARN_ON_ONCE(!rcu_read_lock_held());

	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
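
/* Editor's note: a hedged sketch of the program-side view of the three
 * helpers above, written in the BPF C dialect used with libbpf (the map
 * definition style shown is the modern BTF-based one, newer than this
 * kernel version; map and variable names are invented):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} counts SEC(".maps");
 *
 *	__u32 key = 0;
 *	__u64 one = 1, *val;
 *
 *	val = bpf_map_lookup_elem(&counts, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 *	else
 *		bpf_map_update_elem(&counts, &key, &one, BPF_ANY);
 */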

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct task_struct *task = current;

	if (!task)
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
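
/* Editor's note: the helper above packs two ids into one u64, the thread
 * group id (what userspace calls the pid) in the upper 32 bits and the
 * thread id in the lower 32. A caller-side sketch:
 *
 *	__u64 id = bpf_get_current_pid_tgid();
 *	__u32 tgid = id >> 32;		// userspace "process" id
 *	__u32 tid = (__u32)id;		// userspace "thread" id
 */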

static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (!task)
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
{
	struct task_struct *task = current;
	char *buf = (char *) (long) r1;

	if (!task)
		return -EINVAL;

	strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
	return 0;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};
v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, its
 * verifier_ops->get_func_proto() callback should return
 * bpf_map_lookup_elem_proto so that the verifier can properly check the
 * arguments.
 *
 * Different map implementations rely on RCU in the lookup/update/delete
 * map methods, so eBPF programs must run under the RCU read lock whenever
 * they are allowed to access maps; hence the rcu_read_lock_held() check in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};
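
/* Editor's note: push/pop/peek back the BPF_MAP_TYPE_QUEUE and
 * BPF_MAP_TYPE_STACK map types, which take no key. A hedged program-side
 * sketch ("events" would be a queue map defined elsewhere):
 *
 *	__u64 v = 42;
 *
 *	bpf_map_push_elem(&events, &v, BPF_ANY);	// enqueue
 *	if (bpf_map_pop_elem(&events, &v) == 0)		// oldest entry for a queue
 *		bpf_printk("popped %llu", v);
 */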

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
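
/* Editor's note: program-side sketch of the helper above; TASK_COMM_LEN
 * (16) covers the whole task->comm field:
 *
 *	char comm[16];
 *
 *	if (bpf_get_current_comm(comm, sizeof(comm)) == 0)
 *		bpf_printk("running in %s", comm);
 */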

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
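
/* Editor's note: bpf_spin_lock() protects a struct bpf_spin_lock embedded
 * in a map value. A hedged program-side sketch (the value layout and names
 * are invented):
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		__u64 counter;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&m, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */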

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is currently unused, but leaves room to extend
	 * the API; the verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage = NULL;
	void *ptr;
	int i;

	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
		break;
	}

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif
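
/* Editor's note: program-side sketch of bpf_strtol(). The low bits of the
 * flags argument carry the numeric base (0, 8, 10 or 16; 0 auto-detects),
 * and a positive return value is the number of bytes consumed:
 *
 *	const char buf[] = "-42";
 *	long val;
 *	int consumed;
 *
 *	consumed = bpf_strtol(buf, sizeof(buf) - 1, 0, &val);
 *	// on success: consumed == 3, val == -42
 */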

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type      = ARG_CONST_SIZE,
};
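
/* Editor's note: the dev and ino arguments identify a pid namespace by the
 * device and inode of its nsfs entry (compare the ns_match() call above).
 * Userspace typically obtains them by stat()ing the namespace file; a
 * sketch:
 *
 *	struct stat st;
 *
 *	stat("/proc/self/ns/pid", &st);
 *	// pass st.st_dev and st.st_ino as the dev and ino arguments
 */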

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto =  {
	.func		= bpf_event_output_data,
	.gpl_only       = true,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_CONST_MAP_PTR,
	.arg3_type      = ARG_ANYTHING,
	.arg4_type      = ARG_PTR_TO_MEM,
	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
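
/* Editor's note: unlike the probe_read helpers, copy_from_user() may fault
 * and sleep, so this helper is, to my understanding, only offered to
 * sleepable programs. A hedged sketch (user_ptr would come from the
 * program's context):
 *
 *	char path[256];
 *
 *	if (bpf_copy_from_user(path, sizeof(path), user_ptr) == 0)
 *		bpf_printk("read %s", path);
 */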

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};
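
/* Editor's note: both helpers take a pointer to a per-CPU kernel symbol,
 * reached in BPF C via a BTF-typed ksym. A hedged sketch loosely modeled
 * on the kernel selftests (the usage shown is illustrative only):
 *
 *	extern const struct rq runqueues __ksym;	// per-CPU kernel var
 *
 *	struct rq *rq = bpf_per_cpu_ptr(&runqueues, cpu);
 *	if (rq)		// NULL if cpu >= nr_cpu_ids
 *		bpf_printk("got rq for cpu %u", cpu);
 */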

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BUF_LEN	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_fmt_tmp_buf(char **tmp_buf)
{
	struct bpf_bprintf_buffers *bufs;
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
	*tmp_buf = bufs->tmp_bufs[nest_level - 1];

	return 0;
}

void bpf_bprintf_cleanup(void)
{
	if (this_cpu_read(bpf_bprintf_nest_level)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
	}
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when bin_args is NULL
 * - Arguments preparation: in addition to the above verification, it writes in
 *   bin_args a binary representation of arguments usable by bstr_printf where
 *   pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args)
{
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (bin_args) {
		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
			return -EBUSY;

		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
		*bin_args = (u32 *)tmp_buf;
	}

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf)  {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup();
	return err;
}

#define MAX_SNPRINTF_VARARGS		12

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, bin_args);

	bpf_bprintf_cleanup();

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
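
/* Editor's note: program-side sketch of bpf_snprintf(). The data array
 * carries one u64 per conversion specifier and data_len is its size in
 * bytes, which must be a multiple of 8 (variable names invented):
 *
 *	char out[64];
 *	__u64 args[] = { (__u64)pid, (__u64)addr };
 *
 *	bpf_snprintf(out, sizeof(out), "pid %d at %pK", args, sizeof(args));
 */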

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	default:
		return NULL;
	}
}
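
/* Editor's note: subsystems typically handle their own helpers first and
 * then chain to bpf_base_func_proto() for the common set; a hedged sketch
 * (the function name and helper choice are invented for illustration):
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_perf_event_output:
 *			return &bpf_event_output_data_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */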