v5.4 (kernel/bpf/helpers.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  3 */
  4#include <linux/bpf.h>
  5#include <linux/rcupdate.h>
  6#include <linux/random.h>
  7#include <linux/smp.h>
  8#include <linux/topology.h>
  9#include <linux/ktime.h>
 10#include <linux/sched.h>
 11#include <linux/uidgid.h>
 12#include <linux/filter.h>
 13#include <linux/ctype.h>
 14
 15#include "../../lib/kstrtox.h"
 16
 17/* If kernel subsystem is allowing eBPF programs to call this function,
 18 * inside its own verifier_ops->get_func_proto() callback it should return
 19 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
 20 *
 21 * Different map implementations will rely on rcu in map methods
 22 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 23 * if program is allowed to access maps, so check rcu_read_lock_held in
 24 * all three functions.
 25 */
 26BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 27{
 28	WARN_ON_ONCE(!rcu_read_lock_held());
 29	return (unsigned long) map->ops->map_lookup_elem(map, key);
 30}
 31
 32const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 33	.func		= bpf_map_lookup_elem,
 34	.gpl_only	= false,
 35	.pkt_access	= true,
 36	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 37	.arg1_type	= ARG_CONST_MAP_PTR,
 38	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 39};
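
A minimal sketch of the verifier_ops->get_func_proto() pattern the comment above describes: a subsystem exposes the map helpers by returning these protos for the corresponding function IDs. The callback name here is illustrative; real instances live in the per-program-type verifier ops (e.g. net/core/filter.c).

static const struct bpf_func_proto *
example_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}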
 40
 41BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
 42	   void *, value, u64, flags)
 43{
 44	WARN_ON_ONCE(!rcu_read_lock_held());
 45	return map->ops->map_update_elem(map, key, value, flags);
 46}
 47
 48const struct bpf_func_proto bpf_map_update_elem_proto = {
 49	.func		= bpf_map_update_elem,
 50	.gpl_only	= false,
 51	.pkt_access	= true,
 52	.ret_type	= RET_INTEGER,
 53	.arg1_type	= ARG_CONST_MAP_PTR,
 54	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 55	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
 56	.arg4_type	= ARG_ANYTHING,
 57};
 58
 59BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
 60{
 61	WARN_ON_ONCE(!rcu_read_lock_held());
 62	return map->ops->map_delete_elem(map, key);
 63}
 64
 65const struct bpf_func_proto bpf_map_delete_elem_proto = {
 66	.func		= bpf_map_delete_elem,
 67	.gpl_only	= false,
 68	.pkt_access	= true,
 69	.ret_type	= RET_INTEGER,
 70	.arg1_type	= ARG_CONST_MAP_PTR,
 71	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 72};
 73
 74BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
 75{
 76	return map->ops->map_push_elem(map, value, flags);
 77}
 78
 79const struct bpf_func_proto bpf_map_push_elem_proto = {
 80	.func		= bpf_map_push_elem,
 81	.gpl_only	= false,
 82	.pkt_access	= true,
 83	.ret_type	= RET_INTEGER,
 84	.arg1_type	= ARG_CONST_MAP_PTR,
 85	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
 86	.arg3_type	= ARG_ANYTHING,
 87};
 88
 89BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
 90{
 91	return map->ops->map_pop_elem(map, value);
 92}
 93
 94const struct bpf_func_proto bpf_map_pop_elem_proto = {
 95	.func		= bpf_map_pop_elem,
 96	.gpl_only	= false,
 97	.ret_type	= RET_INTEGER,
 98	.arg1_type	= ARG_CONST_MAP_PTR,
 99	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
100};
101
102BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
103{
104	return map->ops->map_peek_elem(map, value);
105}
106
107const struct bpf_func_proto bpf_map_peek_elem_proto = {
108	.func		= bpf_map_pop_elem,
109	.gpl_only	= false,
110	.ret_type	= RET_INTEGER,
111	.arg1_type	= ARG_CONST_MAP_PTR,
112	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
113};
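
For reference, a hedged sketch of how these map helpers look from the BPF program side (libbpf-style C; map, section and function names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int count_execve(void *ctx)
{
	__u32 key = 0;
	__u64 one = 1, *val;

	val = bpf_map_lookup_elem(&counters, &key);
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		bpf_map_update_elem(&counters, &key, &one, BPF_NOEXIST);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";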
114
115const struct bpf_func_proto bpf_get_prandom_u32_proto = {
116	.func		= bpf_user_rnd_u32,
117	.gpl_only	= false,
118	.ret_type	= RET_INTEGER,
119};
120
121BPF_CALL_0(bpf_get_smp_processor_id)
122{
123	return smp_processor_id();
124}
125
126const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
127	.func		= bpf_get_smp_processor_id,
128	.gpl_only	= false,
129	.ret_type	= RET_INTEGER,
130};
131
132BPF_CALL_0(bpf_get_numa_node_id)
133{
134	return numa_node_id();
135}
136
137const struct bpf_func_proto bpf_get_numa_node_id_proto = {
138	.func		= bpf_get_numa_node_id,
139	.gpl_only	= false,
140	.ret_type	= RET_INTEGER,
141};
142
143BPF_CALL_0(bpf_ktime_get_ns)
144{
145	/* NMI safe access to clock monotonic */
146	return ktime_get_mono_fast_ns();
147}
148
149const struct bpf_func_proto bpf_ktime_get_ns_proto = {
150	.func		= bpf_ktime_get_ns,
151	.gpl_only	= true,
152	.ret_type	= RET_INTEGER,
153};
154
155BPF_CALL_0(bpf_get_current_pid_tgid)
156{
157	struct task_struct *task = current;
158
159	if (unlikely(!task))
160		return -EINVAL;
161
162	return (u64) task->tgid << 32 | task->pid;
163}
164
165const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
166	.func		= bpf_get_current_pid_tgid,
167	.gpl_only	= false,
168	.ret_type	= RET_INTEGER,
169};
170
171BPF_CALL_0(bpf_get_current_uid_gid)
172{
173	struct task_struct *task = current;
174	kuid_t uid;
175	kgid_t gid;
176
177	if (unlikely(!task))
178		return -EINVAL;
179
180	current_uid_gid(&uid, &gid);
181	return (u64) from_kgid(&init_user_ns, gid) << 32 |
182		     from_kuid(&init_user_ns, uid);
183}
184
185const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
186	.func		= bpf_get_current_uid_gid,
187	.gpl_only	= false,
188	.ret_type	= RET_INTEGER,
189};
190
191BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
192{
193	struct task_struct *task = current;
194
195	if (unlikely(!task))
196		goto err_clear;
197
198	strncpy(buf, task->comm, size);
199
200	/* Verifier guarantees that size > 0. For task->comm exceeding
201	 * size, guarantee that buf is %NUL-terminated. Unconditionally
202	 * done here to save the size test.
203	 */
204	buf[size - 1] = 0;
205	return 0;
206err_clear:
207	memset(buf, 0, size);
208	return -EINVAL;
209}
210
211const struct bpf_func_proto bpf_get_current_comm_proto = {
212	.func		= bpf_get_current_comm,
213	.gpl_only	= false,
214	.ret_type	= RET_INTEGER,
215	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
216	.arg2_type	= ARG_CONST_SIZE,
217};
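
A short sketch of how a BPF program typically consumes the two helpers above: the packed pid/tgid value and the NUL-terminated comm buffer (fragment only, assumed to sit inside a tracing program):

	__u64 id = bpf_get_current_pid_tgid();
	__u32 tgid = id >> 32;		/* what userspace calls the PID */
	__u32 pid  = (__u32)id;		/* the thread id */
	char comm[16];			/* TASK_COMM_LEN */

	bpf_get_current_comm(comm, sizeof(comm));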
218
219#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
220
221static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
222{
223	arch_spinlock_t *l = (void *)lock;
224	union {
225		__u32 val;
226		arch_spinlock_t lock;
227	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
228
229	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
230	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
231	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
232	arch_spin_lock(l);
233}
234
235static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
236{
237	arch_spinlock_t *l = (void *)lock;
238
239	arch_spin_unlock(l);
240}
241
242#else
243
244static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
245{
246	atomic_t *l = (void *)lock;
247
248	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
249	do {
250		atomic_cond_read_relaxed(l, !VAL);
251	} while (atomic_xchg(l, 1));
252}
253
254static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
255{
256	atomic_t *l = (void *)lock;
257
258	atomic_set_release(l, 0);
259}
260
261#endif
262
263static DEFINE_PER_CPU(unsigned long, irqsave_flags);
264
265notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
266{
267	unsigned long flags;
268
269	local_irq_save(flags);
270	__bpf_spin_lock(lock);
271	__this_cpu_write(irqsave_flags, flags);
272	return 0;
273}
274
275const struct bpf_func_proto bpf_spin_lock_proto = {
276	.func		= bpf_spin_lock,
277	.gpl_only	= false,
278	.ret_type	= RET_VOID,
279	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
280};
281
282notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
283{
284	unsigned long flags;
285
286	flags = __this_cpu_read(irqsave_flags);
287	__bpf_spin_unlock(lock);
288	local_irq_restore(flags);
289	return 0;
290}
291
292const struct bpf_func_proto bpf_spin_unlock_proto = {
293	.func		= bpf_spin_unlock,
294	.gpl_only	= false,
295	.ret_type	= RET_VOID,
296	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
297};
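
A hedged sketch of the intended BPF-side usage: struct bpf_spin_lock is embedded in a map value (with BTF describing it), and the critical section between lock and unlock stays short and helper-free. Struct, map and section names are illustrative:

struct val_t {
	struct bpf_spin_lock lock;
	__u64 cnt;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct val_t);
} vals SEC(".maps");

SEC("tc")
int bump(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct val_t *v = bpf_map_lookup_elem(&vals, &key);

	if (!v)
		return 0;
	bpf_spin_lock(&v->lock);
	v->cnt++;
	bpf_spin_unlock(&v->lock);
	return 0;
}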
298
299void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
300			   bool lock_src)
301{
302	struct bpf_spin_lock *lock;
303
304	if (lock_src)
305		lock = src + map->spin_lock_off;
306	else
307		lock = dst + map->spin_lock_off;
308	preempt_disable();
309	____bpf_spin_lock(lock);
310	copy_map_value(map, dst, src);
311	____bpf_spin_unlock(lock);
312	preempt_enable();
313}
314
315#ifdef CONFIG_CGROUPS
316BPF_CALL_0(bpf_get_current_cgroup_id)
317{
318	struct cgroup *cgrp = task_dfl_cgroup(current);
319
320	return cgrp->kn->id.id;
321}
322
323const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
324	.func		= bpf_get_current_cgroup_id,
325	.gpl_only	= false,
326	.ret_type	= RET_INTEGER,
327};
328
329#ifdef CONFIG_CGROUP_BPF
330DECLARE_PER_CPU(struct bpf_cgroup_storage*,
331		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
332
333BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
334{
335	/* flags argument is not used now,
336	 * but provides an ability to extend the API.
337	 * verifier checks that its value is correct.
338	 */
339	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
340	struct bpf_cgroup_storage *storage;
341	void *ptr;
342
343	storage = this_cpu_read(bpf_cgroup_storage[stype]);
344
345	if (stype == BPF_CGROUP_STORAGE_SHARED)
346		ptr = &READ_ONCE(storage->buf)->data[0];
347	else
348		ptr = this_cpu_ptr(storage->percpu_buf);
349
350	return (unsigned long)ptr;
351}
352
353const struct bpf_func_proto bpf_get_local_storage_proto = {
354	.func		= bpf_get_local_storage,
355	.gpl_only	= false,
356	.ret_type	= RET_PTR_TO_MAP_VALUE,
357	.arg1_type	= ARG_CONST_MAP_PTR,
358	.arg2_type	= ARG_ANYTHING,
359};
360#endif
361
362#define BPF_STRTOX_BASE_MASK 0x1F
363
364static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
365			  unsigned long long *res, bool *is_negative)
366{
367	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
368	const char *cur_buf = buf;
369	size_t cur_len = buf_len;
370	unsigned int consumed;
371	size_t val_len;
372	char str[64];
373
374	if (!buf || !buf_len || !res || !is_negative)
375		return -EINVAL;
376
377	if (base != 0 && base != 8 && base != 10 && base != 16)
378		return -EINVAL;
379
380	if (flags & ~BPF_STRTOX_BASE_MASK)
381		return -EINVAL;
382
383	while (cur_buf < buf + buf_len && isspace(*cur_buf))
384		++cur_buf;
385
386	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
387	if (*is_negative)
388		++cur_buf;
389
390	consumed = cur_buf - buf;
391	cur_len -= consumed;
392	if (!cur_len)
393		return -EINVAL;
394
395	cur_len = min(cur_len, sizeof(str) - 1);
396	memcpy(str, cur_buf, cur_len);
397	str[cur_len] = '\0';
398	cur_buf = str;
399
400	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
401	val_len = _parse_integer(cur_buf, base, res);
402
403	if (val_len & KSTRTOX_OVERFLOW)
404		return -ERANGE;
405
406	if (val_len == 0)
407		return -EINVAL;
408
409	cur_buf += val_len;
410	consumed += cur_buf - str;
411
412	return consumed;
413}
414
415static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
416			 long long *res)
417{
418	unsigned long long _res;
419	bool is_negative;
420	int err;
421
422	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
423	if (err < 0)
424		return err;
425	if (is_negative) {
426		if ((long long)-_res > 0)
427			return -ERANGE;
428		*res = -_res;
429	} else {
430		if ((long long)_res < 0)
431			return -ERANGE;
432		*res = _res;
433	}
434	return err;
435}
436
437BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
438	   long *, res)
439{
440	long long _res;
441	int err;
442
443	err = __bpf_strtoll(buf, buf_len, flags, &_res);
444	if (err < 0)
445		return err;
446	if (_res != (long)_res)
447		return -ERANGE;
448	*res = _res;
449	return err;
450}
451
452const struct bpf_func_proto bpf_strtol_proto = {
453	.func		= bpf_strtol,
454	.gpl_only	= false,
455	.ret_type	= RET_INTEGER,
456	.arg1_type	= ARG_PTR_TO_MEM,
457	.arg2_type	= ARG_CONST_SIZE,
458	.arg3_type	= ARG_ANYTHING,
459	.arg4_type	= ARG_PTR_TO_LONG,
460};
461
462BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
463	   unsigned long *, res)
464{
465	unsigned long long _res;
466	bool is_negative;
467	int err;
468
469	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
470	if (err < 0)
471		return err;
472	if (is_negative)
473		return -EINVAL;
474	if (_res != (unsigned long)_res)
475		return -ERANGE;
476	*res = _res;
477	return err;
478}
479
480const struct bpf_func_proto bpf_strtoul_proto = {
481	.func		= bpf_strtoul,
482	.gpl_only	= false,
483	.ret_type	= RET_INTEGER,
484	.arg1_type	= ARG_PTR_TO_MEM,
485	.arg2_type	= ARG_CONST_SIZE,
486	.arg3_type	= ARG_ANYTHING,
487	.arg4_type	= ARG_PTR_TO_LONG,
488};
489#endif
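
A sketch of where bpf_strtol()/bpf_strtoul() are typically used: parsing a proposed value in a BPF_PROG_TYPE_CGROUP_SYSCTL program. The 4096 limit is an arbitrary illustrative policy:

SEC("cgroup/sysctl")
int sysctl_limit(struct bpf_sysctl *ctx)
{
	char buf[16] = {};
	long val = 0;

	if (bpf_sysctl_get_new_value(ctx, buf, sizeof(buf)) < 0)
		return 1;	/* not a write, allow */
	/* the low 5 bits of flags select the base; 0 means auto-detect */
	if (bpf_strtol(buf, sizeof(buf), 0, &val) < 0)
		return 0;	/* reject unparseable input */
	return val <= 4096;	/* 0 = reject, 1 = allow */
}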
v5.14.15 (kernel/bpf/helpers.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 */
   4#include <linux/bpf.h>
   5#include <linux/rcupdate.h>
   6#include <linux/random.h>
   7#include <linux/smp.h>
   8#include <linux/topology.h>
   9#include <linux/ktime.h>
  10#include <linux/sched.h>
  11#include <linux/uidgid.h>
  12#include <linux/filter.h>
  13#include <linux/ctype.h>
  14#include <linux/jiffies.h>
  15#include <linux/pid_namespace.h>
  16#include <linux/proc_ns.h>
  17#include <linux/security.h>
  18
  19#include "../../lib/kstrtox.h"
  20
  21/* If kernel subsystem is allowing eBPF programs to call this function,
  22 * inside its own verifier_ops->get_func_proto() callback it should return
  23 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
  24 *
  25 * Different map implementations will rely on rcu in map methods
  26 * lookup/update/delete, therefore eBPF programs must run under rcu lock
  27 * if program is allowed to access maps, so check rcu_read_lock_held in
  28 * all three functions.
  29 */
  30BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
  31{
  32	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
  33	return (unsigned long) map->ops->map_lookup_elem(map, key);
  34}
  35
  36const struct bpf_func_proto bpf_map_lookup_elem_proto = {
  37	.func		= bpf_map_lookup_elem,
  38	.gpl_only	= false,
  39	.pkt_access	= true,
  40	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
  41	.arg1_type	= ARG_CONST_MAP_PTR,
  42	.arg2_type	= ARG_PTR_TO_MAP_KEY,
  43};
  44
  45BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
  46	   void *, value, u64, flags)
  47{
  48	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
  49	return map->ops->map_update_elem(map, key, value, flags);
  50}
  51
  52const struct bpf_func_proto bpf_map_update_elem_proto = {
  53	.func		= bpf_map_update_elem,
  54	.gpl_only	= false,
  55	.pkt_access	= true,
  56	.ret_type	= RET_INTEGER,
  57	.arg1_type	= ARG_CONST_MAP_PTR,
  58	.arg2_type	= ARG_PTR_TO_MAP_KEY,
  59	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
  60	.arg4_type	= ARG_ANYTHING,
  61};
  62
  63BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
  64{
  65	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
  66	return map->ops->map_delete_elem(map, key);
  67}
  68
  69const struct bpf_func_proto bpf_map_delete_elem_proto = {
  70	.func		= bpf_map_delete_elem,
  71	.gpl_only	= false,
  72	.pkt_access	= true,
  73	.ret_type	= RET_INTEGER,
  74	.arg1_type	= ARG_CONST_MAP_PTR,
  75	.arg2_type	= ARG_PTR_TO_MAP_KEY,
  76};
  77
  78BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
  79{
  80	return map->ops->map_push_elem(map, value, flags);
  81}
  82
  83const struct bpf_func_proto bpf_map_push_elem_proto = {
  84	.func		= bpf_map_push_elem,
  85	.gpl_only	= false,
  86	.pkt_access	= true,
  87	.ret_type	= RET_INTEGER,
  88	.arg1_type	= ARG_CONST_MAP_PTR,
  89	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
  90	.arg3_type	= ARG_ANYTHING,
  91};
  92
  93BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
  94{
  95	return map->ops->map_pop_elem(map, value);
  96}
  97
  98const struct bpf_func_proto bpf_map_pop_elem_proto = {
  99	.func		= bpf_map_pop_elem,
 100	.gpl_only	= false,
 101	.ret_type	= RET_INTEGER,
 102	.arg1_type	= ARG_CONST_MAP_PTR,
 103	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
 104};
 105
 106BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
 107{
 108	return map->ops->map_peek_elem(map, value);
 109}
 110
 111const struct bpf_func_proto bpf_map_peek_elem_proto = {
 112	.func		= bpf_map_peek_elem,
 113	.gpl_only	= false,
 114	.ret_type	= RET_INTEGER,
 115	.arg1_type	= ARG_CONST_MAP_PTR,
 116	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
 117};
 118
 119const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 120	.func		= bpf_user_rnd_u32,
 121	.gpl_only	= false,
 122	.ret_type	= RET_INTEGER,
 123};
 124
 125BPF_CALL_0(bpf_get_smp_processor_id)
 126{
 127	return smp_processor_id();
 128}
 129
 130const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
 131	.func		= bpf_get_smp_processor_id,
 132	.gpl_only	= false,
 133	.ret_type	= RET_INTEGER,
 134};
 135
 136BPF_CALL_0(bpf_get_numa_node_id)
 137{
 138	return numa_node_id();
 139}
 140
 141const struct bpf_func_proto bpf_get_numa_node_id_proto = {
 142	.func		= bpf_get_numa_node_id,
 143	.gpl_only	= false,
 144	.ret_type	= RET_INTEGER,
 145};
 146
 147BPF_CALL_0(bpf_ktime_get_ns)
 148{
 149	/* NMI safe access to clock monotonic */
 150	return ktime_get_mono_fast_ns();
 151}
 152
 153const struct bpf_func_proto bpf_ktime_get_ns_proto = {
 154	.func		= bpf_ktime_get_ns,
 155	.gpl_only	= false,
 156	.ret_type	= RET_INTEGER,
 157};
 158
 159BPF_CALL_0(bpf_ktime_get_boot_ns)
 160{
 161	/* NMI safe access to clock boottime */
 162	return ktime_get_boot_fast_ns();
 163}
 164
 165const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
 166	.func		= bpf_ktime_get_boot_ns,
 167	.gpl_only	= false,
 168	.ret_type	= RET_INTEGER,
 169};
 170
 171BPF_CALL_0(bpf_ktime_get_coarse_ns)
 172{
 173	return ktime_get_coarse_ns();
 174}
 175
 176const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
 177	.func		= bpf_ktime_get_coarse_ns,
 178	.gpl_only	= false,
 179	.ret_type	= RET_INTEGER,
 180};
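
The three ktime protos above differ only in the clock they expose: bpf_ktime_get_ns() reads CLOCK_MONOTONIC (does not advance across suspend), bpf_ktime_get_boot_ns() reads CLOCK_BOOTTIME (keeps counting across suspend), and the coarse variant trades resolution for a cheaper read. A typical latency-measurement fragment, sketched:

	__u64 t0 = bpf_ktime_get_ns();
	/* ... traced operation ... */
	__u64 delta_ns = bpf_ktime_get_ns() - t0;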
 181
 182BPF_CALL_0(bpf_get_current_pid_tgid)
 183{
 184	struct task_struct *task = current;
 185
 186	if (unlikely(!task))
 187		return -EINVAL;
 188
 189	return (u64) task->tgid << 32 | task->pid;
 190}
 191
 192const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
 193	.func		= bpf_get_current_pid_tgid,
 194	.gpl_only	= false,
 195	.ret_type	= RET_INTEGER,
 196};
 197
 198BPF_CALL_0(bpf_get_current_uid_gid)
 199{
 200	struct task_struct *task = current;
 201	kuid_t uid;
 202	kgid_t gid;
 203
 204	if (unlikely(!task))
 205		return -EINVAL;
 206
 207	current_uid_gid(&uid, &gid);
 208	return (u64) from_kgid(&init_user_ns, gid) << 32 |
 209		     from_kuid(&init_user_ns, uid);
 210}
 211
 212const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
 213	.func		= bpf_get_current_uid_gid,
 214	.gpl_only	= false,
 215	.ret_type	= RET_INTEGER,
 216};
 217
 218BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
 219{
 220	struct task_struct *task = current;
 221
 222	if (unlikely(!task))
 223		goto err_clear;
 224
 225	strncpy(buf, task->comm, size);
 226
 227	/* Verifier guarantees that size > 0. For task->comm exceeding
 228	 * size, guarantee that buf is %NUL-terminated. Unconditionally
 229	 * done here to save the size test.
 230	 */
 231	buf[size - 1] = 0;
 232	return 0;
 233err_clear:
 234	memset(buf, 0, size);
 235	return -EINVAL;
 236}
 237
 238const struct bpf_func_proto bpf_get_current_comm_proto = {
 239	.func		= bpf_get_current_comm,
 240	.gpl_only	= false,
 241	.ret_type	= RET_INTEGER,
 242	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 243	.arg2_type	= ARG_CONST_SIZE,
 244};
 245
 246#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
 247
 248static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
 249{
 250	arch_spinlock_t *l = (void *)lock;
 251	union {
 252		__u32 val;
 253		arch_spinlock_t lock;
 254	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
 255
 256	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
 257	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
 258	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
 259	arch_spin_lock(l);
 260}
 261
 262static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
 263{
 264	arch_spinlock_t *l = (void *)lock;
 265
 266	arch_spin_unlock(l);
 267}
 268
 269#else
 270
 271static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
 272{
 273	atomic_t *l = (void *)lock;
 274
 275	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
 276	do {
 277		atomic_cond_read_relaxed(l, !VAL);
 278	} while (atomic_xchg(l, 1));
 279}
 280
 281static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
 282{
 283	atomic_t *l = (void *)lock;
 284
 285	atomic_set_release(l, 0);
 286}
 287
 288#endif
 289
 290static DEFINE_PER_CPU(unsigned long, irqsave_flags);
 291
 292notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
 293{
 294	unsigned long flags;
 295
 296	local_irq_save(flags);
 297	__bpf_spin_lock(lock);
 298	__this_cpu_write(irqsave_flags, flags);
 299	return 0;
 300}
 301
 302const struct bpf_func_proto bpf_spin_lock_proto = {
 303	.func		= bpf_spin_lock,
 304	.gpl_only	= false,
 305	.ret_type	= RET_VOID,
 306	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
 307};
 308
 309notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
 310{
 311	unsigned long flags;
 312
 313	flags = __this_cpu_read(irqsave_flags);
 314	__bpf_spin_unlock(lock);
 315	local_irq_restore(flags);
 316	return 0;
 317}
 318
 319const struct bpf_func_proto bpf_spin_unlock_proto = {
 320	.func		= bpf_spin_unlock,
 321	.gpl_only	= false,
 322	.ret_type	= RET_VOID,
 323	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
 324};
 325
 326void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
 327			   bool lock_src)
 328{
 329	struct bpf_spin_lock *lock;
 330
 331	if (lock_src)
 332		lock = src + map->spin_lock_off;
 333	else
 334		lock = dst + map->spin_lock_off;
 335	preempt_disable();
 336	____bpf_spin_lock(lock);
 337	copy_map_value(map, dst, src);
 338	____bpf_spin_unlock(lock);
 339	preempt_enable();
 340}
 341
 342BPF_CALL_0(bpf_jiffies64)
 343{
 344	return get_jiffies_64();
 345}
 346
 347const struct bpf_func_proto bpf_jiffies64_proto = {
 348	.func		= bpf_jiffies64,
 349	.gpl_only	= false,
 350	.ret_type	= RET_INTEGER,
 351};
 352
 353#ifdef CONFIG_CGROUPS
 354BPF_CALL_0(bpf_get_current_cgroup_id)
 355{
 356	struct cgroup *cgrp;
 357	u64 cgrp_id;
 358
 359	rcu_read_lock();
 360	cgrp = task_dfl_cgroup(current);
 361	cgrp_id = cgroup_id(cgrp);
 362	rcu_read_unlock();
 363
 364	return cgrp_id;
 365}
 366
 367const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
 368	.func		= bpf_get_current_cgroup_id,
 369	.gpl_only	= false,
 370	.ret_type	= RET_INTEGER,
 371};
 372
 373BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
 374{
 375	struct cgroup *cgrp;
 376	struct cgroup *ancestor;
 377	u64 cgrp_id;
 378
 379	rcu_read_lock();
 380	cgrp = task_dfl_cgroup(current);
 381	ancestor = cgroup_ancestor(cgrp, ancestor_level);
 382	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
 383	rcu_read_unlock();
 384
 385	return cgrp_id;
 386}
 387
 388const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
 389	.func		= bpf_get_current_ancestor_cgroup_id,
 390	.gpl_only	= false,
 391	.ret_type	= RET_INTEGER,
 392	.arg1_type	= ARG_ANYTHING,
 393};
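
A hedged sketch of the usual consumer: a tracing program that filters events to one cgroup, with the target id loaded by userspace into the read-only data section (the variable name is illustrative):

const volatile __u64 target_cgroup_id = 0;

SEC("tracepoint/sched/sched_process_exec")
int on_exec(void *ctx)
{
	if (bpf_get_current_cgroup_id() != target_cgroup_id)
		return 0;
	/* ... record the event ... */
	return 0;
}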
 394
 395#ifdef CONFIG_CGROUP_BPF
 396DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
 397		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
 398
 399BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
 400{
 401	/* flags argument is not used now,
 402	 * but provides an ability to extend the API.
 403	 * verifier checks that its value is correct.
 404	 */
 405	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
 406	struct bpf_cgroup_storage *storage = NULL;
 407	void *ptr;
 408	int i;
 409
 410	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
 411		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
 412			continue;
 413
 414		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
 415		break;
 416	}
 417
 418	if (stype == BPF_CGROUP_STORAGE_SHARED)
 419		ptr = &READ_ONCE(storage->buf)->data[0];
 420	else
 421		ptr = this_cpu_ptr(storage->percpu_buf);
 422
 423	return (unsigned long)ptr;
 424}
 425
 426const struct bpf_func_proto bpf_get_local_storage_proto = {
 427	.func		= bpf_get_local_storage,
 428	.gpl_only	= false,
 429	.ret_type	= RET_PTR_TO_MAP_VALUE,
 430	.arg1_type	= ARG_CONST_MAP_PTR,
 431	.arg2_type	= ARG_ANYTHING,
 432};
 433#endif
 434
 435#define BPF_STRTOX_BASE_MASK 0x1F
 436
 437static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
 438			  unsigned long long *res, bool *is_negative)
 439{
 440	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
 441	const char *cur_buf = buf;
 442	size_t cur_len = buf_len;
 443	unsigned int consumed;
 444	size_t val_len;
 445	char str[64];
 446
 447	if (!buf || !buf_len || !res || !is_negative)
 448		return -EINVAL;
 449
 450	if (base != 0 && base != 8 && base != 10 && base != 16)
 451		return -EINVAL;
 452
 453	if (flags & ~BPF_STRTOX_BASE_MASK)
 454		return -EINVAL;
 455
 456	while (cur_buf < buf + buf_len && isspace(*cur_buf))
 457		++cur_buf;
 458
 459	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
 460	if (*is_negative)
 461		++cur_buf;
 462
 463	consumed = cur_buf - buf;
 464	cur_len -= consumed;
 465	if (!cur_len)
 466		return -EINVAL;
 467
 468	cur_len = min(cur_len, sizeof(str) - 1);
 469	memcpy(str, cur_buf, cur_len);
 470	str[cur_len] = '\0';
 471	cur_buf = str;
 472
 473	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
 474	val_len = _parse_integer(cur_buf, base, res);
 475
 476	if (val_len & KSTRTOX_OVERFLOW)
 477		return -ERANGE;
 478
 479	if (val_len == 0)
 480		return -EINVAL;
 481
 482	cur_buf += val_len;
 483	consumed += cur_buf - str;
 484
 485	return consumed;
 486}
 487
 488static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
 489			 long long *res)
 490{
 491	unsigned long long _res;
 492	bool is_negative;
 493	int err;
 494
 495	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
 496	if (err < 0)
 497		return err;
 498	if (is_negative) {
 499		if ((long long)-_res > 0)
 500			return -ERANGE;
 501		*res = -_res;
 502	} else {
 503		if ((long long)_res < 0)
 504			return -ERANGE;
 505		*res = _res;
 506	}
 507	return err;
 508}
 509
 510BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
 511	   long *, res)
 512{
 513	long long _res;
 514	int err;
 515
 516	err = __bpf_strtoll(buf, buf_len, flags, &_res);
 517	if (err < 0)
 518		return err;
 519	if (_res != (long)_res)
 520		return -ERANGE;
 521	*res = _res;
 522	return err;
 523}
 524
 525const struct bpf_func_proto bpf_strtol_proto = {
 526	.func		= bpf_strtol,
 527	.gpl_only	= false,
 528	.ret_type	= RET_INTEGER,
 529	.arg1_type	= ARG_PTR_TO_MEM,
 530	.arg2_type	= ARG_CONST_SIZE,
 531	.arg3_type	= ARG_ANYTHING,
 532	.arg4_type	= ARG_PTR_TO_LONG,
 533};
 534
 535BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
 536	   unsigned long *, res)
 537{
 538	unsigned long long _res;
 539	bool is_negative;
 540	int err;
 541
 542	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
 543	if (err < 0)
 544		return err;
 545	if (is_negative)
 546		return -EINVAL;
 547	if (_res != (unsigned long)_res)
 548		return -ERANGE;
 549	*res = _res;
 550	return err;
 551}
 552
 553const struct bpf_func_proto bpf_strtoul_proto = {
 554	.func		= bpf_strtoul,
 555	.gpl_only	= false,
 556	.ret_type	= RET_INTEGER,
 557	.arg1_type	= ARG_PTR_TO_MEM,
 558	.arg2_type	= ARG_CONST_SIZE,
 559	.arg3_type	= ARG_ANYTHING,
 560	.arg4_type	= ARG_PTR_TO_LONG,
 561};
 562#endif
 563
 564BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
 565	   struct bpf_pidns_info *, nsdata, u32, size)
 566{
 567	struct task_struct *task = current;
 568	struct pid_namespace *pidns;
 569	int err = -EINVAL;
 570
 571	if (unlikely(size != sizeof(struct bpf_pidns_info)))
 572		goto clear;
 573
 574	if (unlikely((u64)(dev_t)dev != dev))
 575		goto clear;
 576
 577	if (unlikely(!task))
 578		goto clear;
 579
 580	pidns = task_active_pid_ns(task);
 581	if (unlikely(!pidns)) {
 582		err = -ENOENT;
 583		goto clear;
 584	}
 585
 586	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
 587		goto clear;
 588
 589	nsdata->pid = task_pid_nr_ns(task, pidns);
 590	nsdata->tgid = task_tgid_nr_ns(task, pidns);
 591	return 0;
 592clear:
 593	memset((void *)nsdata, 0, (size_t) size);
 594	return err;
 595}
 596
 597const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
 598	.func		= bpf_get_ns_current_pid_tgid,
 599	.gpl_only	= false,
 600	.ret_type	= RET_INTEGER,
 601	.arg1_type	= ARG_ANYTHING,
 602	.arg2_type	= ARG_ANYTHING,
 603	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
 604	.arg4_type      = ARG_CONST_SIZE,
 605};
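
Usage sketch: userspace stat(2)s /proc/self/ns/pid and hands the resulting dev/ino pair to the program, which can then report pids relative to that namespace (dev and ino are assumed to arrive via the program's read-only data):

	struct bpf_pidns_info ns = {};

	if (bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(ns)))
		return 0;
	/* ns.pid / ns.tgid are namespace-local values */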
 606
 607static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
 608	.func		= bpf_get_raw_cpu_id,
 609	.gpl_only	= false,
 610	.ret_type	= RET_INTEGER,
 611};
 612
 613BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
 614	   u64, flags, void *, data, u64, size)
 615{
 616	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 617		return -EINVAL;
 618
 619	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
 620}
 621
 622const struct bpf_func_proto bpf_event_output_data_proto =  {
 623	.func		= bpf_event_output_data,
 624	.gpl_only       = true,
 625	.ret_type       = RET_INTEGER,
 626	.arg1_type      = ARG_PTR_TO_CTX,
 627	.arg2_type      = ARG_CONST_MAP_PTR,
 628	.arg3_type      = ARG_ANYTHING,
 629	.arg4_type      = ARG_PTR_TO_MEM,
 630	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 631};
 632
 633BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
 634	   const void __user *, user_ptr)
 635{
 636	int ret = copy_from_user(dst, user_ptr, size);
 637
 638	if (unlikely(ret)) {
 639		memset(dst, 0, size);
 640		ret = -EFAULT;
 641	}
 642
 643	return ret;
 644}
 645
 646const struct bpf_func_proto bpf_copy_from_user_proto = {
 647	.func		= bpf_copy_from_user,
 648	.gpl_only	= false,
 649	.ret_type	= RET_INTEGER,
 650	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 651	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 652	.arg3_type	= ARG_ANYTHING,
 653};
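
A minimal sketch of calling bpf_copy_from_user(): it may fault user pages in, so it is only available to sleepable programs (e.g. fentry.s/lsm.s); on failure the destination is zeroed, as the code above shows (user_ptr is an illustrative argument pointer):

	char buf[64] = {};

	if (bpf_copy_from_user(buf, sizeof(buf), user_ptr) < 0)
		return 0;	/* -EFAULT, buf was cleared */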
 654
 655BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
 656{
 657	if (cpu >= nr_cpu_ids)
 658		return (unsigned long)NULL;
 659
 660	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
 661}
 662
 663const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
 664	.func		= bpf_per_cpu_ptr,
 665	.gpl_only	= false,
 666	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
 667	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
 668	.arg2_type	= ARG_ANYTHING,
 669};
 670
 671BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
 672{
 673	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
 674}
 675
 676const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
 677	.func		= bpf_this_cpu_ptr,
 678	.gpl_only	= false,
 679	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID,
 680	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
 681};
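
Sketch of the per-CPU pointer helpers from the BPF side: the argument is a __ksym reference to a per-CPU kernel variable; bpf_per_cpu_ptr() returns NULL for an out-of-range CPU while bpf_this_cpu_ptr() never does. bpf_prog_active is used here purely as a well-known per-CPU int, and cpu is assumed to be provided by the program:

extern const int bpf_prog_active __ksym;	/* any per-CPU kernel variable with BTF */

	int *p, *me, val = 0;

	p = bpf_per_cpu_ptr(&bpf_prog_active, cpu);
	if (p)					/* NULL when cpu >= nr_cpu_ids */
		val = *p;
	me = bpf_this_cpu_ptr(&bpf_prog_active);	/* current CPU, never NULL */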
 682
 683static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
 684		size_t bufsz)
 685{
 686	void __user *user_ptr = (__force void __user *)unsafe_ptr;
 687
 688	buf[0] = 0;
 689
 690	switch (fmt_ptype) {
 691	case 's':
 692#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 693		if ((unsigned long)unsafe_ptr < TASK_SIZE)
 694			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
 695		fallthrough;
 696#endif
 697	case 'k':
 698		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
 699	case 'u':
 700		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
 701	}
 702
 703	return -EINVAL;
 704}
 705
 706/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 707 * arguments representation.
 708 */
 709#define MAX_BPRINTF_BUF_LEN	512
 710
 711/* Support executing three nested bprintf helper calls on a given CPU */
 712#define MAX_BPRINTF_NEST_LEVEL	3
 713struct bpf_bprintf_buffers {
 714	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
 715};
 716static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
 717static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
 718
 719static int try_get_fmt_tmp_buf(char **tmp_buf)
 720{
 721	struct bpf_bprintf_buffers *bufs;
 722	int nest_level;
 723
 724	preempt_disable();
 725	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
 726	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
 727		this_cpu_dec(bpf_bprintf_nest_level);
 728		preempt_enable();
 729		return -EBUSY;
 730	}
 731	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
 732	*tmp_buf = bufs->tmp_bufs[nest_level - 1];
 733
 734	return 0;
 735}
 736
 737void bpf_bprintf_cleanup(void)
 738{
 739	if (this_cpu_read(bpf_bprintf_nest_level)) {
 740		this_cpu_dec(bpf_bprintf_nest_level);
 741		preempt_enable();
 742	}
 743}
 744
 745/*
 746 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 747 *
 748 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 749 *
 750 * This can be used in two ways:
 751 * - Format string verification only: when bin_args is NULL
 752 * - Arguments preparation: in addition to the above verification, it writes in
 753 *   bin_args a binary representation of arguments usable by bstr_printf where
 754 *   pointers from BPF have been sanitized.
 755 *
 756 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 757 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 758 */
 759int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 760			u32 **bin_args, u32 num_args)
 761{
 762	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
 763	size_t sizeof_cur_arg, sizeof_cur_ip;
 764	int err, i, num_spec = 0;
 765	u64 cur_arg;
 766	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
 767
 768	fmt_end = strnchr(fmt, fmt_size, 0);
 769	if (!fmt_end)
 770		return -EINVAL;
 771	fmt_size = fmt_end - fmt;
 772
 773	if (bin_args) {
 774		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
 775			return -EBUSY;
 776
 777		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
 778		*bin_args = (u32 *)tmp_buf;
 779	}
 780
 781	for (i = 0; i < fmt_size; i++) {
 782		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
 783			err = -EINVAL;
 784			goto out;
 785		}
 786
 787		if (fmt[i] != '%')
 788			continue;
 789
 790		if (fmt[i + 1] == '%') {
 791			i++;
 792			continue;
 793		}
 794
 795		if (num_spec >= num_args) {
 796			err = -EINVAL;
 797			goto out;
 798		}
 799
 800		/* The string is zero-terminated so if fmt[i] != 0, we can
 801		 * always access fmt[i + 1], in the worst case it will be a 0
 802		 */
 803		i++;
 804
 805		/* skip optional "[0 +-][num]" width formatting field */
 806		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
 807		       fmt[i] == ' ')
 808			i++;
 809		if (fmt[i] >= '1' && fmt[i] <= '9') {
 810			i++;
 811			while (fmt[i] >= '0' && fmt[i] <= '9')
 812				i++;
 813		}
 814
 815		if (fmt[i] == 'p') {
 816			sizeof_cur_arg = sizeof(long);
 817
 818			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
 819			    fmt[i + 2] == 's') {
 820				fmt_ptype = fmt[i + 1];
 821				i += 2;
 822				goto fmt_str;
 823			}
 824
 825			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
 826			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
 827			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
 828			    fmt[i + 1] == 'S') {
 829				/* just kernel pointers */
 830				if (tmp_buf)
 831					cur_arg = raw_args[num_spec];
 832				i++;
 833				goto nocopy_fmt;
 834			}
 835
 836			if (fmt[i + 1] == 'B') {
 837				if (tmp_buf)  {
 838					err = snprintf(tmp_buf,
 839						       (tmp_buf_end - tmp_buf),
 840						       "%pB",
 841						       (void *)(long)raw_args[num_spec]);
 842					tmp_buf += (err + 1);
 843				}
 844
 845				i++;
 846				num_spec++;
 847				continue;
 848			}
 849
 850			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
 851			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
 852			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
 853				err = -EINVAL;
 854				goto out;
 855			}
 856
 857			i += 2;
 858			if (!tmp_buf)
 859				goto nocopy_fmt;
 860
 861			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
 862			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
 863				err = -ENOSPC;
 864				goto out;
 865			}
 866
 867			unsafe_ptr = (char *)(long)raw_args[num_spec];
 868			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
 869						       sizeof_cur_ip);
 870			if (err < 0)
 871				memset(cur_ip, 0, sizeof_cur_ip);
 872
 873			/* hack: bstr_printf expects IP addresses to be
 874			 * pre-formatted as strings, ironically, the easiest way
 875			 * to do that is to call snprintf.
 876			 */
 877			ip_spec[2] = fmt[i - 1];
 878			ip_spec[3] = fmt[i];
 879			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
 880				       ip_spec, &cur_ip);
 881
 882			tmp_buf += err + 1;
 883			num_spec++;
 884
 885			continue;
 886		} else if (fmt[i] == 's') {
 887			fmt_ptype = fmt[i];
 888fmt_str:
 889			if (fmt[i + 1] != 0 &&
 890			    !isspace(fmt[i + 1]) &&
 891			    !ispunct(fmt[i + 1])) {
 892				err = -EINVAL;
 893				goto out;
 894			}
 895
 896			if (!tmp_buf)
 897				goto nocopy_fmt;
 898
 899			if (tmp_buf_end == tmp_buf) {
 900				err = -ENOSPC;
 901				goto out;
 902			}
 903
 904			unsafe_ptr = (char *)(long)raw_args[num_spec];
 905			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
 906						    fmt_ptype,
 907						    tmp_buf_end - tmp_buf);
 908			if (err < 0) {
 909				tmp_buf[0] = '\0';
 910				err = 1;
 911			}
 912
 913			tmp_buf += err;
 914			num_spec++;
 915
 916			continue;
 917		}
 918
 919		sizeof_cur_arg = sizeof(int);
 920
 921		if (fmt[i] == 'l') {
 922			sizeof_cur_arg = sizeof(long);
 923			i++;
 924		}
 925		if (fmt[i] == 'l') {
 926			sizeof_cur_arg = sizeof(long long);
 927			i++;
 928		}
 929
 930		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
 931		    fmt[i] != 'x' && fmt[i] != 'X') {
 932			err = -EINVAL;
 933			goto out;
 934		}
 935
 936		if (tmp_buf)
 937			cur_arg = raw_args[num_spec];
 938nocopy_fmt:
 939		if (tmp_buf) {
 940			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
 941			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
 942				err = -ENOSPC;
 943				goto out;
 944			}
 945
 946			if (sizeof_cur_arg == 8) {
 947				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
 948				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
 949			} else {
 950				*(u32 *)tmp_buf = (u32)(long)cur_arg;
 951			}
 952			tmp_buf += sizeof_cur_arg;
 953		}
 954		num_spec++;
 955	}
 956
 957	err = 0;
 958out:
 959	if (err)
 960		bpf_bprintf_cleanup();
 961	return err;
 962}
 963
 964#define MAX_SNPRINTF_VARARGS		12
 965
 966BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
 967	   const void *, data, u32, data_len)
 968{
 969	int err, num_args;
 970	u32 *bin_args;
 971
 972	if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 ||
 973	    (data_len && !data))
 974		return -EINVAL;
 975	num_args = data_len / 8;
 976
 977	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
 978	 * can safely give an unbounded size.
 979	 */
 980	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
 981	if (err < 0)
 982		return err;
 983
 984	err = bstr_printf(str, str_size, fmt, bin_args);
 985
 986	bpf_bprintf_cleanup();
 987
 988	return err + 1;
 989}
 990
 991const struct bpf_func_proto bpf_snprintf_proto = {
 992	.func		= bpf_snprintf,
 993	.gpl_only	= true,
 994	.ret_type	= RET_INTEGER,
 995	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
 996	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 997	.arg3_type	= ARG_PTR_TO_CONST_STR,
 998	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
 999	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1000};
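
Sketch of the calling convention bpf_snprintf() expects: the format string must live in read-only memory (ARG_PTR_TO_CONST_STR) and all variadic arguments are passed as an array of u64 whose byte size goes in data_len (values here are illustrative):

	static const char fmt[] = "cpu %u: %d events";
	__u64 args[] = { bpf_get_smp_processor_id(), nr_events };
	char out[64];

	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));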
1001
1002const struct bpf_func_proto bpf_get_current_task_proto __weak;
1003const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1004const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1005const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1006const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1007
1008const struct bpf_func_proto *
1009bpf_base_func_proto(enum bpf_func_id func_id)
1010{
1011	switch (func_id) {
1012	case BPF_FUNC_map_lookup_elem:
1013		return &bpf_map_lookup_elem_proto;
1014	case BPF_FUNC_map_update_elem:
1015		return &bpf_map_update_elem_proto;
1016	case BPF_FUNC_map_delete_elem:
1017		return &bpf_map_delete_elem_proto;
1018	case BPF_FUNC_map_push_elem:
1019		return &bpf_map_push_elem_proto;
1020	case BPF_FUNC_map_pop_elem:
1021		return &bpf_map_pop_elem_proto;
1022	case BPF_FUNC_map_peek_elem:
1023		return &bpf_map_peek_elem_proto;
1024	case BPF_FUNC_get_prandom_u32:
1025		return &bpf_get_prandom_u32_proto;
1026	case BPF_FUNC_get_smp_processor_id:
1027		return &bpf_get_raw_smp_processor_id_proto;
1028	case BPF_FUNC_get_numa_node_id:
1029		return &bpf_get_numa_node_id_proto;
1030	case BPF_FUNC_tail_call:
1031		return &bpf_tail_call_proto;
1032	case BPF_FUNC_ktime_get_ns:
1033		return &bpf_ktime_get_ns_proto;
1034	case BPF_FUNC_ktime_get_boot_ns:
1035		return &bpf_ktime_get_boot_ns_proto;
1036	case BPF_FUNC_ktime_get_coarse_ns:
1037		return &bpf_ktime_get_coarse_ns_proto;
1038	case BPF_FUNC_ringbuf_output:
1039		return &bpf_ringbuf_output_proto;
1040	case BPF_FUNC_ringbuf_reserve:
1041		return &bpf_ringbuf_reserve_proto;
1042	case BPF_FUNC_ringbuf_submit:
1043		return &bpf_ringbuf_submit_proto;
1044	case BPF_FUNC_ringbuf_discard:
1045		return &bpf_ringbuf_discard_proto;
1046	case BPF_FUNC_ringbuf_query:
1047		return &bpf_ringbuf_query_proto;
1048	case BPF_FUNC_for_each_map_elem:
1049		return &bpf_for_each_map_elem_proto;
1050	default:
1051		break;
1052	}
1053
1054	if (!bpf_capable())
1055		return NULL;
1056
1057	switch (func_id) {
1058	case BPF_FUNC_spin_lock:
1059		return &bpf_spin_lock_proto;
1060	case BPF_FUNC_spin_unlock:
1061		return &bpf_spin_unlock_proto;
1062	case BPF_FUNC_jiffies64:
1063		return &bpf_jiffies64_proto;
1064	case BPF_FUNC_per_cpu_ptr:
1065		return &bpf_per_cpu_ptr_proto;
1066	case BPF_FUNC_this_cpu_ptr:
1067		return &bpf_this_cpu_ptr_proto;
1068	default:
1069		break;
1070	}
1071
1072	if (!perfmon_capable())
1073		return NULL;
1074
1075	switch (func_id) {
1076	case BPF_FUNC_trace_printk:
1077		return bpf_get_trace_printk_proto();
1078	case BPF_FUNC_get_current_task:
1079		return &bpf_get_current_task_proto;
1080	case BPF_FUNC_probe_read_user:
1081		return &bpf_probe_read_user_proto;
1082	case BPF_FUNC_probe_read_kernel:
1083		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1084		       NULL : &bpf_probe_read_kernel_proto;
1085	case BPF_FUNC_probe_read_user_str:
1086		return &bpf_probe_read_user_str_proto;
1087	case BPF_FUNC_probe_read_kernel_str:
1088		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1089		       NULL : &bpf_probe_read_kernel_str_proto;
1090	case BPF_FUNC_snprintf_btf:
1091		return &bpf_snprintf_btf_proto;
1092	case BPF_FUNC_snprintf:
1093		return &bpf_snprintf_proto;
1094	default:
1095		return NULL;
1096	}
1097}