v4.6
  1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  2 *
  3 * This program is free software; you can redistribute it and/or
  4 * modify it under the terms of version 2 of the GNU General Public
  5 * License as published by the Free Software Foundation.
  6 *
  7 * This program is distributed in the hope that it will be useful, but
  8 * WITHOUT ANY WARRANTY; without even the implied warranty of
  9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 10 * General Public License for more details.
 11 */
 12#include <linux/bpf.h>
 13#include <linux/syscalls.h>
 14#include <linux/slab.h>
 15#include <linux/anon_inodes.h>
 16#include <linux/file.h>
 17#include <linux/license.h>
 18#include <linux/filter.h>
 19#include <linux/version.h>
 20
 21DEFINE_PER_CPU(int, bpf_prog_active);
 22
 23int sysctl_unprivileged_bpf_disabled __read_mostly;
 24
 25static LIST_HEAD(bpf_map_types);
 26
 27static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 28{
 29	struct bpf_map_type_list *tl;
 30	struct bpf_map *map;
 31
 32	list_for_each_entry(tl, &bpf_map_types, list_node) {
 33		if (tl->type == attr->map_type) {
 34			map = tl->ops->map_alloc(attr);
 35			if (IS_ERR(map))
 36				return map;
 37			map->ops = tl->ops;
 38			map->map_type = attr->map_type;
 39			return map;
 40		}
 41	}
 42	return ERR_PTR(-EINVAL);
 43}
 44
 45/* boot time registration of different map implementations */
 46void bpf_register_map_type(struct bpf_map_type_list *tl)
 47{
 48	list_add(&tl->list_node, &bpf_map_types);
 49}
 50
 51int bpf_map_precharge_memlock(u32 pages)
 52{
 53	struct user_struct *user = get_current_user();
 54	unsigned long memlock_limit, cur;
 55
 56	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 57	cur = atomic_long_read(&user->locked_vm);
 58	free_uid(user);
 59	if (cur + pages > memlock_limit)
 60		return -EPERM;
 61	return 0;
 62}
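(Aside: the RLIMIT_MEMLOCK precharge above is why map creation returns EPERM when the locked-memory limit is too low on kernels that still account map memory this way, as this version does. A minimal userspace sketch of the usual workaround, using only standard libc calls; illustrative, not part of this file:)

#include <sys/resource.h>

/* Raise the locked-memory limit before creating large BPF maps;
 * needs CAP_SYS_RESOURCE (or root), otherwise setrlimit() itself fails. */
static int bump_memlock_rlimit(void)
{
	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };

	return setrlimit(RLIMIT_MEMLOCK, &r);
}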
 63
 64static int bpf_map_charge_memlock(struct bpf_map *map)
 65{
 66	struct user_struct *user = get_current_user();
 67	unsigned long memlock_limit;
 68
 69	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 70
 71	atomic_long_add(map->pages, &user->locked_vm);
 72
 73	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
 74		atomic_long_sub(map->pages, &user->locked_vm);
 75		free_uid(user);
 76		return -EPERM;
 77	}
 78	map->user = user;
 79	return 0;
 80}
 81
 82static void bpf_map_uncharge_memlock(struct bpf_map *map)
 83{
 84	struct user_struct *user = map->user;
 85
 86	atomic_long_sub(map->pages, &user->locked_vm);
 87	free_uid(user);
 88}
 89
 90/* called from workqueue */
 91static void bpf_map_free_deferred(struct work_struct *work)
 92{
 93	struct bpf_map *map = container_of(work, struct bpf_map, work);
 94
 95	bpf_map_uncharge_memlock(map);
 96	/* implementation dependent freeing */
 97	map->ops->map_free(map);
 98}
 99
100static void bpf_map_put_uref(struct bpf_map *map)
101{
102	if (atomic_dec_and_test(&map->usercnt)) {
103		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
104			bpf_fd_array_map_clear(map);
105	}
106}
107
108/* decrement map refcnt and schedule it for freeing via workqueue
109 * (underlying map implementation ops->map_free() might sleep)
110 */
111void bpf_map_put(struct bpf_map *map)
112{
113	if (atomic_dec_and_test(&map->refcnt)) {
114		INIT_WORK(&map->work, bpf_map_free_deferred);
115		schedule_work(&map->work);
116	}
117}
118
119void bpf_map_put_with_uref(struct bpf_map *map)
120{
121	bpf_map_put_uref(map);
122	bpf_map_put(map);
123}
124
125static int bpf_map_release(struct inode *inode, struct file *filp)
126{
127	bpf_map_put_with_uref(filp->private_data);
128	return 0;
129}
130
131#ifdef CONFIG_PROC_FS
132static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
133{
134	const struct bpf_map *map = filp->private_data;
135
136	seq_printf(m,
137		   "map_type:\t%u\n"
138		   "key_size:\t%u\n"
139		   "value_size:\t%u\n"
140		   "max_entries:\t%u\n"
141		   "map_flags:\t%#x\n",
142		   map->map_type,
143		   map->key_size,
144		   map->value_size,
145		   map->max_entries,
146		   map->map_flags);
147}
148#endif
149
150static const struct file_operations bpf_map_fops = {
151#ifdef CONFIG_PROC_FS
152	.show_fdinfo	= bpf_map_show_fdinfo,
153#endif
154	.release	= bpf_map_release,
155};
156
157int bpf_map_new_fd(struct bpf_map *map)
158{
159	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
160				O_RDWR | O_CLOEXEC);
161}
162
163/* helper macro to check that unused fields 'union bpf_attr' are zero */
164#define CHECK_ATTR(CMD) \
165	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
166		   sizeof(attr->CMD##_LAST_FIELD), 0, \
167		   sizeof(*attr) - \
168		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
169		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
170
171#define BPF_MAP_CREATE_LAST_FIELD map_flags
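(For reference, CHECK_ATTR(BPF_MAP_CREATE) with the LAST_FIELD define above expands to a check that every byte of the attr union past map_flags is zero, so attributes this kernel does not understand are rejected; roughly:)

	memchr_inv((void *) &attr->map_flags + sizeof(attr->map_flags), 0,
		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
		   sizeof(attr->map_flags)) != NULL	/* non-zero tail -> -EINVAL */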
172/* called via syscall */
173static int map_create(union bpf_attr *attr)
174{
175	struct bpf_map *map;
176	int err;
177
178	err = CHECK_ATTR(BPF_MAP_CREATE);
179	if (err)
180		return -EINVAL;
181
182	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
183	map = find_and_alloc_map(attr);
184	if (IS_ERR(map))
185		return PTR_ERR(map);
186
187	atomic_set(&map->refcnt, 1);
188	atomic_set(&map->usercnt, 1);
189
190	err = bpf_map_charge_memlock(map);
191	if (err)
192		goto free_map;
193
194	err = bpf_map_new_fd(map);
195	if (err < 0)
196		/* failed to allocate fd */
197		goto free_map;
198
199	return err;
200
201free_map:
202	map->ops->map_free(map);
203	return err;
204}
205
206/* if error is returned, fd is released.
207 * On success caller should complete fd access with matching fdput()
208 */
209struct bpf_map *__bpf_map_get(struct fd f)
210{
211	if (!f.file)
212		return ERR_PTR(-EBADF);
213	if (f.file->f_op != &bpf_map_fops) {
214		fdput(f);
215		return ERR_PTR(-EINVAL);
216	}
217
218	return f.file->private_data;
219}
220
221/* prog's and map's refcnt limit */
222#define BPF_MAX_REFCNT 32768
223
224struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
225{
226	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
227		atomic_dec(&map->refcnt);
228		return ERR_PTR(-EBUSY);
229	}
230	if (uref)
231		atomic_inc(&map->usercnt);
232	return map;
233}
234
235struct bpf_map *bpf_map_get_with_uref(u32 ufd)
236{
237	struct fd f = fdget(ufd);
238	struct bpf_map *map;
239
240	map = __bpf_map_get(f);
241	if (IS_ERR(map))
242		return map;
243
244	map = bpf_map_inc(map, true);
245	fdput(f);
246
247	return map;
248}
249
250/* helper to convert user pointers passed inside __aligned_u64 fields */
251static void __user *u64_to_ptr(__u64 val)
252{
253	return (void __user *) (unsigned long) val;
254}
255
256int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
257{
258	return -ENOTSUPP;
259}
260
261/* last field in 'union bpf_attr' used by this command */
262#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
263
264static int map_lookup_elem(union bpf_attr *attr)
265{
266	void __user *ukey = u64_to_ptr(attr->key);
267	void __user *uvalue = u64_to_ptr(attr->value);
268	int ufd = attr->map_fd;
269	struct bpf_map *map;
270	void *key, *value, *ptr;
271	u32 value_size;
272	struct fd f;
273	int err;
274
275	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
276		return -EINVAL;
277
278	f = fdget(ufd);
279	map = __bpf_map_get(f);
280	if (IS_ERR(map))
281		return PTR_ERR(map);
282
283	err = -ENOMEM;
284	key = kmalloc(map->key_size, GFP_USER);
285	if (!key)
286		goto err_put;
287
288	err = -EFAULT;
289	if (copy_from_user(key, ukey, map->key_size) != 0)
290		goto free_key;
291
292	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
293	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
294		value_size = round_up(map->value_size, 8) * num_possible_cpus();
295	else
296		value_size = map->value_size;
297
298	err = -ENOMEM;
299	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
300	if (!value)
301		goto free_key;
302
303	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
304		err = bpf_percpu_hash_copy(map, key, value);
305	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
306		err = bpf_percpu_array_copy(map, key, value);
307	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
308		err = bpf_stackmap_copy(map, key, value);
309	} else {
310		rcu_read_lock();
311		ptr = map->ops->map_lookup_elem(map, key);
312		if (ptr)
313			memcpy(value, ptr, value_size);
314		rcu_read_unlock();
315		err = ptr ? 0 : -ENOENT;
316	}
317
318	if (err)
319		goto free_value;
320
321	err = -EFAULT;
322	if (copy_to_user(uvalue, value, value_size) != 0)
323		goto free_value;
324
325	err = 0;
326
327free_value:
328	kfree(value);
329free_key:
330	kfree(key);
331err_put:
332	fdput(f);
333	return err;
334}
335
336#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
337
338static int map_update_elem(union bpf_attr *attr)
339{
340	void __user *ukey = u64_to_ptr(attr->key);
341	void __user *uvalue = u64_to_ptr(attr->value);
342	int ufd = attr->map_fd;
343	struct bpf_map *map;
344	void *key, *value;
345	u32 value_size;
346	struct fd f;
347	int err;
348
349	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
350		return -EINVAL;
351
352	f = fdget(ufd);
353	map = __bpf_map_get(f);
354	if (IS_ERR(map))
355		return PTR_ERR(map);
356
357	err = -ENOMEM;
358	key = kmalloc(map->key_size, GFP_USER);
359	if (!key)
360		goto err_put;
361
362	err = -EFAULT;
363	if (copy_from_user(key, ukey, map->key_size) != 0)
364		goto free_key;
365
366	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
367	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
368		value_size = round_up(map->value_size, 8) * num_possible_cpus();
369	else
370		value_size = map->value_size;
371
372	err = -ENOMEM;
373	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
374	if (!value)
375		goto free_key;
376
377	err = -EFAULT;
378	if (copy_from_user(value, uvalue, value_size) != 0)
379		goto free_value;
380
381	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
382	 * inside bpf map update or delete otherwise deadlocks are possible
383	 */
384	preempt_disable();
385	__this_cpu_inc(bpf_prog_active);
386	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
387		err = bpf_percpu_hash_update(map, key, value, attr->flags);
388	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
389		err = bpf_percpu_array_update(map, key, value, attr->flags);
390	} else {
391		rcu_read_lock();
392		err = map->ops->map_update_elem(map, key, value, attr->flags);
393		rcu_read_unlock();
394	}
395	__this_cpu_dec(bpf_prog_active);
396	preempt_enable();
397
398free_value:
399	kfree(value);
400free_key:
401	kfree(key);
402err_put:
403	fdput(f);
404	return err;
405}
406
407#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
408
409static int map_delete_elem(union bpf_attr *attr)
410{
411	void __user *ukey = u64_to_ptr(attr->key);
412	int ufd = attr->map_fd;
413	struct bpf_map *map;
414	struct fd f;
415	void *key;
416	int err;
417
418	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
419		return -EINVAL;
420
421	f = fdget(ufd);
422	map = __bpf_map_get(f);
423	if (IS_ERR(map))
424		return PTR_ERR(map);
425
426	err = -ENOMEM;
427	key = kmalloc(map->key_size, GFP_USER);
428	if (!key)
429		goto err_put;
430
431	err = -EFAULT;
432	if (copy_from_user(key, ukey, map->key_size) != 0)
433		goto free_key;
434
435	preempt_disable();
436	__this_cpu_inc(bpf_prog_active);
437	rcu_read_lock();
438	err = map->ops->map_delete_elem(map, key);
439	rcu_read_unlock();
440	__this_cpu_dec(bpf_prog_active);
441	preempt_enable();
442
443free_key:
444	kfree(key);
445err_put:
446	fdput(f);
447	return err;
448}
449
450/* last field in 'union bpf_attr' used by this command */
451#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
452
453static int map_get_next_key(union bpf_attr *attr)
454{
455	void __user *ukey = u64_to_ptr(attr->key);
456	void __user *unext_key = u64_to_ptr(attr->next_key);
457	int ufd = attr->map_fd;
458	struct bpf_map *map;
459	void *key, *next_key;
460	struct fd f;
461	int err;
462
463	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
464		return -EINVAL;
465
466	f = fdget(ufd);
467	map = __bpf_map_get(f);
468	if (IS_ERR(map))
469		return PTR_ERR(map);
470
471	err = -ENOMEM;
472	key = kmalloc(map->key_size, GFP_USER);
473	if (!key)
474		goto err_put;
475
476	err = -EFAULT;
477	if (copy_from_user(key, ukey, map->key_size) != 0)
478		goto free_key;
479
480	err = -ENOMEM;
481	next_key = kmalloc(map->key_size, GFP_USER);
482	if (!next_key)
483		goto free_key;
484
485	rcu_read_lock();
486	err = map->ops->map_get_next_key(map, key, next_key);
487	rcu_read_unlock();
488	if (err)
489		goto free_next_key;
490
491	err = -EFAULT;
492	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
493		goto free_next_key;
494
495	err = 0;
496
497free_next_key:
498	kfree(next_key);
499free_key:
500	kfree(key);
501err_put:
502	fdput(f);
503	return err;
504}
505
506static LIST_HEAD(bpf_prog_types);
507
508static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
509{
510	struct bpf_prog_type_list *tl;
511
512	list_for_each_entry(tl, &bpf_prog_types, list_node) {
513		if (tl->type == type) {
514			prog->aux->ops = tl->ops;
515			prog->type = type;
516			return 0;
517		}
518	}
519
520	return -EINVAL;
521}
522
523void bpf_register_prog_type(struct bpf_prog_type_list *tl)
524{
525	list_add(&tl->list_node, &bpf_prog_types);
526}
527
528/* fixup insn->imm field of bpf_call instructions:
529 * if (insn->imm == BPF_FUNC_map_lookup_elem)
530 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
531 * else if (insn->imm == BPF_FUNC_map_update_elem)
532 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
533 * else ...
534 *
535 * this function is called after eBPF program passed verification
536 */
537static void fixup_bpf_calls(struct bpf_prog *prog)
538{
539	const struct bpf_func_proto *fn;
540	int i;
541
542	for (i = 0; i < prog->len; i++) {
543		struct bpf_insn *insn = &prog->insnsi[i];
544
545		if (insn->code == (BPF_JMP | BPF_CALL)) {
546			/* we reach here when program has bpf_call instructions
547			 * and it passed bpf_check(), means that
548			 * ops->get_func_proto must have been supplied, check it
549			 */
550			BUG_ON(!prog->aux->ops->get_func_proto);
551
552			if (insn->imm == BPF_FUNC_get_route_realm)
553				prog->dst_needed = 1;
554			if (insn->imm == BPF_FUNC_get_prandom_u32)
555				bpf_user_rnd_init_once();
556			if (insn->imm == BPF_FUNC_tail_call) {
557				/* mark bpf_tail_call as different opcode
558				 * to avoid conditional branch in
559			 * interpreter for every normal call
560				 * and to prevent accidental JITing by
561				 * JIT compiler that doesn't support
562				 * bpf_tail_call yet
563				 */
564				insn->imm = 0;
565				insn->code |= BPF_X;
566				continue;
567			}
568
569			fn = prog->aux->ops->get_func_proto(insn->imm);
570			/* all functions that have prototype and verifier allowed
571			 * programs to call them, must be real in-kernel functions
572			 */
573			BUG_ON(!fn->func);
574			insn->imm = fn->func - __bpf_call_base;
575		}
576	}
577}
578
579/* drop refcnt on maps used by eBPF program and free auxiliary data */
580static void free_used_maps(struct bpf_prog_aux *aux)
581{
582	int i;
583
584	for (i = 0; i < aux->used_map_cnt; i++)
585		bpf_map_put(aux->used_maps[i]);
586
587	kfree(aux->used_maps);
588}
589
590static int bpf_prog_charge_memlock(struct bpf_prog *prog)
591{
592	struct user_struct *user = get_current_user();
593	unsigned long memlock_limit;
594
595	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
596
597	atomic_long_add(prog->pages, &user->locked_vm);
598	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
599		atomic_long_sub(prog->pages, &user->locked_vm);
600		free_uid(user);
601		return -EPERM;
602	}
603	prog->aux->user = user;
604	return 0;
605}
606
607static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
608{
609	struct user_struct *user = prog->aux->user;
610
611	atomic_long_sub(prog->pages, &user->locked_vm);
612	free_uid(user);
613}
614
615static void __prog_put_common(struct rcu_head *rcu)
616{
617	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
618
619	free_used_maps(aux);
620	bpf_prog_uncharge_memlock(aux->prog);
621	bpf_prog_free(aux->prog);
622}
623
624/* version of bpf_prog_put() that is called after a grace period */
625void bpf_prog_put_rcu(struct bpf_prog *prog)
626{
627	if (atomic_dec_and_test(&prog->aux->refcnt))
628		call_rcu(&prog->aux->rcu, __prog_put_common);
629}
630
631void bpf_prog_put(struct bpf_prog *prog)
632{
633	if (atomic_dec_and_test(&prog->aux->refcnt))
634		__prog_put_common(&prog->aux->rcu);
635}
636EXPORT_SYMBOL_GPL(bpf_prog_put);
637
638static int bpf_prog_release(struct inode *inode, struct file *filp)
639{
640	struct bpf_prog *prog = filp->private_data;
641
642	bpf_prog_put_rcu(prog);
643	return 0;
644}
645
646static const struct file_operations bpf_prog_fops = {
647	.release	= bpf_prog_release,
648};
649
650int bpf_prog_new_fd(struct bpf_prog *prog)
651{
652	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
653				O_RDWR | O_CLOEXEC);
654}
655
656static struct bpf_prog *__bpf_prog_get(struct fd f)
657{
658	if (!f.file)
659		return ERR_PTR(-EBADF);
660	if (f.file->f_op != &bpf_prog_fops) {
661		fdput(f);
662		return ERR_PTR(-EINVAL);
663	}
664
665	return f.file->private_data;
666}
667
668struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
669{
670	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
671		atomic_dec(&prog->aux->refcnt);
672		return ERR_PTR(-EBUSY);
673	}
674	return prog;
675}
676
677/* called by sockets/tracing/seccomp before attaching program to an event
678 * pairs with bpf_prog_put()
679 */
680struct bpf_prog *bpf_prog_get(u32 ufd)
681{
682	struct fd f = fdget(ufd);
683	struct bpf_prog *prog;
684
685	prog = __bpf_prog_get(f);
686	if (IS_ERR(prog))
687		return prog;
688
689	prog = bpf_prog_inc(prog);
690	fdput(f);
691
692	return prog;
693}
694EXPORT_SYMBOL_GPL(bpf_prog_get);
695
696/* last field in 'union bpf_attr' used by this command */
697#define	BPF_PROG_LOAD_LAST_FIELD kern_version
698
699static int bpf_prog_load(union bpf_attr *attr)
700{
701	enum bpf_prog_type type = attr->prog_type;
702	struct bpf_prog *prog;
703	int err;
704	char license[128];
705	bool is_gpl;
706
707	if (CHECK_ATTR(BPF_PROG_LOAD))
708		return -EINVAL;
709
710	/* copy eBPF program license from user space */
711	if (strncpy_from_user(license, u64_to_ptr(attr->license),
712			      sizeof(license) - 1) < 0)
713		return -EFAULT;
714	license[sizeof(license) - 1] = 0;
715
716	/* eBPF programs must be GPL compatible to use GPL-ed functions */
717	is_gpl = license_is_gpl_compatible(license);
718
719	if (attr->insn_cnt >= BPF_MAXINSNS)
720		return -EINVAL;
721
722	if (type == BPF_PROG_TYPE_KPROBE &&
723	    attr->kern_version != LINUX_VERSION_CODE)
724		return -EINVAL;
725
726	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
727		return -EPERM;
728
729	/* plain bpf_prog allocation */
730	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
731	if (!prog)
732		return -ENOMEM;
733
734	err = bpf_prog_charge_memlock(prog);
735	if (err)
736		goto free_prog_nouncharge;
737
738	prog->len = attr->insn_cnt;
739
740	err = -EFAULT;
741	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
742			   prog->len * sizeof(struct bpf_insn)) != 0)
743		goto free_prog;
744
745	prog->orig_prog = NULL;
746	prog->jited = 0;
747
748	atomic_set(&prog->aux->refcnt, 1);
749	prog->gpl_compatible = is_gpl ? 1 : 0;
750
751	/* find program type: socket_filter vs tracing_filter */
752	err = find_prog_type(type, prog);
753	if (err < 0)
754		goto free_prog;
755
756	/* run eBPF verifier */
757	err = bpf_check(&prog, attr);
758	if (err < 0)
759		goto free_used_maps;
760
761	/* fixup BPF_CALL->imm field */
762	fixup_bpf_calls(prog);
763
764	/* eBPF program is ready to be JITed */
765	err = bpf_prog_select_runtime(prog);
766	if (err < 0)
767		goto free_used_maps;
768
769	err = bpf_prog_new_fd(prog);
770	if (err < 0)
771		/* failed to allocate fd */
772		goto free_used_maps;
773
774	return err;
775
776free_used_maps:
777	free_used_maps(prog->aux);
778free_prog:
779	bpf_prog_uncharge_memlock(prog);
780free_prog_nouncharge:
781	bpf_prog_free(prog);
782	return err;
783}
784
785#define BPF_OBJ_LAST_FIELD bpf_fd
786
787static int bpf_obj_pin(const union bpf_attr *attr)
788{
789	if (CHECK_ATTR(BPF_OBJ))
790		return -EINVAL;
791
792	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
793}
794
795static int bpf_obj_get(const union bpf_attr *attr)
796{
797	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
798		return -EINVAL;
799
800	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
801}
802
803SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
804{
805	union bpf_attr attr = {};
806	int err;
807
808	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
809		return -EPERM;
810
811	if (!access_ok(VERIFY_READ, uattr, 1))
812		return -EFAULT;
813
814	if (size > PAGE_SIZE)	/* silly large */
815		return -E2BIG;
816
817	/* If we're handed a bigger struct than we know of,
818	 * ensure all the unknown bits are 0 - i.e. new
819	 * user-space does not rely on any kernel feature
820	 * extensions we dont know about yet.
821	 * extensions we don't know about yet.
822	if (size > sizeof(attr)) {
823		unsigned char __user *addr;
824		unsigned char __user *end;
825		unsigned char val;
826
827		addr = (void __user *)uattr + sizeof(attr);
828		end  = (void __user *)uattr + size;
829
830		for (; addr < end; addr++) {
831			err = get_user(val, addr);
832			if (err)
833				return err;
834			if (val)
835				return -E2BIG;
836		}
837		size = sizeof(attr);
838	}
839
840	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
841	if (copy_from_user(&attr, uattr, size) != 0)
842		return -EFAULT;
843
844	switch (cmd) {
845	case BPF_MAP_CREATE:
846		err = map_create(&attr);
847		break;
848	case BPF_MAP_LOOKUP_ELEM:
849		err = map_lookup_elem(&attr);
850		break;
851	case BPF_MAP_UPDATE_ELEM:
852		err = map_update_elem(&attr);
853		break;
854	case BPF_MAP_DELETE_ELEM:
855		err = map_delete_elem(&attr);
856		break;
857	case BPF_MAP_GET_NEXT_KEY:
858		err = map_get_next_key(&attr);
859		break;
860	case BPF_PROG_LOAD:
861		err = bpf_prog_load(&attr);
862		break;
863	case BPF_OBJ_PIN:
864		err = bpf_obj_pin(&attr);
865		break;
866	case BPF_OBJ_GET:
867		err = bpf_obj_get(&attr);
868		break;
869	default:
870		err = -EINVAL;
871		break;
872	}
873
874	return err;
875}
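(A minimal userspace sketch of driving the syscall above for BPF_MAP_CREATE; illustrative only, using the raw syscall(2) interface and the uapi union bpf_attr rather than any helper library:)

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int create_array_map(void)
{
	union bpf_attr attr;

	/* unused fields must stay zero, or CHECK_ATTR() returns -EINVAL */
	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(__u32);
	attr.value_size  = sizeof(__u64);
	attr.max_entries = 256;

	/* returns a new map fd on success, -1 with errno set on error */
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}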
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 */
   4#include <linux/bpf.h>
   5#include <linux/bpf_trace.h>
   6#include <linux/bpf_lirc.h>
   7#include <linux/bpf_verifier.h>
   8#include <linux/btf.h>
   9#include <linux/syscalls.h>
  10#include <linux/slab.h>
  11#include <linux/sched/signal.h>
  12#include <linux/vmalloc.h>
  13#include <linux/mmzone.h>
  14#include <linux/anon_inodes.h>
  15#include <linux/fdtable.h>
  16#include <linux/file.h>
  17#include <linux/fs.h>
  18#include <linux/license.h>
  19#include <linux/filter.h>
  20#include <linux/kernel.h>
  21#include <linux/idr.h>
  22#include <linux/cred.h>
  23#include <linux/timekeeping.h>
  24#include <linux/ctype.h>
  25#include <linux/nospec.h>
  26#include <linux/audit.h>
  27#include <uapi/linux/btf.h>
  28#include <linux/pgtable.h>
  29#include <linux/bpf_lsm.h>
  30#include <linux/poll.h>
  31#include <linux/bpf-netns.h>
  32#include <linux/rcupdate_trace.h>
  33#include <linux/memcontrol.h>
  34
  35#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
  36			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
  37			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
  38#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
  39#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
  40#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
  41			IS_FD_HASH(map))
  42
  43#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
  44
  45DEFINE_PER_CPU(int, bpf_prog_active);
  46static DEFINE_IDR(prog_idr);
  47static DEFINE_SPINLOCK(prog_idr_lock);
  48static DEFINE_IDR(map_idr);
  49static DEFINE_SPINLOCK(map_idr_lock);
  50static DEFINE_IDR(link_idr);
  51static DEFINE_SPINLOCK(link_idr_lock);
  52
  53int sysctl_unprivileged_bpf_disabled __read_mostly =
  54	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
  55
  56static const struct bpf_map_ops * const bpf_map_types[] = {
  57#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
  58#define BPF_MAP_TYPE(_id, _ops) \
  59	[_id] = &_ops,
  60#define BPF_LINK_TYPE(_id, _name)
  61#include <linux/bpf_types.h>
  62#undef BPF_PROG_TYPE
  63#undef BPF_MAP_TYPE
  64#undef BPF_LINK_TYPE
  65};
  66
  67/*
  68 * If we're handed a bigger struct than we know of, ensure all the unknown bits
  69 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
  70 * we don't know about yet.
  71 *
  72 * There is a ToCToU between this function call and the following
  73 * copy_from_user() call. However, this is not a concern since this function is
  74 * meant to be a future-proofing of bits.
  75 */
  76int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
  77			     size_t expected_size,
  78			     size_t actual_size)
  79{
  80	int res;
  81
  82	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
  83		return -E2BIG;
  84
  85	if (actual_size <= expected_size)
  86		return 0;
  87
  88	if (uaddr.is_kernel)
  89		res = memchr_inv(uaddr.kernel + expected_size, 0,
  90				 actual_size - expected_size) == NULL;
  91	else
  92		res = check_zeroed_user(uaddr.user + expected_size,
  93					actual_size - expected_size);
  94	if (res < 0)
  95		return res;
  96	return res ? 0 : -E2BIG;
  97}
  98
  99const struct bpf_map_ops bpf_map_offload_ops = {
 100	.map_meta_equal = bpf_map_meta_equal,
 101	.map_alloc = bpf_map_offload_map_alloc,
 102	.map_free = bpf_map_offload_map_free,
 103	.map_check_btf = map_check_no_btf,
 104};
 105
 106static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 107{
 108	const struct bpf_map_ops *ops;
 109	u32 type = attr->map_type;
 110	struct bpf_map *map;
 111	int err;
 112
 113	if (type >= ARRAY_SIZE(bpf_map_types))
 114		return ERR_PTR(-EINVAL);
 115	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
 116	ops = bpf_map_types[type];
 117	if (!ops)
 118		return ERR_PTR(-EINVAL);
 119
 120	if (ops->map_alloc_check) {
 121		err = ops->map_alloc_check(attr);
 122		if (err)
 123			return ERR_PTR(err);
 124	}
 125	if (attr->map_ifindex)
 126		ops = &bpf_map_offload_ops;
 127	map = ops->map_alloc(attr);
 128	if (IS_ERR(map))
 129		return map;
 130	map->ops = ops;
 131	map->map_type = type;
 132	return map;
 133}
 134
 135static u32 bpf_map_value_size(const struct bpf_map *map)
 136{
 137	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 138	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 139	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
 140	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
 141		return round_up(map->value_size, 8) * num_possible_cpus();
 142	else if (IS_FD_MAP(map))
 143		return sizeof(u32);
 144	else
 145		return  map->value_size;
 146}
 147
 148static void maybe_wait_bpf_programs(struct bpf_map *map)
 149{
 150	/* Wait for any running BPF programs to complete so that
 151	 * userspace, when we return to it, knows that all programs
 152	 * that could be running use the new map value.
 153	 */
 154	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
 155	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
 156		synchronize_rcu();
 157}
 158
 159static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
 160				void *value, __u64 flags)
 161{
 162	int err;
 163
 164	/* Need to create a kthread, thus must support schedule */
 165	if (bpf_map_is_dev_bound(map)) {
 166		return bpf_map_offload_update_elem(map, key, value, flags);
 167	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
 168		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
 169		return map->ops->map_update_elem(map, key, value, flags);
 170	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
 171		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
 172		return sock_map_update_elem_sys(map, key, value, flags);
 173	} else if (IS_FD_PROG_ARRAY(map)) {
 174		return bpf_fd_array_map_update_elem(map, f.file, key, value,
 175						    flags);
 176	}
 177
 178	bpf_disable_instrumentation();
 179	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 180	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 181		err = bpf_percpu_hash_update(map, key, value, flags);
 182	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 183		err = bpf_percpu_array_update(map, key, value, flags);
 184	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 185		err = bpf_percpu_cgroup_storage_update(map, key, value,
 186						       flags);
 187	} else if (IS_FD_ARRAY(map)) {
 188		rcu_read_lock();
 189		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
 190						   flags);
 191		rcu_read_unlock();
 192	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
 193		rcu_read_lock();
 194		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
 195						  flags);
 196		rcu_read_unlock();
 197	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 198		/* rcu_read_lock() is not needed */
 199		err = bpf_fd_reuseport_array_update_elem(map, key, value,
 200							 flags);
 201	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 202		   map->map_type == BPF_MAP_TYPE_STACK) {
 203		err = map->ops->map_push_elem(map, value, flags);
 204	} else {
 205		rcu_read_lock();
 206		err = map->ops->map_update_elem(map, key, value, flags);
 207		rcu_read_unlock();
 208	}
 209	bpf_enable_instrumentation();
 210	maybe_wait_bpf_programs(map);
 211
 212	return err;
 213}
 214
 215static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 216			      __u64 flags)
 217{
 218	void *ptr;
 219	int err;
 220
 221	if (bpf_map_is_dev_bound(map))
 222		return bpf_map_offload_lookup_elem(map, key, value);
 223
 224	bpf_disable_instrumentation();
 225	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 226	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 227		err = bpf_percpu_hash_copy(map, key, value);
 228	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 229		err = bpf_percpu_array_copy(map, key, value);
 230	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 231		err = bpf_percpu_cgroup_storage_copy(map, key, value);
 232	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
 233		err = bpf_stackmap_copy(map, key, value);
 234	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
 235		err = bpf_fd_array_map_lookup_elem(map, key, value);
 236	} else if (IS_FD_HASH(map)) {
 237		err = bpf_fd_htab_map_lookup_elem(map, key, value);
 238	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 239		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
 240	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
 241		   map->map_type == BPF_MAP_TYPE_STACK) {
 242		err = map->ops->map_peek_elem(map, value);
 243	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
 244		/* struct_ops map requires directly updating "value" */
 245		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
 246	} else {
 247		rcu_read_lock();
 248		if (map->ops->map_lookup_elem_sys_only)
 249			ptr = map->ops->map_lookup_elem_sys_only(map, key);
 250		else
 251			ptr = map->ops->map_lookup_elem(map, key);
 252		if (IS_ERR(ptr)) {
 253			err = PTR_ERR(ptr);
 254		} else if (!ptr) {
 255			err = -ENOENT;
 256		} else {
 257			err = 0;
 258			if (flags & BPF_F_LOCK)
 259				/* lock 'ptr' and copy everything but lock */
 260				copy_map_value_locked(map, value, ptr, true);
 261			else
 262				copy_map_value(map, value, ptr);
 263			/* mask lock, since value wasn't zero inited */
 264			check_and_init_map_lock(map, value);
 265		}
 266		rcu_read_unlock();
 267	}
 268
 269	bpf_enable_instrumentation();
 270	maybe_wait_bpf_programs(map);
 271
 272	return err;
 273}
 274
 275/* Please, do not use this function outside from the map creation path
 276 * (e.g. in map update path) without taking care of setting the active
 277 * memory cgroup (see at bpf_map_kmalloc_node() for example).
 278 */
 279static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
 280{
 281	/* We really just want to fail instead of triggering OOM killer
 282	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
 283	 * which is used for lower order allocation requests.
 284	 *
 285	 * It has been observed that higher order allocation requests done by
 286	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
 287	 * to reclaim memory from the page cache, thus we set
 288	 * __GFP_RETRY_MAYFAIL to avoid such situations.
 289	 */
 290
 291	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
 292	unsigned int flags = 0;
 293	unsigned long align = 1;
 294	void *area;
 295
 296	if (size >= SIZE_MAX)
 297		return NULL;
 298
 299	/* kmalloc()'ed memory can't be mmap()'ed */
 300	if (mmapable) {
 301		BUG_ON(!PAGE_ALIGNED(size));
 302		align = SHMLBA;
 303		flags = VM_USERMAP;
 304	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 305		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
 306				    numa_node);
 307		if (area != NULL)
 308			return area;
 309	}
 310
 311	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
 312			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
 313			flags, numa_node, __builtin_return_address(0));
 314}
 315
 316void *bpf_map_area_alloc(u64 size, int numa_node)
 317{
 318	return __bpf_map_area_alloc(size, numa_node, false);
 319}
 320
 321void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
 322{
 323	return __bpf_map_area_alloc(size, numa_node, true);
 324}
 325
 326void bpf_map_area_free(void *area)
 327{
 328	kvfree(area);
 329}
 330
 331static u32 bpf_map_flags_retain_permanent(u32 flags)
 332{
 333	/* Some map creation flags are not tied to the map object but
 334	 * rather to the map fd instead, so they have no meaning upon
 335	 * map object inspection since multiple file descriptors with
 336	 * different (access) properties can exist here. Thus, given
 337	 * this has zero meaning for the map itself, lets clear these
 338	 * from here.
 339	 */
 340	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
 341}
 342
 343void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
 344{
 345	map->map_type = attr->map_type;
 346	map->key_size = attr->key_size;
 347	map->value_size = attr->value_size;
 348	map->max_entries = attr->max_entries;
 349	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
 350	map->numa_node = bpf_map_attr_numa_node(attr);
 351}
 352
 353static int bpf_map_alloc_id(struct bpf_map *map)
 354{
 355	int id;
 356
 357	idr_preload(GFP_KERNEL);
 358	spin_lock_bh(&map_idr_lock);
 359	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
 360	if (id > 0)
 361		map->id = id;
 362	spin_unlock_bh(&map_idr_lock);
 363	idr_preload_end();
 364
 365	if (WARN_ON_ONCE(!id))
 366		return -ENOSPC;
 367
 368	return id > 0 ? 0 : id;
 369}
 370
 371void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 372{
 373	unsigned long flags;
 374
 375	/* Offloaded maps are removed from the IDR store when their device
 376	 * disappears - even if someone holds an fd to them they are unusable,
 377	 * the memory is gone, all ops will fail; they are simply waiting for
 378	 * refcnt to drop to be freed.
 379	 */
 380	if (!map->id)
 381		return;
 382
 383	if (do_idr_lock)
 384		spin_lock_irqsave(&map_idr_lock, flags);
 385	else
 386		__acquire(&map_idr_lock);
 387
 388	idr_remove(&map_idr, map->id);
 389	map->id = 0;
 390
 391	if (do_idr_lock)
 392		spin_unlock_irqrestore(&map_idr_lock, flags);
 393	else
 394		__release(&map_idr_lock);
 395}
 396
 397#ifdef CONFIG_MEMCG_KMEM
 398static void bpf_map_save_memcg(struct bpf_map *map)
 399{
 400	map->memcg = get_mem_cgroup_from_mm(current->mm);
 401}
 402
 403static void bpf_map_release_memcg(struct bpf_map *map)
 404{
 405	mem_cgroup_put(map->memcg);
 406}
 407
 408void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
 409			   int node)
 410{
 411	struct mem_cgroup *old_memcg;
 412	void *ptr;
 413
 414	old_memcg = set_active_memcg(map->memcg);
 415	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
 416	set_active_memcg(old_memcg);
 417
 418	return ptr;
 419}
 420
 421void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
 422{
 423	struct mem_cgroup *old_memcg;
 424	void *ptr;
 425
 426	old_memcg = set_active_memcg(map->memcg);
 427	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
 428	set_active_memcg(old_memcg);
 429
 430	return ptr;
 431}
 432
 433void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 434				    size_t align, gfp_t flags)
 435{
 436	struct mem_cgroup *old_memcg;
 437	void __percpu *ptr;
 438
 439	old_memcg = set_active_memcg(map->memcg);
 440	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
 441	set_active_memcg(old_memcg);
 442
 443	return ptr;
 444}
 445
 446#else
 447static void bpf_map_save_memcg(struct bpf_map *map)
 448{
 449}
 450
 451static void bpf_map_release_memcg(struct bpf_map *map)
 452{
 453}
 454#endif
 455
 456/* called from workqueue */
 457static void bpf_map_free_deferred(struct work_struct *work)
 458{
 459	struct bpf_map *map = container_of(work, struct bpf_map, work);
 460
 461	security_bpf_map_free(map);
 462	bpf_map_release_memcg(map);
 463	/* implementation dependent freeing */
 464	map->ops->map_free(map);
 465}
 466
 467static void bpf_map_put_uref(struct bpf_map *map)
 468{
 469	if (atomic64_dec_and_test(&map->usercnt)) {
 470		if (map->ops->map_release_uref)
 471			map->ops->map_release_uref(map);
 472	}
 473}
 474
 475/* decrement map refcnt and schedule it for freeing via workqueue
 476 * (underlying map implementation ops->map_free() might sleep)
 477 */
 478static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
 479{
 480	if (atomic64_dec_and_test(&map->refcnt)) {
 481		/* bpf_map_free_id() must be called first */
 482		bpf_map_free_id(map, do_idr_lock);
 483		btf_put(map->btf);
 484		INIT_WORK(&map->work, bpf_map_free_deferred);
 485		schedule_work(&map->work);
 486	}
 487}
 488
 489void bpf_map_put(struct bpf_map *map)
 490{
 491	__bpf_map_put(map, true);
 492}
 493EXPORT_SYMBOL_GPL(bpf_map_put);
 494
 495void bpf_map_put_with_uref(struct bpf_map *map)
 496{
 497	bpf_map_put_uref(map);
 498	bpf_map_put(map);
 499}
 500
 501static int bpf_map_release(struct inode *inode, struct file *filp)
 502{
 503	struct bpf_map *map = filp->private_data;
 504
 505	if (map->ops->map_release)
 506		map->ops->map_release(map, filp);
 507
 508	bpf_map_put_with_uref(map);
 509	return 0;
 510}
 511
 512static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
 513{
 514	fmode_t mode = f.file->f_mode;
 515
 516	/* Our file permissions may have been overridden by global
 517	 * map permissions facing syscall side.
 518	 */
 519	if (READ_ONCE(map->frozen))
 520		mode &= ~FMODE_CAN_WRITE;
 521	return mode;
 522}
 523
 524#ifdef CONFIG_PROC_FS
 525/* Provides an approximation of the map's memory footprint.
 526 * Used only to provide a backward compatibility and display
 527 * a reasonable "memlock" info.
 528 */
 529static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
 530{
 531	unsigned long size;
 532
 533	size = round_up(map->key_size + bpf_map_value_size(map), 8);
 534
 535	return round_up(map->max_entries * size, PAGE_SIZE);
 536}
 537
 538static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 539{
 540	const struct bpf_map *map = filp->private_data;
 541	const struct bpf_array *array;
 542	u32 type = 0, jited = 0;
 543
 544	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
 545		array = container_of(map, struct bpf_array, map);
 546		type  = array->aux->type;
 547		jited = array->aux->jited;
 548	}
 549
 550	seq_printf(m,
 551		   "map_type:\t%u\n"
 552		   "key_size:\t%u\n"
 553		   "value_size:\t%u\n"
 554		   "max_entries:\t%u\n"
 555		   "map_flags:\t%#x\n"
 556		   "memlock:\t%lu\n"
 557		   "map_id:\t%u\n"
 558		   "frozen:\t%u\n",
 559		   map->map_type,
 560		   map->key_size,
 561		   map->value_size,
 562		   map->max_entries,
 563		   map->map_flags,
 564		   bpf_map_memory_footprint(map),
 565		   map->id,
 566		   READ_ONCE(map->frozen));
 567	if (type) {
 568		seq_printf(m, "owner_prog_type:\t%u\n", type);
 569		seq_printf(m, "owner_jited:\t%u\n", jited);
 570	}
 571}
 572#endif
 573
 574static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
 575			      loff_t *ppos)
 576{
 577	/* We need this handler such that alloc_file() enables
 578	 * f_mode with FMODE_CAN_READ.
 579	 */
 580	return -EINVAL;
 581}
 582
 583static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
 584			       size_t siz, loff_t *ppos)
 585{
 586	/* We need this handler such that alloc_file() enables
 587	 * f_mode with FMODE_CAN_WRITE.
 588	 */
 589	return -EINVAL;
 590}
 591
 592/* called for any extra memory-mapped regions (except initial) */
 593static void bpf_map_mmap_open(struct vm_area_struct *vma)
 594{
 595	struct bpf_map *map = vma->vm_file->private_data;
 596
 597	if (vma->vm_flags & VM_MAYWRITE) {
 598		mutex_lock(&map->freeze_mutex);
 599		map->writecnt++;
 600		mutex_unlock(&map->freeze_mutex);
 601	}
 602}
 603
 604/* called for all unmapped memory region (including initial) */
 605static void bpf_map_mmap_close(struct vm_area_struct *vma)
 606{
 607	struct bpf_map *map = vma->vm_file->private_data;
 608
 609	if (vma->vm_flags & VM_MAYWRITE) {
 610		mutex_lock(&map->freeze_mutex);
 611		map->writecnt--;
 612		mutex_unlock(&map->freeze_mutex);
 613	}
 614}
 615
 616static const struct vm_operations_struct bpf_map_default_vmops = {
 617	.open		= bpf_map_mmap_open,
 618	.close		= bpf_map_mmap_close,
 619};
 620
 621static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 622{
 623	struct bpf_map *map = filp->private_data;
 624	int err;
 625
 626	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
 627		return -ENOTSUPP;
 628
 629	if (!(vma->vm_flags & VM_SHARED))
 630		return -EINVAL;
 631
 632	mutex_lock(&map->freeze_mutex);
 633
 634	if (vma->vm_flags & VM_WRITE) {
 635		if (map->frozen) {
 636			err = -EPERM;
 637			goto out;
 638		}
 639		/* map is meant to be read-only, so do not allow mapping as
 640		 * writable, because it's possible to leak a writable page
 641		 * reference and allows user-space to still modify it after
 642		 * freezing, while verifier will assume contents do not change
 643		 */
 644		if (map->map_flags & BPF_F_RDONLY_PROG) {
 645			err = -EACCES;
 646			goto out;
 647		}
 648	}
 649
 650	/* set default open/close callbacks */
 651	vma->vm_ops = &bpf_map_default_vmops;
 652	vma->vm_private_data = map;
 653	vma->vm_flags &= ~VM_MAYEXEC;
 654	if (!(vma->vm_flags & VM_WRITE))
 655		/* disallow re-mapping with PROT_WRITE */
 656		vma->vm_flags &= ~VM_MAYWRITE;
 657
 658	err = map->ops->map_mmap(map, vma);
 659	if (err)
 660		goto out;
 661
 662	if (vma->vm_flags & VM_MAYWRITE)
 663		map->writecnt++;
 664out:
 665	mutex_unlock(&map->freeze_mutex);
 666	return err;
 667}
 668
 669static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
 670{
 671	struct bpf_map *map = filp->private_data;
 672
 673	if (map->ops->map_poll)
 674		return map->ops->map_poll(map, filp, pts);
 675
 676	return EPOLLERR;
 677}
 678
 679const struct file_operations bpf_map_fops = {
 680#ifdef CONFIG_PROC_FS
 681	.show_fdinfo	= bpf_map_show_fdinfo,
 682#endif
 683	.release	= bpf_map_release,
 684	.read		= bpf_dummy_read,
 685	.write		= bpf_dummy_write,
 686	.mmap		= bpf_map_mmap,
 687	.poll		= bpf_map_poll,
 688};
 689
 690int bpf_map_new_fd(struct bpf_map *map, int flags)
 691{
 692	int ret;
 693
 694	ret = security_bpf_map(map, OPEN_FMODE(flags));
 695	if (ret < 0)
 696		return ret;
 697
 698	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
 699				flags | O_CLOEXEC);
 700}
 701
 702int bpf_get_file_flag(int flags)
 703{
 704	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
 705		return -EINVAL;
 706	if (flags & BPF_F_RDONLY)
 707		return O_RDONLY;
 708	if (flags & BPF_F_WRONLY)
 709		return O_WRONLY;
 710	return O_RDWR;
 711}
 712
 713/* helper macro to check that unused fields 'union bpf_attr' are zero */
 714#define CHECK_ATTR(CMD) \
 715	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
 716		   sizeof(attr->CMD##_LAST_FIELD), 0, \
 717		   sizeof(*attr) - \
 718		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 719		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 720
 721/* dst and src must have at least "size" number of bytes.
 722 * Return strlen on success and < 0 on error.
 723 */
 724int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
 725{
 726	const char *end = src + size;
 727	const char *orig_src = src;
 728
 729	memset(dst, 0, size);
 730	/* Copy all isalnum(), '_' and '.' chars. */
 731	while (src < end && *src) {
 732		if (!isalnum(*src) &&
 733		    *src != '_' && *src != '.')
 734			return -EINVAL;
 735		*dst++ = *src++;
 736	}
 737
 738	/* No '\0' found in "size" number of bytes */
 739	if (src == end)
 740		return -EINVAL;
 741
 742	return src - orig_src;
 743}
 744
 745int map_check_no_btf(const struct bpf_map *map,
 746		     const struct btf *btf,
 747		     const struct btf_type *key_type,
 748		     const struct btf_type *value_type)
 749{
 750	return -ENOTSUPP;
 751}
 752
 753static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 754			 u32 btf_key_id, u32 btf_value_id)
 755{
 756	const struct btf_type *key_type, *value_type;
 757	u32 key_size, value_size;
 758	int ret = 0;
 759
 760	/* Some maps allow key to be unspecified. */
 761	if (btf_key_id) {
 762		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
 763		if (!key_type || key_size != map->key_size)
 764			return -EINVAL;
 765	} else {
 766		key_type = btf_type_by_id(btf, 0);
 767		if (!map->ops->map_check_btf)
 768			return -EINVAL;
 769	}
 770
 771	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
 772	if (!value_type || value_size != map->value_size)
 773		return -EINVAL;
 774
 775	map->spin_lock_off = btf_find_spin_lock(btf, value_type);
 776
 777	if (map_value_has_spin_lock(map)) {
 778		if (map->map_flags & BPF_F_RDONLY_PROG)
 779			return -EACCES;
 780		if (map->map_type != BPF_MAP_TYPE_HASH &&
 781		    map->map_type != BPF_MAP_TYPE_ARRAY &&
 782		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
 783		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
 784		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
 785		    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
 786			return -ENOTSUPP;
 787		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
 788		    map->value_size) {
 789			WARN_ONCE(1,
 790				  "verifier bug spin_lock_off %d value_size %d\n",
 791				  map->spin_lock_off, map->value_size);
 792			return -EFAULT;
 793		}
 794	}
 795
 796	if (map->ops->map_check_btf)
 797		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
 798
 799	return ret;
 800}
 801
 802#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
 803/* called via syscall */
 804static int map_create(union bpf_attr *attr)
 805{
 806	int numa_node = bpf_map_attr_numa_node(attr);
 807	struct bpf_map *map;
 808	int f_flags;
 809	int err;
 810
 811	err = CHECK_ATTR(BPF_MAP_CREATE);
 812	if (err)
 813		return -EINVAL;
 814
 815	if (attr->btf_vmlinux_value_type_id) {
 816		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
 817		    attr->btf_key_type_id || attr->btf_value_type_id)
 818			return -EINVAL;
 819	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
 820		return -EINVAL;
 821	}
 822
 823	f_flags = bpf_get_file_flag(attr->map_flags);
 824	if (f_flags < 0)
 825		return f_flags;
 826
 827	if (numa_node != NUMA_NO_NODE &&
 828	    ((unsigned int)numa_node >= nr_node_ids ||
 829	     !node_online(numa_node)))
 830		return -EINVAL;
 831
 832	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
 833	map = find_and_alloc_map(attr);
 834	if (IS_ERR(map))
 835		return PTR_ERR(map);
 836
 837	err = bpf_obj_name_cpy(map->name, attr->map_name,
 838			       sizeof(attr->map_name));
 839	if (err < 0)
 840		goto free_map;
 841
 842	atomic64_set(&map->refcnt, 1);
 843	atomic64_set(&map->usercnt, 1);
 844	mutex_init(&map->freeze_mutex);
 845
 846	map->spin_lock_off = -EINVAL;
 847	if (attr->btf_key_type_id || attr->btf_value_type_id ||
 848	    /* Even the map's value is a kernel's struct,
 849	     * the bpf_prog.o must have BTF to begin with
 850	     * to figure out the corresponding kernel's
 851	     * counter part.  Thus, attr->btf_fd has
 852	     * to be valid also.
 853	     */
 854	    attr->btf_vmlinux_value_type_id) {
 855		struct btf *btf;
 856
 857		btf = btf_get_by_fd(attr->btf_fd);
 858		if (IS_ERR(btf)) {
 859			err = PTR_ERR(btf);
 860			goto free_map;
 861		}
 862		if (btf_is_kernel(btf)) {
 863			btf_put(btf);
 864			err = -EACCES;
 865			goto free_map;
 866		}
 867		map->btf = btf;
 868
 869		if (attr->btf_value_type_id) {
 870			err = map_check_btf(map, btf, attr->btf_key_type_id,
 871					    attr->btf_value_type_id);
 872			if (err)
 873				goto free_map;
 874		}
 875
 876		map->btf_key_type_id = attr->btf_key_type_id;
 877		map->btf_value_type_id = attr->btf_value_type_id;
 878		map->btf_vmlinux_value_type_id =
 879			attr->btf_vmlinux_value_type_id;
 880	}
 881
 882	err = security_bpf_map_alloc(map);
 883	if (err)
 884		goto free_map;
 885
 886	err = bpf_map_alloc_id(map);
 887	if (err)
 888		goto free_map_sec;
 889
 890	bpf_map_save_memcg(map);
 891
 892	err = bpf_map_new_fd(map, f_flags);
 893	if (err < 0) {
 894		/* failed to allocate fd.
 895		 * bpf_map_put_with_uref() is needed because the above
 896		 * bpf_map_alloc_id() has published the map
 897		 * to the userspace and the userspace may
 898		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
 899		 */
 900		bpf_map_put_with_uref(map);
 901		return err;
 902	}
 903
 904	return err;
 905
 906free_map_sec:
 907	security_bpf_map_free(map);
 908free_map:
 909	btf_put(map->btf);
 910	map->ops->map_free(map);
 911	return err;
 912}
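/* A hedged illustration (not part of this file): a minimal user-space sketch
 * of driving the BPF_MAP_CREATE command handled above through the bpf(2)
 * syscall.  The sys_bpf() wrapper and the chosen map parameters are
 * assumptions for the example only.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	/* bpf(2) has no libc wrapper on older systems; call it directly */
	return syscall(__NR_bpf, cmd, attr, size);
}

static int create_example_array(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;	/* dispatched by find_and_alloc_map() */
	attr.key_size    = sizeof(__u32);
	attr.value_size  = sizeof(__u64);
	attr.max_entries = 64;
	strncpy(attr.map_name, "example_arr", sizeof(attr.map_name) - 1);

	/* returns a new map fd on success, -1 with errno on failure */
	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}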
 913
 914/* if error is returned, fd is released.
 915 * On success caller should complete fd access with matching fdput()
 916 */
 917struct bpf_map *__bpf_map_get(struct fd f)
 918{
 919	if (!f.file)
 920		return ERR_PTR(-EBADF);
 921	if (f.file->f_op != &bpf_map_fops) {
 922		fdput(f);
 923		return ERR_PTR(-EINVAL);
 924	}
 925
 926	return f.file->private_data;
 927}
 928
 929void bpf_map_inc(struct bpf_map *map)
 930{
 931	atomic64_inc(&map->refcnt);
 932}
 933EXPORT_SYMBOL_GPL(bpf_map_inc);
 934
 935void bpf_map_inc_with_uref(struct bpf_map *map)
 936{
 937	atomic64_inc(&map->refcnt);
 938	atomic64_inc(&map->usercnt);
 939}
 940EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
 941
 942struct bpf_map *bpf_map_get(u32 ufd)
 943{
 944	struct fd f = fdget(ufd);
 945	struct bpf_map *map;
 946
 947	map = __bpf_map_get(f);
 948	if (IS_ERR(map))
 949		return map;
 950
 951	bpf_map_inc(map);
 952	fdput(f);
 953
 954	return map;
 955}
 956
 957struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 958{
 959	struct fd f = fdget(ufd);
 960	struct bpf_map *map;
 961
 962	map = __bpf_map_get(f);
 963	if (IS_ERR(map))
 964		return map;
 965
 966	bpf_map_inc_with_uref(map);
 967	fdput(f);
 968
 969	return map;
 970}
 971
 972/* map_idr_lock should have been held */
 973static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
 974{
 975	int refold;
 976
 977	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
 978	if (!refold)
 979		return ERR_PTR(-ENOENT);
 980	if (uref)
 981		atomic64_inc(&map->usercnt);
 982
 983	return map;
 984}
 985
 986struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
 987{
 988	spin_lock_bh(&map_idr_lock);
 989	map = __bpf_map_inc_not_zero(map, false);
 990	spin_unlock_bh(&map_idr_lock);
 991
 992	return map;
 993}
 994EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
 995
 996int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 997{
 998	return -ENOTSUPP;
 999}
1000
1001static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1002{
1003	if (key_size)
1004		return memdup_user(ukey, key_size);
1005
1006	if (ukey)
1007		return ERR_PTR(-EINVAL);
1008
1009	return NULL;
1010}
1011
1012static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1013{
1014	if (key_size)
1015		return memdup_bpfptr(ukey, key_size);
1016
1017	if (!bpfptr_is_null(ukey))
1018		return ERR_PTR(-EINVAL);
1019
1020	return NULL;
1021}
1022
1023/* last field in 'union bpf_attr' used by this command */
1024#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1025
1026static int map_lookup_elem(union bpf_attr *attr)
1027{
1028	void __user *ukey = u64_to_user_ptr(attr->key);
1029	void __user *uvalue = u64_to_user_ptr(attr->value);
1030	int ufd = attr->map_fd;
1031	struct bpf_map *map;
1032	void *key, *value;
1033	u32 value_size;
1034	struct fd f;
1035	int err;
1036
1037	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1038		return -EINVAL;
1039
1040	if (attr->flags & ~BPF_F_LOCK)
1041		return -EINVAL;
1042
1043	f = fdget(ufd);
1044	map = __bpf_map_get(f);
1045	if (IS_ERR(map))
1046		return PTR_ERR(map);
1047	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1048		err = -EPERM;
1049		goto err_put;
1050	}
1051
1052	if ((attr->flags & BPF_F_LOCK) &&
1053	    !map_value_has_spin_lock(map)) {
1054		err = -EINVAL;
1055		goto err_put;
1056	}
1057
1058	key = __bpf_copy_key(ukey, map->key_size);
1059	if (IS_ERR(key)) {
1060		err = PTR_ERR(key);
1061		goto err_put;
1062	}
1063
 1064	value_size = bpf_map_value_size(map);
1065
1066	err = -ENOMEM;
1067	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1068	if (!value)
1069		goto free_key;
1070
 1071	err = bpf_map_copy_value(map, key, value, attr->flags);
1072	if (err)
1073		goto free_value;
1074
1075	err = -EFAULT;
1076	if (copy_to_user(uvalue, value, value_size) != 0)
1077		goto free_value;
1078
1079	err = 0;
1080
1081free_value:
1082	kfree(value);
1083free_key:
1084	kfree(key);
1085err_put:
1086	fdput(f);
1087	return err;
1088}
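/* A hedged user-space sketch of the BPF_MAP_LOOKUP_ELEM path above: key and
 * value buffers are passed as u64-encoded pointers in bpf_attr.  Builds on
 * the assumed sys_bpf() helper from the earlier sketch; ptr_to_u64() is an
 * illustrative helper as well, and the __u32 key / __u64 value layout is an
 * assumption.
 */
static __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int lookup_u64_value(int map_fd, __u32 key, __u64 *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = ptr_to_u64(&key);		/* copied in by __bpf_copy_key() */
	attr.value  = ptr_to_u64(value);	/* filled by copy_to_user() above */

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}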
1089
1090
1091#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1092
1093static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1094{
1095	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1096	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1097	int ufd = attr->map_fd;
1098	struct bpf_map *map;
1099	void *key, *value;
1100	u32 value_size;
1101	struct fd f;
1102	int err;
1103
1104	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1105		return -EINVAL;
1106
1107	f = fdget(ufd);
1108	map = __bpf_map_get(f);
1109	if (IS_ERR(map))
1110		return PTR_ERR(map);
1111	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1112		err = -EPERM;
1113		goto err_put;
1114	}
1115
1116	if ((attr->flags & BPF_F_LOCK) &&
1117	    !map_value_has_spin_lock(map)) {
1118		err = -EINVAL;
1119		goto err_put;
1120	}
1121
1122	key = ___bpf_copy_key(ukey, map->key_size);
1123	if (IS_ERR(key)) {
1124		err = PTR_ERR(key);
1125		goto err_put;
1126	}
1127
1128	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1129	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
1130	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
1131	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
1132		value_size = round_up(map->value_size, 8) * num_possible_cpus();
1133	else
1134		value_size = map->value_size;
1135
1136	err = -ENOMEM;
1137	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1138	if (!value)
1139		goto free_key;
1140
1141	err = -EFAULT;
1142	if (copy_from_bpfptr(value, uvalue, value_size) != 0)
1143		goto free_value;
1144
 1145	err = bpf_map_update_value(map, f, key, value, attr->flags);
1146
1147free_value:
1148	kfree(value);
1149free_key:
1150	kfree(key);
1151err_put:
1152	fdput(f);
1153	return err;
1154}
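/* The update side, sketched the same way: BPF_ANY asks the kernel to create
 * the element or replace it in place.  Again assumes the sys_bpf() and
 * ptr_to_u64() helpers from the sketches above and a __u32/__u64 map layout.
 */
static int update_u64_value(int map_fd, __u32 key, __u64 value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = ptr_to_u64(&key);
	attr.value  = ptr_to_u64(&value);
	attr.flags  = BPF_ANY;	/* BPF_NOEXIST / BPF_EXIST restrict the update */

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}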
1155
1156#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1157
1158static int map_delete_elem(union bpf_attr *attr)
1159{
1160	void __user *ukey = u64_to_user_ptr(attr->key);
1161	int ufd = attr->map_fd;
1162	struct bpf_map *map;
1163	struct fd f;
1164	void *key;
1165	int err;
1166
1167	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1168		return -EINVAL;
1169
1170	f = fdget(ufd);
1171	map = __bpf_map_get(f);
1172	if (IS_ERR(map))
1173		return PTR_ERR(map);
1174	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1175		err = -EPERM;
1176		goto err_put;
1177	}
1178
1179	key = __bpf_copy_key(ukey, map->key_size);
1180	if (IS_ERR(key)) {
1181		err = PTR_ERR(key);
1182		goto err_put;
1183	}
1184
1185	if (bpf_map_is_dev_bound(map)) {
1186		err = bpf_map_offload_delete_elem(map, key);
1187		goto out;
1188	} else if (IS_FD_PROG_ARRAY(map) ||
1189		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1190		/* These maps require sleepable context */
1191		err = map->ops->map_delete_elem(map, key);
1192		goto out;
1193	}
1194
 1195	bpf_disable_instrumentation();
1196	rcu_read_lock();
1197	err = map->ops->map_delete_elem(map, key);
1198	rcu_read_unlock();
1199	bpf_enable_instrumentation();
1200	maybe_wait_bpf_programs(map);
 1201out:
1202	kfree(key);
1203err_put:
1204	fdput(f);
1205	return err;
1206}
1207
1208/* last field in 'union bpf_attr' used by this command */
1209#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1210
1211static int map_get_next_key(union bpf_attr *attr)
1212{
1213	void __user *ukey = u64_to_user_ptr(attr->key);
1214	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1215	int ufd = attr->map_fd;
1216	struct bpf_map *map;
1217	void *key, *next_key;
1218	struct fd f;
1219	int err;
1220
1221	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1222		return -EINVAL;
1223
1224	f = fdget(ufd);
1225	map = __bpf_map_get(f);
1226	if (IS_ERR(map))
1227		return PTR_ERR(map);
1228	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
 1229		err = -EPERM;
1230		goto err_put;
1231	}
1232
1233	if (ukey) {
1234		key = __bpf_copy_key(ukey, map->key_size);
1235		if (IS_ERR(key)) {
1236			err = PTR_ERR(key);
1237			goto err_put;
1238		}
1239	} else {
1240		key = NULL;
1241	}
1242
1243	err = -ENOMEM;
1244	next_key = kmalloc(map->key_size, GFP_USER);
1245	if (!next_key)
1246		goto free_key;
1247
1248	if (bpf_map_is_dev_bound(map)) {
1249		err = bpf_map_offload_get_next_key(map, key, next_key);
1250		goto out;
1251	}
1252
1253	rcu_read_lock();
1254	err = map->ops->map_get_next_key(map, key, next_key);
1255	rcu_read_unlock();
1256out:
1257	if (err)
1258		goto free_next_key;
1259
1260	err = -EFAULT;
1261	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1262		goto free_next_key;
1263
1264	err = 0;
1265
1266free_next_key:
1267	kfree(next_key);
1268free_key:
1269	kfree(key);
1270err_put:
1271	fdput(f);
1272	return err;
1273}
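/* Illustration only: walking every key of a map from user space with the
 * BPF_MAP_GET_NEXT_KEY command above.  attr.key == 0 (a NULL key) asks for
 * the first key; ENOENT signals the end of the map.  Assumes __u32 keys and
 * the sys_bpf()/ptr_to_u64() helpers sketched earlier.
 */
static int count_map_keys(int map_fd)
{
	union bpf_attr attr;
	__u32 cur, next;
	int count = 0;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd   = map_fd;
	attr.key      = 0;			/* start from the first key */
	attr.next_key = ptr_to_u64(&next);

	while (sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0) {
		count++;
		cur = next;
		attr.key = ptr_to_u64(&cur);	/* resume after the returned key */
	}
	return count;
}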
1274
1275int generic_map_delete_batch(struct bpf_map *map,
1276			     const union bpf_attr *attr,
1277			     union bpf_attr __user *uattr)
1278{
1279	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1280	u32 cp, max_count;
1281	int err = 0;
1282	void *key;
1283
1284	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1285		return -EINVAL;
1286
1287	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1288	    !map_value_has_spin_lock(map)) {
1289		return -EINVAL;
1290	}
1291
1292	max_count = attr->batch.count;
1293	if (!max_count)
1294		return 0;
1295
1296	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1297	if (!key)
1298		return -ENOMEM;
1299
1300	for (cp = 0; cp < max_count; cp++) {
1301		err = -EFAULT;
1302		if (copy_from_user(key, keys + cp * map->key_size,
1303				   map->key_size))
1304			break;
1305
1306		if (bpf_map_is_dev_bound(map)) {
1307			err = bpf_map_offload_delete_elem(map, key);
1308			break;
1309		}
1310
1311		bpf_disable_instrumentation();
1312		rcu_read_lock();
1313		err = map->ops->map_delete_elem(map, key);
1314		rcu_read_unlock();
1315		bpf_enable_instrumentation();
1316		maybe_wait_bpf_programs(map);
1317		if (err)
1318			break;
1319	}
1320	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1321		err = -EFAULT;
1322
1323	kfree(key);
1324	return err;
1325}
1326
1327int generic_map_update_batch(struct bpf_map *map,
1328			     const union bpf_attr *attr,
1329			     union bpf_attr __user *uattr)
1330{
1331	void __user *values = u64_to_user_ptr(attr->batch.values);
1332	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1333	u32 value_size, cp, max_count;
1334	int ufd = attr->map_fd;
1335	void *key, *value;
1336	struct fd f;
1337	int err = 0;
1338
1339	f = fdget(ufd);
1340	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1341		return -EINVAL;
1342
1343	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1344	    !map_value_has_spin_lock(map)) {
1345		return -EINVAL;
1346	}
1347
1348	value_size = bpf_map_value_size(map);
1349
1350	max_count = attr->batch.count;
1351	if (!max_count)
1352		return 0;
1353
1354	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1355	if (!key)
1356		return -ENOMEM;
1357
1358	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1359	if (!value) {
1360		kfree(key);
1361		return -ENOMEM;
1362	}
1363
1364	for (cp = 0; cp < max_count; cp++) {
1365		err = -EFAULT;
1366		if (copy_from_user(key, keys + cp * map->key_size,
1367		    map->key_size) ||
1368		    copy_from_user(value, values + cp * value_size, value_size))
1369			break;
1370
1371		err = bpf_map_update_value(map, f, key, value,
1372					   attr->batch.elem_flags);
1373
1374		if (err)
1375			break;
1376	}
1377
1378	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1379		err = -EFAULT;
1380
1381	kfree(value);
1382	kfree(key);
1383	return err;
1384}
1385
1386#define MAP_LOOKUP_RETRIES 3
1387
1388int generic_map_lookup_batch(struct bpf_map *map,
1389				    const union bpf_attr *attr,
 1390				    union bpf_attr __user *uattr)
1391{
1392	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1393	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1394	void __user *values = u64_to_user_ptr(attr->batch.values);
1395	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1396	void *buf, *buf_prevkey, *prev_key, *key, *value;
1397	int err, retry = MAP_LOOKUP_RETRIES;
1398	u32 value_size, cp, max_count;
1399
1400	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1401		return -EINVAL;
1402
1403	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1404	    !map_value_has_spin_lock(map))
 1405		return -EINVAL;
1406
1407	value_size = bpf_map_value_size(map);
1408
1409	max_count = attr->batch.count;
1410	if (!max_count)
1411		return 0;
1412
1413	if (put_user(0, &uattr->batch.count))
1414		return -EFAULT;
1415
1416	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1417	if (!buf_prevkey)
1418		return -ENOMEM;
1419
1420	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1421	if (!buf) {
1422		kfree(buf_prevkey);
1423		return -ENOMEM;
1424	}
1425
1426	err = -EFAULT;
1427	prev_key = NULL;
1428	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1429		goto free_buf;
1430	key = buf;
1431	value = key + map->key_size;
1432	if (ubatch)
1433		prev_key = buf_prevkey;
1434
1435	for (cp = 0; cp < max_count;) {
1436		rcu_read_lock();
1437		err = map->ops->map_get_next_key(map, prev_key, key);
1438		rcu_read_unlock();
1439		if (err)
1440			break;
1441		err = bpf_map_copy_value(map, key, value,
1442					 attr->batch.elem_flags);
1443
1444		if (err == -ENOENT) {
1445			if (retry) {
1446				retry--;
1447				continue;
1448			}
1449			err = -EINTR;
1450			break;
1451		}
1452
1453		if (err)
1454			goto free_buf;
1455
1456		if (copy_to_user(keys + cp * map->key_size, key,
1457				 map->key_size)) {
1458			err = -EFAULT;
1459			goto free_buf;
1460		}
1461		if (copy_to_user(values + cp * value_size, value, value_size)) {
1462			err = -EFAULT;
1463			goto free_buf;
1464		}
1465
1466		if (!prev_key)
1467			prev_key = buf_prevkey;
1468
1469		swap(prev_key, key);
1470		retry = MAP_LOOKUP_RETRIES;
1471		cp++;
1472	}
1473
1474	if (err == -EFAULT)
1475		goto free_buf;
1476
1477	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1478		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1479		err = -EFAULT;
1480
1481free_buf:
1482	kfree(buf_prevkey);
1483	kfree(buf);
1484	return err;
1485}
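/* A hedged sketch of one BPF_MAP_LOOKUP_BATCH call serviced by
 * generic_map_lookup_batch() above: in_batch == 0 starts at the beginning,
 * the kernel writes a resume cookie into out_batch and the number of copied
 * elements back into batch.count.  The buffer layout (__u32 keys, __u64
 * values) and the reused sys_bpf()/ptr_to_u64() helpers are assumptions.
 */
static int lookup_batch_once(int map_fd, __u32 *keys, __u64 *vals, __u32 *count)
{
	union bpf_attr attr;
	__u64 out_batch = 0;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd    = map_fd;
	attr.batch.in_batch  = 0;			/* start of the map */
	attr.batch.out_batch = ptr_to_u64(&out_batch);	/* resume cookie */
	attr.batch.keys      = ptr_to_u64(keys);
	attr.batch.values    = ptr_to_u64(vals);
	attr.batch.count     = *count;	/* in: buffer capacity, out: copied */

	err = sys_bpf(BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
	*count = attr.batch.count;	/* valid even on ENOENT at end of map */
	return err;
}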
1486
1487#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
1488
1489static int map_lookup_and_delete_elem(union bpf_attr *attr)
1490{
1491	void __user *ukey = u64_to_user_ptr(attr->key);
1492	void __user *uvalue = u64_to_user_ptr(attr->value);
1493	int ufd = attr->map_fd;
1494	struct bpf_map *map;
1495	void *key, *value;
1496	u32 value_size;
1497	struct fd f;
1498	int err;
1499
1500	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1501		return -EINVAL;
1502
1503	if (attr->flags & ~BPF_F_LOCK)
1504		return -EINVAL;
1505
1506	f = fdget(ufd);
1507	map = __bpf_map_get(f);
1508	if (IS_ERR(map))
1509		return PTR_ERR(map);
1510	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1511	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1512		err = -EPERM;
1513		goto err_put;
1514	}
1515
1516	if (attr->flags &&
1517	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
1518	     map->map_type == BPF_MAP_TYPE_STACK)) {
1519		err = -EINVAL;
1520		goto err_put;
1521	}
1522
1523	if ((attr->flags & BPF_F_LOCK) &&
1524	    !map_value_has_spin_lock(map)) {
1525		err = -EINVAL;
1526		goto err_put;
1527	}
1528
1529	key = __bpf_copy_key(ukey, map->key_size);
1530	if (IS_ERR(key)) {
1531		err = PTR_ERR(key);
1532		goto err_put;
1533	}
1534
1535	value_size = bpf_map_value_size(map);
1536
1537	err = -ENOMEM;
1538	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1539	if (!value)
1540		goto free_key;
1541
1542	err = -ENOTSUPP;
1543	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1544	    map->map_type == BPF_MAP_TYPE_STACK) {
1545		err = map->ops->map_pop_elem(map, value);
1546	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
1547		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1548		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
1549		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
1550		if (!bpf_map_is_dev_bound(map)) {
1551			bpf_disable_instrumentation();
1552			rcu_read_lock();
1553			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
1554			rcu_read_unlock();
1555			bpf_enable_instrumentation();
1556		}
1557	}
1558
1559	if (err)
1560		goto free_value;
1561
1562	if (copy_to_user(uvalue, value, value_size) != 0) {
1563		err = -EFAULT;
1564		goto free_value;
1565	}
1566
1567	err = 0;
1568
1569free_value:
1570	kfree(value);
1571free_key:
1572	kfree(key);
1573err_put:
1574	fdput(f);
1575	return err;
1576}
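/* A minimal sketch of popping one element from a BPF_MAP_TYPE_QUEUE map via
 * the BPF_MAP_LOOKUP_AND_DELETE_ELEM command above.  Queue and stack maps
 * have a zero key_size, so only the value pointer is supplied.  Assumes a
 * queue of __u64 values plus the sys_bpf()/ptr_to_u64() helpers from above.
 */
static int queue_pop(int queue_fd, __u64 *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = queue_fd;
	attr.value  = ptr_to_u64(value);	/* attr.key stays 0 (no key) */

	return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}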
1577
1578#define BPF_MAP_FREEZE_LAST_FIELD map_fd
1579
1580static int map_freeze(const union bpf_attr *attr)
1581{
1582	int err = 0, ufd = attr->map_fd;
1583	struct bpf_map *map;
1584	struct fd f;
1585
1586	if (CHECK_ATTR(BPF_MAP_FREEZE))
1587		return -EINVAL;
1588
1589	f = fdget(ufd);
1590	map = __bpf_map_get(f);
1591	if (IS_ERR(map))
1592		return PTR_ERR(map);
1593
1594	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1595		fdput(f);
1596		return -ENOTSUPP;
1597	}
1598
1599	mutex_lock(&map->freeze_mutex);
1600
1601	if (map->writecnt) {
1602		err = -EBUSY;
1603		goto err_put;
1604	}
1605	if (READ_ONCE(map->frozen)) {
1606		err = -EBUSY;
1607		goto err_put;
1608	}
1609	if (!bpf_capable()) {
1610		err = -EPERM;
1611		goto err_put;
1612	}
1613
1614	WRITE_ONCE(map->frozen, true);
1615err_put:
1616	mutex_unlock(&map->freeze_mutex);
1617	fdput(f);
1618	return err;
1619}
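/* Illustrative use of BPF_MAP_FREEZE from user space: after freezing, writes
 * through the syscall interface are refused, which is how loaders typically
 * seal read-only (.rodata style) maps once their initial contents are
 * written.  Reuses the assumed sys_bpf() helper.
 */
static int freeze_map(int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;

	return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
}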
1620
1621static const struct bpf_prog_ops * const bpf_prog_types[] = {
1622#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1623	[_id] = & _name ## _prog_ops,
1624#define BPF_MAP_TYPE(_id, _ops)
1625#define BPF_LINK_TYPE(_id, _name)
1626#include <linux/bpf_types.h>
1627#undef BPF_PROG_TYPE
1628#undef BPF_MAP_TYPE
1629#undef BPF_LINK_TYPE
1630};
1631
1632static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1633{
1634	const struct bpf_prog_ops *ops;
1635
1636	if (type >= ARRAY_SIZE(bpf_prog_types))
1637		return -EINVAL;
1638	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1639	ops = bpf_prog_types[type];
1640	if (!ops)
1641		return -EINVAL;
1642
1643	if (!bpf_prog_is_dev_bound(prog->aux))
1644		prog->aux->ops = ops;
1645	else
1646		prog->aux->ops = &bpf_offload_prog_ops;
1647	prog->type = type;
1648	return 0;
1649}
1650
1651enum bpf_audit {
1652	BPF_AUDIT_LOAD,
1653	BPF_AUDIT_UNLOAD,
1654	BPF_AUDIT_MAX,
1655};
1656
1657static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1658	[BPF_AUDIT_LOAD]   = "LOAD",
1659	[BPF_AUDIT_UNLOAD] = "UNLOAD",
1660};
1661
1662static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1663{
1664	struct audit_context *ctx = NULL;
1665	struct audit_buffer *ab;
1666
1667	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1668		return;
1669	if (audit_enabled == AUDIT_OFF)
1670		return;
1671	if (op == BPF_AUDIT_LOAD)
1672		ctx = audit_context();
1673	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1674	if (unlikely(!ab))
1675		return;
1676	audit_log_format(ab, "prog-id=%u op=%s",
1677			 prog->aux->id, bpf_audit_str[op]);
1678	audit_log_end(ab);
1679}
1680
1681static int bpf_prog_alloc_id(struct bpf_prog *prog)
1682{
1683	int id;
1684
1685	idr_preload(GFP_KERNEL);
1686	spin_lock_bh(&prog_idr_lock);
1687	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1688	if (id > 0)
1689		prog->aux->id = id;
1690	spin_unlock_bh(&prog_idr_lock);
1691	idr_preload_end();
1692
1693	/* id is in [1, INT_MAX) */
1694	if (WARN_ON_ONCE(!id))
1695		return -ENOSPC;
1696
1697	return id > 0 ? 0 : id;
1698}
1699
1700void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1701{
1702	/* cBPF to eBPF migrations are currently not in the idr store.
1703	 * Offloaded programs are removed from the store when their device
1704	 * disappears - even if someone grabs an fd to them they are unusable,
1705	 * simply waiting for refcnt to drop to be freed.
1706	 */
1707	if (!prog->aux->id)
1708		return;
1709
1710	if (do_idr_lock)
1711		spin_lock_bh(&prog_idr_lock);
1712	else
1713		__acquire(&prog_idr_lock);
1714
1715	idr_remove(&prog_idr, prog->aux->id);
1716	prog->aux->id = 0;
1717
1718	if (do_idr_lock)
1719		spin_unlock_bh(&prog_idr_lock);
1720	else
1721		__release(&prog_idr_lock);
1722}
1723
1724static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1725{
1726	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1727
1728	kvfree(aux->func_info);
1729	kfree(aux->func_info_aux);
1730	free_uid(aux->user);
1731	security_bpf_prog_free(aux);
1732	bpf_prog_free(aux->prog);
1733}
1734
 1735static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1736{
1737	bpf_prog_kallsyms_del_all(prog);
1738	btf_put(prog->aux->btf);
1739	kvfree(prog->aux->jited_linfo);
1740	kvfree(prog->aux->linfo);
1741	kfree(prog->aux->kfunc_tab);
1742	if (prog->aux->attach_btf)
1743		btf_put(prog->aux->attach_btf);
1744
1745	if (deferred) {
1746		if (prog->aux->sleepable)
1747			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
1748		else
1749			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1750	} else {
1751		__bpf_prog_put_rcu(&prog->aux->rcu);
1752	}
1753}
1754
1755static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1756{
1757	if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1758		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1759		bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1760		/* bpf_prog_free_id() must be called first */
1761		bpf_prog_free_id(prog, do_idr_lock);
1762		__bpf_prog_put_noref(prog, true);
1763	}
1764}
1765
1766void bpf_prog_put(struct bpf_prog *prog)
1767{
 1768	__bpf_prog_put(prog, true);
1769}
1770EXPORT_SYMBOL_GPL(bpf_prog_put);
1771
1772static int bpf_prog_release(struct inode *inode, struct file *filp)
1773{
1774	struct bpf_prog *prog = filp->private_data;
1775
1776	bpf_prog_put(prog);
1777	return 0;
1778}
1779
1780static void bpf_prog_get_stats(const struct bpf_prog *prog,
1781			       struct bpf_prog_stats *stats)
1782{
1783	u64 nsecs = 0, cnt = 0, misses = 0;
1784	int cpu;
1785
1786	for_each_possible_cpu(cpu) {
1787		const struct bpf_prog_stats *st;
1788		unsigned int start;
1789		u64 tnsecs, tcnt, tmisses;
1790
1791		st = per_cpu_ptr(prog->stats, cpu);
1792		do {
1793			start = u64_stats_fetch_begin_irq(&st->syncp);
1794			tnsecs = st->nsecs;
1795			tcnt = st->cnt;
1796			tmisses = st->misses;
1797		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
1798		nsecs += tnsecs;
1799		cnt += tcnt;
1800		misses += tmisses;
1801	}
1802	stats->nsecs = nsecs;
1803	stats->cnt = cnt;
1804	stats->misses = misses;
1805}
1806
1807#ifdef CONFIG_PROC_FS
1808static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1809{
1810	const struct bpf_prog *prog = filp->private_data;
1811	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1812	struct bpf_prog_stats stats;
1813
1814	bpf_prog_get_stats(prog, &stats);
1815	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1816	seq_printf(m,
1817		   "prog_type:\t%u\n"
1818		   "prog_jited:\t%u\n"
1819		   "prog_tag:\t%s\n"
1820		   "memlock:\t%llu\n"
1821		   "prog_id:\t%u\n"
1822		   "run_time_ns:\t%llu\n"
1823		   "run_cnt:\t%llu\n"
1824		   "recursion_misses:\t%llu\n",
1825		   prog->type,
1826		   prog->jited,
1827		   prog_tag,
1828		   prog->pages * 1ULL << PAGE_SHIFT,
1829		   prog->aux->id,
1830		   stats.nsecs,
1831		   stats.cnt,
1832		   stats.misses);
1833}
1834#endif
1835
1836const struct file_operations bpf_prog_fops = {
1837#ifdef CONFIG_PROC_FS
1838	.show_fdinfo	= bpf_prog_show_fdinfo,
1839#endif
1840	.release	= bpf_prog_release,
1841	.read		= bpf_dummy_read,
1842	.write		= bpf_dummy_write,
1843};
1844
1845int bpf_prog_new_fd(struct bpf_prog *prog)
1846{
1847	int ret;
1848
1849	ret = security_bpf_prog(prog);
1850	if (ret < 0)
1851		return ret;
1852
1853	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1854				O_RDWR | O_CLOEXEC);
1855}
1856
1857static struct bpf_prog *____bpf_prog_get(struct fd f)
1858{
1859	if (!f.file)
1860		return ERR_PTR(-EBADF);
1861	if (f.file->f_op != &bpf_prog_fops) {
1862		fdput(f);
1863		return ERR_PTR(-EINVAL);
1864	}
1865
1866	return f.file->private_data;
1867}
1868
1869void bpf_prog_add(struct bpf_prog *prog, int i)
1870{
1871	atomic64_add(i, &prog->aux->refcnt);
1872}
1873EXPORT_SYMBOL_GPL(bpf_prog_add);
1874
1875void bpf_prog_sub(struct bpf_prog *prog, int i)
1876{
1877	/* Only to be used for undoing previous bpf_prog_add() in some
1878	 * error path. We still know that another entity in our call
1879	 * path holds a reference to the program, thus atomic_sub() can
1880	 * be safely used in such cases!
1881	 */
1882	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1883}
1884EXPORT_SYMBOL_GPL(bpf_prog_sub);
1885
1886void bpf_prog_inc(struct bpf_prog *prog)
1887{
1888	atomic64_inc(&prog->aux->refcnt);
1889}
1890EXPORT_SYMBOL_GPL(bpf_prog_inc);
1891
1892/* prog_idr_lock should have been held */
1893struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1894{
1895	int refold;
1896
1897	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1898
1899	if (!refold)
1900		return ERR_PTR(-ENOENT);
1901
1902	return prog;
1903}
1904EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1905
1906bool bpf_prog_get_ok(struct bpf_prog *prog,
1907			    enum bpf_prog_type *attach_type, bool attach_drv)
1908{
1909	/* not an attachment, just a refcount inc, always allow */
1910	if (!attach_type)
1911		return true;
1912
1913	if (prog->type != *attach_type)
1914		return false;
1915	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1916		return false;
1917
1918	return true;
1919}
1920
1921static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1922				       bool attach_drv)
1923{
1924	struct fd f = fdget(ufd);
1925	struct bpf_prog *prog;
1926
1927	prog = ____bpf_prog_get(f);
1928	if (IS_ERR(prog))
1929		return prog;
1930	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1931		prog = ERR_PTR(-EINVAL);
1932		goto out;
1933	}
1934
1935	bpf_prog_inc(prog);
1936out:
 1937	fdput(f);
1938	return prog;
1939}
1940
1941struct bpf_prog *bpf_prog_get(u32 ufd)
1942{
1943	return __bpf_prog_get(ufd, NULL, false);
1944}
1945
1946struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1947				       bool attach_drv)
1948{
1949	return __bpf_prog_get(ufd, &type, attach_drv);
1950}
1951EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1952
1953/* Initially all BPF programs could be loaded w/o specifying
1954 * expected_attach_type. Later for some of them specifying expected_attach_type
1955 * at load time became required so that program could be validated properly.
1956 * Programs of types that are allowed to be loaded both w/ and w/o (for
1957 * backward compatibility) expected_attach_type, should have the default attach
1958 * type assigned to expected_attach_type for the latter case, so that it can be
1959 * validated later at attach time.
1960 *
1961 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1962 * prog type requires it but has some attach types that have to be backward
1963 * compatible.
1964 */
1965static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1966{
1967	switch (attr->prog_type) {
1968	case BPF_PROG_TYPE_CGROUP_SOCK:
1969		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1970		 * exist so checking for non-zero is the way to go here.
1971		 */
1972		if (!attr->expected_attach_type)
1973			attr->expected_attach_type =
1974				BPF_CGROUP_INET_SOCK_CREATE;
1975		break;
1976	case BPF_PROG_TYPE_SK_REUSEPORT:
1977		if (!attr->expected_attach_type)
1978			attr->expected_attach_type =
1979				BPF_SK_REUSEPORT_SELECT;
1980		break;
1981	}
1982}
1983
1984static int
1985bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1986			   enum bpf_attach_type expected_attach_type,
1987			   struct btf *attach_btf, u32 btf_id,
1988			   struct bpf_prog *dst_prog)
1989{
1990	if (btf_id) {
1991		if (btf_id > BTF_MAX_TYPE)
1992			return -EINVAL;
1993
1994		if (!attach_btf && !dst_prog)
1995			return -EINVAL;
1996
1997		switch (prog_type) {
1998		case BPF_PROG_TYPE_TRACING:
1999		case BPF_PROG_TYPE_LSM:
2000		case BPF_PROG_TYPE_STRUCT_OPS:
2001		case BPF_PROG_TYPE_EXT:
2002			break;
2003		default:
2004			return -EINVAL;
2005		}
2006	}
2007
2008	if (attach_btf && (!btf_id || dst_prog))
2009		return -EINVAL;
2010
2011	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2012	    prog_type != BPF_PROG_TYPE_EXT)
2013		return -EINVAL;
2014
2015	switch (prog_type) {
2016	case BPF_PROG_TYPE_CGROUP_SOCK:
2017		switch (expected_attach_type) {
2018		case BPF_CGROUP_INET_SOCK_CREATE:
2019		case BPF_CGROUP_INET_SOCK_RELEASE:
2020		case BPF_CGROUP_INET4_POST_BIND:
2021		case BPF_CGROUP_INET6_POST_BIND:
2022			return 0;
2023		default:
2024			return -EINVAL;
2025		}
2026	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2027		switch (expected_attach_type) {
2028		case BPF_CGROUP_INET4_BIND:
2029		case BPF_CGROUP_INET6_BIND:
2030		case BPF_CGROUP_INET4_CONNECT:
2031		case BPF_CGROUP_INET6_CONNECT:
2032		case BPF_CGROUP_INET4_GETPEERNAME:
2033		case BPF_CGROUP_INET6_GETPEERNAME:
2034		case BPF_CGROUP_INET4_GETSOCKNAME:
2035		case BPF_CGROUP_INET6_GETSOCKNAME:
2036		case BPF_CGROUP_UDP4_SENDMSG:
2037		case BPF_CGROUP_UDP6_SENDMSG:
2038		case BPF_CGROUP_UDP4_RECVMSG:
2039		case BPF_CGROUP_UDP6_RECVMSG:
2040			return 0;
2041		default:
2042			return -EINVAL;
2043		}
2044	case BPF_PROG_TYPE_CGROUP_SKB:
2045		switch (expected_attach_type) {
2046		case BPF_CGROUP_INET_INGRESS:
2047		case BPF_CGROUP_INET_EGRESS:
2048			return 0;
2049		default:
2050			return -EINVAL;
2051		}
2052	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2053		switch (expected_attach_type) {
2054		case BPF_CGROUP_SETSOCKOPT:
2055		case BPF_CGROUP_GETSOCKOPT:
2056			return 0;
2057		default:
2058			return -EINVAL;
2059		}
2060	case BPF_PROG_TYPE_SK_LOOKUP:
2061		if (expected_attach_type == BPF_SK_LOOKUP)
2062			return 0;
2063		return -EINVAL;
2064	case BPF_PROG_TYPE_SK_REUSEPORT:
2065		switch (expected_attach_type) {
2066		case BPF_SK_REUSEPORT_SELECT:
2067		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2068			return 0;
2069		default:
2070			return -EINVAL;
2071		}
2072	case BPF_PROG_TYPE_SYSCALL:
2073	case BPF_PROG_TYPE_EXT:
2074		if (expected_attach_type)
2075			return -EINVAL;
2076		fallthrough;
2077	default:
2078		return 0;
2079	}
2080}
2081
2082static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2083{
2084	switch (prog_type) {
2085	case BPF_PROG_TYPE_SCHED_CLS:
2086	case BPF_PROG_TYPE_SCHED_ACT:
2087	case BPF_PROG_TYPE_XDP:
2088	case BPF_PROG_TYPE_LWT_IN:
2089	case BPF_PROG_TYPE_LWT_OUT:
2090	case BPF_PROG_TYPE_LWT_XMIT:
2091	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2092	case BPF_PROG_TYPE_SK_SKB:
2093	case BPF_PROG_TYPE_SK_MSG:
2094	case BPF_PROG_TYPE_LIRC_MODE2:
2095	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2096	case BPF_PROG_TYPE_CGROUP_DEVICE:
2097	case BPF_PROG_TYPE_CGROUP_SOCK:
2098	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2099	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2100	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2101	case BPF_PROG_TYPE_SOCK_OPS:
2102	case BPF_PROG_TYPE_EXT: /* extends any prog */
2103		return true;
2104	case BPF_PROG_TYPE_CGROUP_SKB:
2105		/* always unpriv */
2106	case BPF_PROG_TYPE_SK_REUSEPORT:
2107		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2108	default:
2109		return false;
2110	}
2111}
2112
2113static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2114{
2115	switch (prog_type) {
2116	case BPF_PROG_TYPE_KPROBE:
2117	case BPF_PROG_TYPE_TRACEPOINT:
2118	case BPF_PROG_TYPE_PERF_EVENT:
2119	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2120	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2121	case BPF_PROG_TYPE_TRACING:
2122	case BPF_PROG_TYPE_LSM:
2123	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2124	case BPF_PROG_TYPE_EXT: /* extends any prog */
2125		return true;
2126	default:
2127		return false;
2128	}
2129}
2130
2131/* last field in 'union bpf_attr' used by this command */
2132#define	BPF_PROG_LOAD_LAST_FIELD fd_array
2133
2134static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
2135{
2136	enum bpf_prog_type type = attr->prog_type;
2137	struct bpf_prog *prog, *dst_prog = NULL;
2138	struct btf *attach_btf = NULL;
2139	int err;
2140	char license[128];
2141	bool is_gpl;
2142
2143	if (CHECK_ATTR(BPF_PROG_LOAD))
2144		return -EINVAL;
2145
2146	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2147				 BPF_F_ANY_ALIGNMENT |
2148				 BPF_F_TEST_STATE_FREQ |
2149				 BPF_F_SLEEPABLE |
2150				 BPF_F_TEST_RND_HI32))
2151		return -EINVAL;
2152
2153	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2154	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2155	    !bpf_capable())
2156		return -EPERM;
2157
2158	/* copy eBPF program license from user space */
2159	if (strncpy_from_bpfptr(license,
2160				make_bpfptr(attr->license, uattr.is_kernel),
2161				sizeof(license) - 1) < 0)
2162		return -EFAULT;
2163	license[sizeof(license) - 1] = 0;
2164
2165	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2166	is_gpl = license_is_gpl_compatible(license);
2167
2168	if (attr->insn_cnt == 0 ||
2169	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2170		return -E2BIG;
2171	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2172	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2173	    !bpf_capable())
2174		return -EPERM;
2175
2176	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2177		return -EPERM;
2178	if (is_perfmon_prog_type(type) && !perfmon_capable())
2179		return -EPERM;
2180
2181	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2182	 * or btf, we need to check which one it is
2183	 */
2184	if (attr->attach_prog_fd) {
2185		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2186		if (IS_ERR(dst_prog)) {
2187			dst_prog = NULL;
2188			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2189			if (IS_ERR(attach_btf))
2190				return -EINVAL;
2191			if (!btf_is_kernel(attach_btf)) {
2192				/* attaching through specifying bpf_prog's BTF
2193				 * objects directly might be supported eventually
2194				 */
2195				btf_put(attach_btf);
2196				return -ENOTSUPP;
2197			}
2198		}
2199	} else if (attr->attach_btf_id) {
2200		/* fall back to vmlinux BTF, if BTF type ID is specified */
2201		attach_btf = bpf_get_btf_vmlinux();
2202		if (IS_ERR(attach_btf))
2203			return PTR_ERR(attach_btf);
2204		if (!attach_btf)
2205			return -EINVAL;
2206		btf_get(attach_btf);
2207	}
2208
2209	bpf_prog_load_fixup_attach_type(attr);
2210	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2211				       attach_btf, attr->attach_btf_id,
2212				       dst_prog)) {
2213		if (dst_prog)
2214			bpf_prog_put(dst_prog);
2215		if (attach_btf)
2216			btf_put(attach_btf);
2217		return -EINVAL;
2218	}
2219
2220	/* plain bpf_prog allocation */
2221	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2222	if (!prog) {
2223		if (dst_prog)
2224			bpf_prog_put(dst_prog);
2225		if (attach_btf)
2226			btf_put(attach_btf);
2227		return -ENOMEM;
2228	}
2229
2230	prog->expected_attach_type = attr->expected_attach_type;
2231	prog->aux->attach_btf = attach_btf;
2232	prog->aux->attach_btf_id = attr->attach_btf_id;
2233	prog->aux->dst_prog = dst_prog;
2234	prog->aux->offload_requested = !!attr->prog_ifindex;
2235	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2236
2237	err = security_bpf_prog_alloc(prog->aux);
2238	if (err)
2239		goto free_prog;
2240
2241	prog->aux->user = get_current_user();
2242	prog->len = attr->insn_cnt;
2243
2244	err = -EFAULT;
2245	if (copy_from_bpfptr(prog->insns,
2246			     make_bpfptr(attr->insns, uattr.is_kernel),
2247			     bpf_prog_insn_size(prog)) != 0)
2248		goto free_prog_sec;
2249
2250	prog->orig_prog = NULL;
2251	prog->jited = 0;
2252
2253	atomic64_set(&prog->aux->refcnt, 1);
2254	prog->gpl_compatible = is_gpl ? 1 : 0;
2255
2256	if (bpf_prog_is_dev_bound(prog->aux)) {
2257		err = bpf_prog_offload_init(prog, attr);
2258		if (err)
2259			goto free_prog_sec;
2260	}
2261
2262	/* find program type: socket_filter vs tracing_filter */
2263	err = find_prog_type(type, prog);
2264	if (err < 0)
2265		goto free_prog_sec;
2266
2267	prog->aux->load_time = ktime_get_boottime_ns();
2268	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2269			       sizeof(attr->prog_name));
2270	if (err < 0)
2271		goto free_prog_sec;
2272
2273	/* run eBPF verifier */
2274	err = bpf_check(&prog, attr, uattr);
2275	if (err < 0)
2276		goto free_used_maps;
2277
 2278	prog = bpf_prog_select_runtime(prog, &err);
2279	if (err < 0)
2280		goto free_used_maps;
2281
2282	err = bpf_prog_alloc_id(prog);
 2283	if (err)
2284		goto free_used_maps;
2285
2286	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2287	 * effectively publicly exposed. However, retrieving via
2288	 * bpf_prog_get_fd_by_id() will take another reference,
2289	 * therefore it cannot be gone underneath us.
2290	 *
2291	 * Only for the time /after/ successful bpf_prog_new_fd()
2292	 * and before returning to userspace, we might just hold
2293	 * one reference and any parallel close on that fd could
2294	 * rip everything out. Hence, below notifications must
2295	 * happen before bpf_prog_new_fd().
2296	 *
2297	 * Also, any failure handling from this point onwards must
2298	 * be using bpf_prog_put() given the program is exposed.
2299	 */
2300	bpf_prog_kallsyms_add(prog);
2301	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2302	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2303
2304	err = bpf_prog_new_fd(prog);
2305	if (err < 0)
2306		bpf_prog_put(prog);
2307	return err;
2308
2309free_used_maps:
2310	/* In case we have subprogs, we need to wait for a grace
2311	 * period before we can tear down JIT memory since symbols
2312	 * are already exposed under kallsyms.
2313	 */
2314	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
2315	return err;
2316free_prog_sec:
2317	free_uid(prog->aux->user);
2318	security_bpf_prog_free(prog->aux);
2319free_prog:
2320	if (prog->aux->attach_btf)
2321		btf_put(prog->aux->attach_btf);
2322	bpf_prog_free(prog);
2323	return err;
2324}
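/* A hedged user-space sketch of the BPF_PROG_LOAD command handled above:
 * loading a two-instruction socket filter ("r0 = 0; exit") under a GPL
 * license.  The instruction encoding and the reuse of sys_bpf()/ptr_to_u64()
 * are illustrative assumptions, not something defined in this file.
 */
static int load_tiny_prog(void)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },	/* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },		/* return r0 */
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = ptr_to_u64(insns);
	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
	attr.license   = ptr_to_u64("GPL");	/* checked via license_is_gpl_compatible() */

	/* on success the new prog fd from bpf_prog_new_fd() is returned */
	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}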
2325
2326#define BPF_OBJ_LAST_FIELD file_flags
2327
2328static int bpf_obj_pin(const union bpf_attr *attr)
2329{
2330	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2331		return -EINVAL;
2332
2333	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2334}
2335
2336static int bpf_obj_get(const union bpf_attr *attr)
2337{
2338	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2339	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2340		return -EINVAL;
2341
2342	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2343				attr->file_flags);
2344}
2345
2346void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2347		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2348{
2349	atomic64_set(&link->refcnt, 1);
2350	link->type = type;
2351	link->id = 0;
2352	link->ops = ops;
2353	link->prog = prog;
2354}
2355
2356static void bpf_link_free_id(int id)
2357{
2358	if (!id)
2359		return;
2360
2361	spin_lock_bh(&link_idr_lock);
2362	idr_remove(&link_idr, id);
2363	spin_unlock_bh(&link_idr_lock);
2364}
2365
2366/* Clean up bpf_link and corresponding anon_inode file and FD. After
2367 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2368 * anon_inode's release() call. This helper marks bpf_link as
2369 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2370 * is not decremented, it's the responsibility of the calling code that failed
2371 * to complete bpf_link initialization.
2372 */
2373void bpf_link_cleanup(struct bpf_link_primer *primer)
2374{
2375	primer->link->prog = NULL;
2376	bpf_link_free_id(primer->id);
2377	fput(primer->file);
2378	put_unused_fd(primer->fd);
2379}
2380
2381void bpf_link_inc(struct bpf_link *link)
2382{
2383	atomic64_inc(&link->refcnt);
2384}
2385
2386/* bpf_link_free is guaranteed to be called from process context */
2387static void bpf_link_free(struct bpf_link *link)
2388{
2389	bpf_link_free_id(link->id);
2390	if (link->prog) {
2391		/* detach BPF program, clean up used resources */
2392		link->ops->release(link);
2393		bpf_prog_put(link->prog);
2394	}
2395	/* free bpf_link and its containing memory */
2396	link->ops->dealloc(link);
2397}
2398
2399static void bpf_link_put_deferred(struct work_struct *work)
2400{
2401	struct bpf_link *link = container_of(work, struct bpf_link, work);
2402
2403	bpf_link_free(link);
2404}
2405
2406/* bpf_link_put can be called from atomic context, but ensures that resources
2407 * are freed from process context
2408 */
2409void bpf_link_put(struct bpf_link *link)
2410{
2411	if (!atomic64_dec_and_test(&link->refcnt))
2412		return;
2413
2414	if (in_atomic()) {
2415		INIT_WORK(&link->work, bpf_link_put_deferred);
2416		schedule_work(&link->work);
2417	} else {
2418		bpf_link_free(link);
2419	}
2420}
2421
2422static int bpf_link_release(struct inode *inode, struct file *filp)
2423{
2424	struct bpf_link *link = filp->private_data;
2425
2426	bpf_link_put(link);
2427	return 0;
2428}
2429
2430#ifdef CONFIG_PROC_FS
2431#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2432#define BPF_MAP_TYPE(_id, _ops)
2433#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2434static const char *bpf_link_type_strs[] = {
2435	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2436#include <linux/bpf_types.h>
2437};
2438#undef BPF_PROG_TYPE
2439#undef BPF_MAP_TYPE
2440#undef BPF_LINK_TYPE
2441
2442static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2443{
2444	const struct bpf_link *link = filp->private_data;
2445	const struct bpf_prog *prog = link->prog;
2446	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2447
2448	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2449	seq_printf(m,
2450		   "link_type:\t%s\n"
2451		   "link_id:\t%u\n"
2452		   "prog_tag:\t%s\n"
2453		   "prog_id:\t%u\n",
2454		   bpf_link_type_strs[link->type],
2455		   link->id,
2456		   prog_tag,
2457		   prog->aux->id);
2458	if (link->ops->show_fdinfo)
2459		link->ops->show_fdinfo(link, m);
2460}
2461#endif
2462
2463static const struct file_operations bpf_link_fops = {
2464#ifdef CONFIG_PROC_FS
2465	.show_fdinfo	= bpf_link_show_fdinfo,
2466#endif
2467	.release	= bpf_link_release,
2468	.read		= bpf_dummy_read,
2469	.write		= bpf_dummy_write,
2470};
2471
2472static int bpf_link_alloc_id(struct bpf_link *link)
2473{
2474	int id;
2475
2476	idr_preload(GFP_KERNEL);
2477	spin_lock_bh(&link_idr_lock);
2478	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2479	spin_unlock_bh(&link_idr_lock);
2480	idr_preload_end();
2481
2482	return id;
2483}
2484
2485/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2486 * reserving unused FD and allocating ID from link_idr. This is to be paired
2487 * with bpf_link_settle() to install FD and ID and expose bpf_link to
2488 * user-space, if bpf_link is successfully attached. If not, bpf_link and
2489 * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
2490 * the transient state is passed around in struct bpf_link_primer.
2491 * This is the preferred way to create and initialize bpf_link, especially when
2492 * there are complicated and expensive operations in between creating bpf_link
2493 * itself and attaching it to a BPF hook. By using bpf_link_prime() and
2494 * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2495 * expensive (and potentially failing) rollback operations in the rare case
2496 * that the file, FD, or ID can't be allocated.
2497 */
2498int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2499{
2500	struct file *file;
2501	int fd, id;
2502
2503	fd = get_unused_fd_flags(O_CLOEXEC);
2504	if (fd < 0)
2505		return fd;
2506
2507
2508	id = bpf_link_alloc_id(link);
2509	if (id < 0) {
2510		put_unused_fd(fd);
2511		return id;
2512	}
2513
2514	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2515	if (IS_ERR(file)) {
2516		bpf_link_free_id(id);
2517		put_unused_fd(fd);
2518		return PTR_ERR(file);
2519	}
2520
2521	primer->link = link;
2522	primer->file = file;
2523	primer->fd = fd;
2524	primer->id = id;
2525	return 0;
2526}
2527
2528int bpf_link_settle(struct bpf_link_primer *primer)
2529{
2530	/* make bpf_link fetchable by ID */
2531	spin_lock_bh(&link_idr_lock);
2532	primer->link->id = primer->id;
2533	spin_unlock_bh(&link_idr_lock);
2534	/* make bpf_link fetchable by FD */
2535	fd_install(primer->fd, primer->file);
2536	/* pass through installed FD */
2537	return primer->fd;
2538}
2539
2540int bpf_link_new_fd(struct bpf_link *link)
2541{
2542	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2543}
2544
2545struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2546{
2547	struct fd f = fdget(ufd);
2548	struct bpf_link *link;
2549
2550	if (!f.file)
2551		return ERR_PTR(-EBADF);
2552	if (f.file->f_op != &bpf_link_fops) {
2553		fdput(f);
2554		return ERR_PTR(-EINVAL);
2555	}
2556
2557	link = f.file->private_data;
2558	bpf_link_inc(link);
2559	fdput(f);
2560
2561	return link;
2562}
2563
2564struct bpf_tracing_link {
2565	struct bpf_link link;
2566	enum bpf_attach_type attach_type;
2567	struct bpf_trampoline *trampoline;
2568	struct bpf_prog *tgt_prog;
2569};
2570
2571static void bpf_tracing_link_release(struct bpf_link *link)
2572{
2573	struct bpf_tracing_link *tr_link =
2574		container_of(link, struct bpf_tracing_link, link);
2575
2576	WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
2577						tr_link->trampoline));
2578
2579	bpf_trampoline_put(tr_link->trampoline);
2580
2581	/* tgt_prog is NULL if target is a kernel function */
2582	if (tr_link->tgt_prog)
2583		bpf_prog_put(tr_link->tgt_prog);
2584}
2585
2586static void bpf_tracing_link_dealloc(struct bpf_link *link)
2587{
2588	struct bpf_tracing_link *tr_link =
2589		container_of(link, struct bpf_tracing_link, link);
2590
2591	kfree(tr_link);
2592}
2593
2594static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2595					 struct seq_file *seq)
2596{
2597	struct bpf_tracing_link *tr_link =
2598		container_of(link, struct bpf_tracing_link, link);
2599
2600	seq_printf(seq,
2601		   "attach_type:\t%d\n",
2602		   tr_link->attach_type);
2603}
2604
2605static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2606					   struct bpf_link_info *info)
2607{
2608	struct bpf_tracing_link *tr_link =
2609		container_of(link, struct bpf_tracing_link, link);
2610
2611	info->tracing.attach_type = tr_link->attach_type;
2612	bpf_trampoline_unpack_key(tr_link->trampoline->key,
2613				  &info->tracing.target_obj_id,
2614				  &info->tracing.target_btf_id);
2615
2616	return 0;
2617}
2618
2619static const struct bpf_link_ops bpf_tracing_link_lops = {
2620	.release = bpf_tracing_link_release,
2621	.dealloc = bpf_tracing_link_dealloc,
2622	.show_fdinfo = bpf_tracing_link_show_fdinfo,
2623	.fill_link_info = bpf_tracing_link_fill_link_info,
2624};
2625
2626static int bpf_tracing_prog_attach(struct bpf_prog *prog,
2627				   int tgt_prog_fd,
2628				   u32 btf_id)
2629{
2630	struct bpf_link_primer link_primer;
2631	struct bpf_prog *tgt_prog = NULL;
2632	struct bpf_trampoline *tr = NULL;
2633	struct bpf_tracing_link *link;
2634	u64 key = 0;
2635	int err;
2636
2637	switch (prog->type) {
2638	case BPF_PROG_TYPE_TRACING:
2639		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2640		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
2641		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
2642			err = -EINVAL;
2643			goto out_put_prog;
2644		}
2645		break;
2646	case BPF_PROG_TYPE_EXT:
2647		if (prog->expected_attach_type != 0) {
2648			err = -EINVAL;
2649			goto out_put_prog;
2650		}
2651		break;
2652	case BPF_PROG_TYPE_LSM:
2653		if (prog->expected_attach_type != BPF_LSM_MAC) {
2654			err = -EINVAL;
2655			goto out_put_prog;
2656		}
2657		break;
2658	default:
2659		err = -EINVAL;
2660		goto out_put_prog;
2661	}
2662
2663	if (!!tgt_prog_fd != !!btf_id) {
2664		err = -EINVAL;
2665		goto out_put_prog;
2666	}
2667
2668	if (tgt_prog_fd) {
2669		/* For now we only allow new targets for BPF_PROG_TYPE_EXT */
2670		if (prog->type != BPF_PROG_TYPE_EXT) {
2671			err = -EINVAL;
2672			goto out_put_prog;
2673		}
2674
2675		tgt_prog = bpf_prog_get(tgt_prog_fd);
2676		if (IS_ERR(tgt_prog)) {
2677			err = PTR_ERR(tgt_prog);
2678			tgt_prog = NULL;
2679			goto out_put_prog;
2680		}
2681
2682		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
2683	}
2684
2685	link = kzalloc(sizeof(*link), GFP_USER);
2686	if (!link) {
2687		err = -ENOMEM;
2688		goto out_put_prog;
2689	}
2690	bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2691		      &bpf_tracing_link_lops, prog);
2692	link->attach_type = prog->expected_attach_type;
2693
2694	mutex_lock(&prog->aux->dst_mutex);
2695
2696	/* There are a few possible cases here:
2697	 *
2698	 * - if prog->aux->dst_trampoline is set, the program was just loaded
2699	 *   and not yet attached to anything, so we can use the values stored
2700	 *   in prog->aux
2701	 *
2702	 * - if prog->aux->dst_trampoline is NULL, the program has already been
2703	 *   attached to a target and its initial target was cleared (below)
2704	 *
2705	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
2706	 *   target_btf_id using the link_create API.
2707	 *
2708	 * - if tgt_prog == NULL, this function was called using the old
2709	 *   raw_tracepoint_open API, and we need a target from prog->aux
2710	 *
2711	 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
2712	 *   was detached and is going for re-attachment.
2713	 */
2714	if (!prog->aux->dst_trampoline && !tgt_prog) {
2715		/*
2716		 * Allow re-attach for TRACING and LSM programs. If it's
2717		 * currently linked, bpf_trampoline_link_prog will fail.
2718		 * EXT programs need to specify tgt_prog_fd, so they
2719		 * re-attach in separate code path.
2720		 */
2721		if (prog->type != BPF_PROG_TYPE_TRACING &&
2722		    prog->type != BPF_PROG_TYPE_LSM) {
2723			err = -EINVAL;
2724			goto out_unlock;
2725		}
2726		btf_id = prog->aux->attach_btf_id;
2727		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
2728	}
2729
2730	if (!prog->aux->dst_trampoline ||
2731	    (key && key != prog->aux->dst_trampoline->key)) {
2732		/* If there is no saved target, or the specified target is
2733		 * different from the destination specified at load time, we
2734		 * need a new trampoline and a check for compatibility
2735		 */
2736		struct bpf_attach_target_info tgt_info = {};
2737
2738		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
2739					      &tgt_info);
2740		if (err)
2741			goto out_unlock;
2742
2743		tr = bpf_trampoline_get(key, &tgt_info);
2744		if (!tr) {
2745			err = -ENOMEM;
2746			goto out_unlock;
2747		}
2748	} else {
2749		/* The caller didn't specify a target, or the target was the
2750		 * same as the destination supplied during program load. This
2751		 * means we can reuse the trampoline and reference from program
2752		 * load time, and there is no need to allocate a new one. This
2753		 * can only happen once for any program, as the saved values in
2754		 * prog->aux are cleared below.
2755		 */
2756		tr = prog->aux->dst_trampoline;
2757		tgt_prog = prog->aux->dst_prog;
2758	}
2759
2760	err = bpf_link_prime(&link->link, &link_primer);
2761	if (err)
2762		goto out_unlock;
2763
2764	err = bpf_trampoline_link_prog(prog, tr);
2765	if (err) {
2766		bpf_link_cleanup(&link_primer);
2767		link = NULL;
2768		goto out_unlock;
2769	}
2770
2771	link->tgt_prog = tgt_prog;
2772	link->trampoline = tr;
2773
2774	/* Always clear the trampoline and target prog from prog->aux to make
2775	 * sure the original attach destination is not kept alive after a
2776	 * program is (re-)attached to another target.
2777	 */
2778	if (prog->aux->dst_prog &&
2779	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
2780		/* got extra prog ref from syscall, or attaching to different prog */
2781		bpf_prog_put(prog->aux->dst_prog);
2782	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
2783		/* we allocated a new trampoline, so free the old one */
2784		bpf_trampoline_put(prog->aux->dst_trampoline);
2785
2786	prog->aux->dst_prog = NULL;
2787	prog->aux->dst_trampoline = NULL;
2788	mutex_unlock(&prog->aux->dst_mutex);
2789
2790	return bpf_link_settle(&link_primer);
2791out_unlock:
2792	if (tr && tr != prog->aux->dst_trampoline)
2793		bpf_trampoline_put(tr);
2794	mutex_unlock(&prog->aux->dst_mutex);
2795	kfree(link);
2796out_put_prog:
2797	if (tgt_prog_fd && tgt_prog)
2798		bpf_prog_put(tgt_prog);
2799	return err;
2800}
2801
2802struct bpf_raw_tp_link {
2803	struct bpf_link link;
2804	struct bpf_raw_event_map *btp;
2805};
2806
2807static void bpf_raw_tp_link_release(struct bpf_link *link)
2808{
2809	struct bpf_raw_tp_link *raw_tp =
2810		container_of(link, struct bpf_raw_tp_link, link);
2811
2812	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2813	bpf_put_raw_tracepoint(raw_tp->btp);
2814}
2815
2816static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2817{
2818	struct bpf_raw_tp_link *raw_tp =
2819		container_of(link, struct bpf_raw_tp_link, link);
2820
2821	kfree(raw_tp);
2822}
2823
2824static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2825					struct seq_file *seq)
2826{
2827	struct bpf_raw_tp_link *raw_tp_link =
2828		container_of(link, struct bpf_raw_tp_link, link);
2829
2830	seq_printf(seq,
2831		   "tp_name:\t%s\n",
2832		   raw_tp_link->btp->tp->name);
2833}
2834
2835static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2836					  struct bpf_link_info *info)
2837{
2838	struct bpf_raw_tp_link *raw_tp_link =
2839		container_of(link, struct bpf_raw_tp_link, link);
2840	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2841	const char *tp_name = raw_tp_link->btp->tp->name;
2842	u32 ulen = info->raw_tracepoint.tp_name_len;
2843	size_t tp_len = strlen(tp_name);
2844
2845	if (!ulen ^ !ubuf)
2846		return -EINVAL;
2847
2848	info->raw_tracepoint.tp_name_len = tp_len + 1;
2849
2850	if (!ubuf)
2851		return 0;
2852
2853	if (ulen >= tp_len + 1) {
2854		if (copy_to_user(ubuf, tp_name, tp_len + 1))
2855			return -EFAULT;
2856	} else {
2857		char zero = '\0';
2858
2859		if (copy_to_user(ubuf, tp_name, ulen - 1))
2860			return -EFAULT;
2861		if (put_user(zero, ubuf + ulen - 1))
2862			return -EFAULT;
2863		return -ENOSPC;
2864	}
2865
2866	return 0;
2867}
2868
2869static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2870	.release = bpf_raw_tp_link_release,
2871	.dealloc = bpf_raw_tp_link_dealloc,
2872	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2873	.fill_link_info = bpf_raw_tp_link_fill_link_info,
2874};
2875
2876#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2877
2878static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2879{
2880	struct bpf_link_primer link_primer;
2881	struct bpf_raw_tp_link *link;
2882	struct bpf_raw_event_map *btp;
2883	struct bpf_prog *prog;
2884	const char *tp_name;
2885	char buf[128];
2886	int err;
2887
2888	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2889		return -EINVAL;
2890
2891	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2892	if (IS_ERR(prog))
2893		return PTR_ERR(prog);
2894
2895	switch (prog->type) {
2896	case BPF_PROG_TYPE_TRACING:
2897	case BPF_PROG_TYPE_EXT:
2898	case BPF_PROG_TYPE_LSM:
2899		if (attr->raw_tracepoint.name) {
2900			/* The attach point for this category of programs
2901			 * should be specified via btf_id during program load.
2902			 */
2903			err = -EINVAL;
2904			goto out_put_prog;
2905		}
2906		if (prog->type == BPF_PROG_TYPE_TRACING &&
2907		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2908			tp_name = prog->aux->attach_func_name;
2909			break;
2910		}
2911		err = bpf_tracing_prog_attach(prog, 0, 0);
2912		if (err >= 0)
2913			return err;
2914		goto out_put_prog;
2915	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2916	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2917		if (strncpy_from_user(buf,
2918				      u64_to_user_ptr(attr->raw_tracepoint.name),
2919				      sizeof(buf) - 1) < 0) {
2920			err = -EFAULT;
2921			goto out_put_prog;
2922		}
2923		buf[sizeof(buf) - 1] = 0;
2924		tp_name = buf;
2925		break;
2926	default:
2927		err = -EINVAL;
2928		goto out_put_prog;
2929	}
2930
2931	btp = bpf_get_raw_tracepoint(tp_name);
2932	if (!btp) {
2933		err = -ENOENT;
2934		goto out_put_prog;
2935	}
2936
2937	link = kzalloc(sizeof(*link), GFP_USER);
2938	if (!link) {
2939		err = -ENOMEM;
2940		goto out_put_btp;
2941	}
2942	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2943		      &bpf_raw_tp_link_lops, prog);
2944	link->btp = btp;
2945
2946	err = bpf_link_prime(&link->link, &link_primer);
2947	if (err) {
2948		kfree(link);
2949		goto out_put_btp;
2950	}
2951
2952	err = bpf_probe_register(link->btp, prog);
2953	if (err) {
2954		bpf_link_cleanup(&link_primer);
2955		goto out_put_btp;
2956	}
2957
2958	return bpf_link_settle(&link_primer);
2959
2960out_put_btp:
2961	bpf_put_raw_tracepoint(btp);
2962out_put_prog:
2963	bpf_prog_put(prog);
2964	return err;
2965}
2966
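/* Illustrative user-space sketch of BPF_RAW_TRACEPOINT_OPEN, matching the
 * handler above.  Assumes <linux/bpf.h>, <unistd.h> and <sys/syscall.h>;
 * "prog_fd" is a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program and
 * "sched_switch" is only an example attach point:
 *
 *    union bpf_attr attr = {};
 *    int link_fd;
 *
 *    attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *    attr.raw_tracepoint.prog_fd = prog_fd;
 *    link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 *
 * On success link_fd is an anonymous link fd; closing it triggers
 * bpf_raw_tp_link_release() and detaches the program.
 */
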
2967static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2968					     enum bpf_attach_type attach_type)
2969{
2970	switch (prog->type) {
2971	case BPF_PROG_TYPE_CGROUP_SOCK:
2972	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2973	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2974	case BPF_PROG_TYPE_SK_LOOKUP:
2975		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2976	case BPF_PROG_TYPE_CGROUP_SKB:
2977		if (!capable(CAP_NET_ADMIN))
2978			/* cg-skb progs can be loaded by unpriv user.
2979			 * check permissions at attach time.
2980			 */
2981			return -EPERM;
2982		return prog->enforce_expected_attach_type &&
2983			prog->expected_attach_type != attach_type ?
2984			-EINVAL : 0;
2985	default:
2986		return 0;
2987	}
2988}
2989
2990static enum bpf_prog_type
2991attach_type_to_prog_type(enum bpf_attach_type attach_type)
2992{
2993	switch (attach_type) {
2994	case BPF_CGROUP_INET_INGRESS:
2995	case BPF_CGROUP_INET_EGRESS:
2996		return BPF_PROG_TYPE_CGROUP_SKB;
2997	case BPF_CGROUP_INET_SOCK_CREATE:
2998	case BPF_CGROUP_INET_SOCK_RELEASE:
2999	case BPF_CGROUP_INET4_POST_BIND:
3000	case BPF_CGROUP_INET6_POST_BIND:
3001		return BPF_PROG_TYPE_CGROUP_SOCK;
3002	case BPF_CGROUP_INET4_BIND:
3003	case BPF_CGROUP_INET6_BIND:
3004	case BPF_CGROUP_INET4_CONNECT:
3005	case BPF_CGROUP_INET6_CONNECT:
3006	case BPF_CGROUP_INET4_GETPEERNAME:
3007	case BPF_CGROUP_INET6_GETPEERNAME:
3008	case BPF_CGROUP_INET4_GETSOCKNAME:
3009	case BPF_CGROUP_INET6_GETSOCKNAME:
3010	case BPF_CGROUP_UDP4_SENDMSG:
3011	case BPF_CGROUP_UDP6_SENDMSG:
3012	case BPF_CGROUP_UDP4_RECVMSG:
3013	case BPF_CGROUP_UDP6_RECVMSG:
3014		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3015	case BPF_CGROUP_SOCK_OPS:
3016		return BPF_PROG_TYPE_SOCK_OPS;
3017	case BPF_CGROUP_DEVICE:
3018		return BPF_PROG_TYPE_CGROUP_DEVICE;
3019	case BPF_SK_MSG_VERDICT:
3020		return BPF_PROG_TYPE_SK_MSG;
3021	case BPF_SK_SKB_STREAM_PARSER:
3022	case BPF_SK_SKB_STREAM_VERDICT:
3023	case BPF_SK_SKB_VERDICT:
3024		return BPF_PROG_TYPE_SK_SKB;
3025	case BPF_LIRC_MODE2:
3026		return BPF_PROG_TYPE_LIRC_MODE2;
3027	case BPF_FLOW_DISSECTOR:
3028		return BPF_PROG_TYPE_FLOW_DISSECTOR;
3029	case BPF_CGROUP_SYSCTL:
3030		return BPF_PROG_TYPE_CGROUP_SYSCTL;
3031	case BPF_CGROUP_GETSOCKOPT:
3032	case BPF_CGROUP_SETSOCKOPT:
3033		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3034	case BPF_TRACE_ITER:
3035		return BPF_PROG_TYPE_TRACING;
3036	case BPF_SK_LOOKUP:
3037		return BPF_PROG_TYPE_SK_LOOKUP;
3038	case BPF_XDP:
3039		return BPF_PROG_TYPE_XDP;
3040	default:
3041		return BPF_PROG_TYPE_UNSPEC;
3042	}
3043}
3044
3045#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
3046
3047#define BPF_F_ATTACH_MASK \
3048	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
3049
3050static int bpf_prog_attach(const union bpf_attr *attr)
3051{
3052	enum bpf_prog_type ptype;
3053	struct bpf_prog *prog;
3054	int ret;
3055
3056	if (CHECK_ATTR(BPF_PROG_ATTACH))
3057		return -EINVAL;
3058
3059	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
3060		return -EINVAL;
3061
3062	ptype = attach_type_to_prog_type(attr->attach_type);
3063	if (ptype == BPF_PROG_TYPE_UNSPEC)
3064		return -EINVAL;
3065
3066	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3067	if (IS_ERR(prog))
3068		return PTR_ERR(prog);
3069
3070	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3071		bpf_prog_put(prog);
3072		return -EINVAL;
3073	}
3074
3075	switch (ptype) {
3076	case BPF_PROG_TYPE_SK_SKB:
3077	case BPF_PROG_TYPE_SK_MSG:
3078		ret = sock_map_get_from_fd(attr, prog);
3079		break;
3080	case BPF_PROG_TYPE_LIRC_MODE2:
3081		ret = lirc_prog_attach(attr, prog);
3082		break;
3083	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3084		ret = netns_bpf_prog_attach(attr, prog);
3085		break;
3086	case BPF_PROG_TYPE_CGROUP_DEVICE:
3087	case BPF_PROG_TYPE_CGROUP_SKB:
3088	case BPF_PROG_TYPE_CGROUP_SOCK:
3089	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3090	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3091	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3092	case BPF_PROG_TYPE_SOCK_OPS:
3093		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3094		break;
3095	default:
3096		ret = -EINVAL;
3097	}
3098
3099	if (ret)
3100		bpf_prog_put(prog);
3101	return ret;
3102}
3103
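/* Illustrative user-space sketch of BPF_PROG_ATTACH for a cgroup program.
 * "cgroup_fd" (an open cgroup directory) and "prog_fd" (a loaded
 * BPF_PROG_TYPE_CGROUP_SKB program) are assumptions:
 *
 *    union bpf_attr attr = {};
 *    int err;
 *
 *    attr.target_fd     = cgroup_fd;
 *    attr.attach_bpf_fd = prog_fd;
 *    attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *    attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *    err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * A return of 0 means the program stays attached until it is removed with
 * BPF_PROG_DETACH or replaced.
 */
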
3104#define BPF_PROG_DETACH_LAST_FIELD attach_type
3105
3106static int bpf_prog_detach(const union bpf_attr *attr)
3107{
3108	enum bpf_prog_type ptype;
3109
3110	if (CHECK_ATTR(BPF_PROG_DETACH))
3111		return -EINVAL;
3112
3113	ptype = attach_type_to_prog_type(attr->attach_type);
3114
3115	switch (ptype) {
3116	case BPF_PROG_TYPE_SK_MSG:
3117	case BPF_PROG_TYPE_SK_SKB:
3118		return sock_map_prog_detach(attr, ptype);
3119	case BPF_PROG_TYPE_LIRC_MODE2:
3120		return lirc_prog_detach(attr);
3121	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3122		return netns_bpf_prog_detach(attr, ptype);
3123	case BPF_PROG_TYPE_CGROUP_DEVICE:
3124	case BPF_PROG_TYPE_CGROUP_SKB:
3125	case BPF_PROG_TYPE_CGROUP_SOCK:
3126	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3127	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3128	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3129	case BPF_PROG_TYPE_SOCK_OPS:
3130		return cgroup_bpf_prog_detach(attr, ptype);
3131	default:
3132		return -EINVAL;
3133	}
3134}
3135
3136#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
3137
3138static int bpf_prog_query(const union bpf_attr *attr,
3139			  union bpf_attr __user *uattr)
3140{
3141	if (!capable(CAP_NET_ADMIN))
3142		return -EPERM;
3143	if (CHECK_ATTR(BPF_PROG_QUERY))
3144		return -EINVAL;
3145	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3146		return -EINVAL;
3147
3148	switch (attr->query.attach_type) {
3149	case BPF_CGROUP_INET_INGRESS:
3150	case BPF_CGROUP_INET_EGRESS:
3151	case BPF_CGROUP_INET_SOCK_CREATE:
3152	case BPF_CGROUP_INET_SOCK_RELEASE:
3153	case BPF_CGROUP_INET4_BIND:
3154	case BPF_CGROUP_INET6_BIND:
3155	case BPF_CGROUP_INET4_POST_BIND:
3156	case BPF_CGROUP_INET6_POST_BIND:
3157	case BPF_CGROUP_INET4_CONNECT:
3158	case BPF_CGROUP_INET6_CONNECT:
3159	case BPF_CGROUP_INET4_GETPEERNAME:
3160	case BPF_CGROUP_INET6_GETPEERNAME:
3161	case BPF_CGROUP_INET4_GETSOCKNAME:
3162	case BPF_CGROUP_INET6_GETSOCKNAME:
3163	case BPF_CGROUP_UDP4_SENDMSG:
3164	case BPF_CGROUP_UDP6_SENDMSG:
3165	case BPF_CGROUP_UDP4_RECVMSG:
3166	case BPF_CGROUP_UDP6_RECVMSG:
3167	case BPF_CGROUP_SOCK_OPS:
3168	case BPF_CGROUP_DEVICE:
3169	case BPF_CGROUP_SYSCTL:
3170	case BPF_CGROUP_GETSOCKOPT:
3171	case BPF_CGROUP_SETSOCKOPT:
3172		return cgroup_bpf_prog_query(attr, uattr);
3173	case BPF_LIRC_MODE2:
3174		return lirc_prog_query(attr, uattr);
3175	case BPF_FLOW_DISSECTOR:
3176	case BPF_SK_LOOKUP:
3177		return netns_bpf_prog_query(attr, uattr);
3178	default:
3179		return -EINVAL;
3180	}
3181}
3182
3183#define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
3184
3185static int bpf_prog_test_run(const union bpf_attr *attr,
3186			     union bpf_attr __user *uattr)
3187{
3188	struct bpf_prog *prog;
3189	int ret = -ENOTSUPP;
3190
3191	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3192		return -EINVAL;
3193
3194	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3195	    (!attr->test.ctx_size_in && attr->test.ctx_in))
3196		return -EINVAL;
3197
3198	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3199	    (!attr->test.ctx_size_out && attr->test.ctx_out))
3200		return -EINVAL;
3201
3202	prog = bpf_prog_get(attr->test.prog_fd);
3203	if (IS_ERR(prog))
3204		return PTR_ERR(prog);
3205
3206	if (prog->aux->ops->test_run)
3207		ret = prog->aux->ops->test_run(prog, attr, uattr);
3208
3209	bpf_prog_put(prog);
3210	return ret;
3211}
3212
3213#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3214
3215static int bpf_obj_get_next_id(const union bpf_attr *attr,
3216			       union bpf_attr __user *uattr,
3217			       struct idr *idr,
3218			       spinlock_t *lock)
3219{
3220	u32 next_id = attr->start_id;
3221	int err = 0;
3222
3223	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3224		return -EINVAL;
3225
3226	if (!capable(CAP_SYS_ADMIN))
3227		return -EPERM;
3228
3229	next_id++;
3230	spin_lock_bh(lock);
3231	if (!idr_get_next(idr, &next_id))
3232		err = -ENOENT;
3233	spin_unlock_bh(lock);
3234
3235	if (!err)
3236		err = put_user(next_id, &uattr->next_id);
3237
3238	return err;
3239}
3240
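/* Illustrative user-space sketch of walking object IDs with the helper
 * above; the same pattern works for BPF_PROG_GET_NEXT_ID,
 * BPF_MAP_GET_NEXT_ID, BPF_BTF_GET_NEXT_ID and BPF_LINK_GET_NEXT_ID
 * (CAP_SYS_ADMIN is required).  Assumes <stdio.h> in addition to the
 * usual bpf and syscall headers:
 *
 *    union bpf_attr attr = {};
 *
 *    while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *        printf("prog id %u\n", attr.next_id);
 *        attr.start_id = attr.next_id;
 *    }
 *
 * The loop terminates when the syscall fails with errno == ENOENT.
 */
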
3241struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3242{
3243	struct bpf_map *map;
3244
3245	spin_lock_bh(&map_idr_lock);
3246again:
3247	map = idr_get_next(&map_idr, id);
3248	if (map) {
3249		map = __bpf_map_inc_not_zero(map, false);
3250		if (IS_ERR(map)) {
3251			(*id)++;
3252			goto again;
3253		}
3254	}
3255	spin_unlock_bh(&map_idr_lock);
3256
3257	return map;
3258}
3259
3260struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3261{
3262	struct bpf_prog *prog;
3263
3264	spin_lock_bh(&prog_idr_lock);
3265again:
3266	prog = idr_get_next(&prog_idr, id);
3267	if (prog) {
3268		prog = bpf_prog_inc_not_zero(prog);
3269		if (IS_ERR(prog)) {
3270			(*id)++;
3271			goto again;
3272		}
3273	}
3274	spin_unlock_bh(&prog_idr_lock);
3275
3276	return prog;
3277}
3278
3279#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3280
3281struct bpf_prog *bpf_prog_by_id(u32 id)
3282{
3283	struct bpf_prog *prog;
3284
3285	if (!id)
3286		return ERR_PTR(-ENOENT);
3287
3288	spin_lock_bh(&prog_idr_lock);
3289	prog = idr_find(&prog_idr, id);
3290	if (prog)
3291		prog = bpf_prog_inc_not_zero(prog);
3292	else
3293		prog = ERR_PTR(-ENOENT);
3294	spin_unlock_bh(&prog_idr_lock);
3295	return prog;
3296}
3297
3298static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3299{
3300	struct bpf_prog *prog;
3301	u32 id = attr->prog_id;
3302	int fd;
3303
3304	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3305		return -EINVAL;
3306
3307	if (!capable(CAP_SYS_ADMIN))
3308		return -EPERM;
3309
3310	prog = bpf_prog_by_id(id);
3311	if (IS_ERR(prog))
3312		return PTR_ERR(prog);
3313
3314	fd = bpf_prog_new_fd(prog);
3315	if (fd < 0)
3316		bpf_prog_put(prog);
3317
3318	return fd;
3319}
3320
3321#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3322
3323static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3324{
3325	struct bpf_map *map;
3326	u32 id = attr->map_id;
3327	int f_flags;
3328	int fd;
3329
3330	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3331	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3332		return -EINVAL;
3333
3334	if (!capable(CAP_SYS_ADMIN))
3335		return -EPERM;
3336
3337	f_flags = bpf_get_file_flag(attr->open_flags);
3338	if (f_flags < 0)
3339		return f_flags;
3340
3341	spin_lock_bh(&map_idr_lock);
3342	map = idr_find(&map_idr, id);
3343	if (map)
3344		map = __bpf_map_inc_not_zero(map, true);
3345	else
3346		map = ERR_PTR(-ENOENT);
3347	spin_unlock_bh(&map_idr_lock);
3348
3349	if (IS_ERR(map))
3350		return PTR_ERR(map);
3351
3352	fd = bpf_map_new_fd(map, f_flags);
3353	if (fd < 0)
3354		bpf_map_put_with_uref(map);
3355
3356	return fd;
3357}
3358
3359static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3360					      unsigned long addr, u32 *off,
3361					      u32 *type)
3362{
3363	const struct bpf_map *map;
3364	int i;
3365
3366	mutex_lock(&prog->aux->used_maps_mutex);
3367	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3368		map = prog->aux->used_maps[i];
3369		if (map == (void *)addr) {
3370			*type = BPF_PSEUDO_MAP_FD;
3371			goto out;
3372		}
3373		if (!map->ops->map_direct_value_meta)
3374			continue;
3375		if (!map->ops->map_direct_value_meta(map, addr, off)) {
3376			*type = BPF_PSEUDO_MAP_VALUE;
3377			goto out;
3378		}
3379	}
3380	map = NULL;
3381
3382out:
3383	mutex_unlock(&prog->aux->used_maps_mutex);
3384	return map;
3385}
3386
3387static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3388					      const struct cred *f_cred)
3389{
3390	const struct bpf_map *map;
3391	struct bpf_insn *insns;
3392	u32 off, type;
3393	u64 imm;
3394	u8 code;
3395	int i;
3396
3397	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3398			GFP_USER);
3399	if (!insns)
3400		return insns;
3401
3402	for (i = 0; i < prog->len; i++) {
3403		code = insns[i].code;
3404
3405		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3406			insns[i].code = BPF_JMP | BPF_CALL;
3407			insns[i].imm = BPF_FUNC_tail_call;
3408			/* fall-through */
3409		}
3410		if (code == (BPF_JMP | BPF_CALL) ||
3411		    code == (BPF_JMP | BPF_CALL_ARGS)) {
3412			if (code == (BPF_JMP | BPF_CALL_ARGS))
3413				insns[i].code = BPF_JMP | BPF_CALL;
3414			if (!bpf_dump_raw_ok(f_cred))
3415				insns[i].imm = 0;
3416			continue;
3417		}
3418		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3419			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3420			continue;
3421		}
3422
3423		if (code != (BPF_LD | BPF_IMM | BPF_DW))
3424			continue;
3425
3426		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3427		map = bpf_map_from_imm(prog, imm, &off, &type);
3428		if (map) {
3429			insns[i].src_reg = type;
3430			insns[i].imm = map->id;
3431			insns[i + 1].imm = off;
3432			continue;
3433		}
3434	}
3435
3436	return insns;
3437}
3438
3439static int set_info_rec_size(struct bpf_prog_info *info)
3440{
3441	/*
3442	 * Ensure info.*_rec_size is the same as kernel expected size
3443	 *
3444	 * or
3445	 *
3446	 * Only allow zero *_rec_size if both _rec_size and _cnt are
3447	 * zero.  In this case, the kernel will set the expected
3448	 * _rec_size back to the info.
3449	 */
3450
3451	if ((info->nr_func_info || info->func_info_rec_size) &&
3452	    info->func_info_rec_size != sizeof(struct bpf_func_info))
3453		return -EINVAL;
3454
3455	if ((info->nr_line_info || info->line_info_rec_size) &&
3456	    info->line_info_rec_size != sizeof(struct bpf_line_info))
3457		return -EINVAL;
3458
3459	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3460	    info->jited_line_info_rec_size != sizeof(__u64))
3461		return -EINVAL;
3462
3463	info->func_info_rec_size = sizeof(struct bpf_func_info);
3464	info->line_info_rec_size = sizeof(struct bpf_line_info);
3465	info->jited_line_info_rec_size = sizeof(__u64);
3466
3467	return 0;
3468}
3469
3470static int bpf_prog_get_info_by_fd(struct file *file,
3471				   struct bpf_prog *prog,
3472				   const union bpf_attr *attr,
3473				   union bpf_attr __user *uattr)
3474{
3475	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3476	struct bpf_prog_info info;
3477	u32 info_len = attr->info.info_len;
3478	struct bpf_prog_stats stats;
3479	char __user *uinsns;
3480	u32 ulen;
3481	int err;
3482
3483	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3484	if (err)
3485		return err;
3486	info_len = min_t(u32, sizeof(info), info_len);
3487
3488	memset(&info, 0, sizeof(info));
3489	if (copy_from_user(&info, uinfo, info_len))
3490		return -EFAULT;
3491
3492	info.type = prog->type;
3493	info.id = prog->aux->id;
3494	info.load_time = prog->aux->load_time;
3495	info.created_by_uid = from_kuid_munged(current_user_ns(),
3496					       prog->aux->user->uid);
3497	info.gpl_compatible = prog->gpl_compatible;
3498
3499	memcpy(info.tag, prog->tag, sizeof(prog->tag));
3500	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3501
3502	mutex_lock(&prog->aux->used_maps_mutex);
3503	ulen = info.nr_map_ids;
3504	info.nr_map_ids = prog->aux->used_map_cnt;
3505	ulen = min_t(u32, info.nr_map_ids, ulen);
3506	if (ulen) {
3507		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3508		u32 i;
3509
3510		for (i = 0; i < ulen; i++)
3511			if (put_user(prog->aux->used_maps[i]->id,
3512				     &user_map_ids[i])) {
3513				mutex_unlock(&prog->aux->used_maps_mutex);
3514				return -EFAULT;
3515			}
3516	}
3517	mutex_unlock(&prog->aux->used_maps_mutex);
3518
3519	err = set_info_rec_size(&info);
3520	if (err)
3521		return err;
3522
3523	bpf_prog_get_stats(prog, &stats);
3524	info.run_time_ns = stats.nsecs;
3525	info.run_cnt = stats.cnt;
3526	info.recursion_misses = stats.misses;
3527
3528	if (!bpf_capable()) {
3529		info.jited_prog_len = 0;
3530		info.xlated_prog_len = 0;
3531		info.nr_jited_ksyms = 0;
3532		info.nr_jited_func_lens = 0;
3533		info.nr_func_info = 0;
3534		info.nr_line_info = 0;
3535		info.nr_jited_line_info = 0;
3536		goto done;
3537	}
3538
3539	ulen = info.xlated_prog_len;
3540	info.xlated_prog_len = bpf_prog_insn_size(prog);
3541	if (info.xlated_prog_len && ulen) {
3542		struct bpf_insn *insns_sanitized;
3543		bool fault;
3544
3545		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3546			info.xlated_prog_insns = 0;
3547			goto done;
3548		}
3549		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3550		if (!insns_sanitized)
3551			return -ENOMEM;
3552		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3553		ulen = min_t(u32, info.xlated_prog_len, ulen);
3554		fault = copy_to_user(uinsns, insns_sanitized, ulen);
3555		kfree(insns_sanitized);
3556		if (fault)
3557			return -EFAULT;
3558	}
3559
3560	if (bpf_prog_is_dev_bound(prog->aux)) {
3561		err = bpf_prog_offload_info_fill(&info, prog);
3562		if (err)
3563			return err;
3564		goto done;
3565	}
3566
3567	/* NOTE: the following code is supposed to be skipped for offload.
3568	 * bpf_prog_offload_info_fill() is the place to fill similar fields
3569	 * for offload.
3570	 */
3571	ulen = info.jited_prog_len;
3572	if (prog->aux->func_cnt) {
3573		u32 i;
3574
3575		info.jited_prog_len = 0;
3576		for (i = 0; i < prog->aux->func_cnt; i++)
3577			info.jited_prog_len += prog->aux->func[i]->jited_len;
3578	} else {
3579		info.jited_prog_len = prog->jited_len;
3580	}
3581
3582	if (info.jited_prog_len && ulen) {
3583		if (bpf_dump_raw_ok(file->f_cred)) {
3584			uinsns = u64_to_user_ptr(info.jited_prog_insns);
3585			ulen = min_t(u32, info.jited_prog_len, ulen);
3586
3587			/* for multi-function programs, copy the JITed
3588			 * instructions for all the functions
3589			 */
3590			if (prog->aux->func_cnt) {
3591				u32 len, free, i;
3592				u8 *img;
3593
3594				free = ulen;
3595				for (i = 0; i < prog->aux->func_cnt; i++) {
3596					len = prog->aux->func[i]->jited_len;
3597					len = min_t(u32, len, free);
3598					img = (u8 *) prog->aux->func[i]->bpf_func;
3599					if (copy_to_user(uinsns, img, len))
3600						return -EFAULT;
3601					uinsns += len;
3602					free -= len;
3603					if (!free)
3604						break;
3605				}
3606			} else {
3607				if (copy_to_user(uinsns, prog->bpf_func, ulen))
3608					return -EFAULT;
3609			}
3610		} else {
3611			info.jited_prog_insns = 0;
3612		}
3613	}
3614
3615	ulen = info.nr_jited_ksyms;
3616	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3617	if (ulen) {
3618		if (bpf_dump_raw_ok(file->f_cred)) {
3619			unsigned long ksym_addr;
3620			u64 __user *user_ksyms;
3621			u32 i;
3622
3623			/* copy the address of the kernel symbol
3624			 * corresponding to each function
3625			 */
3626			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3627			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3628			if (prog->aux->func_cnt) {
3629				for (i = 0; i < ulen; i++) {
3630					ksym_addr = (unsigned long)
3631						prog->aux->func[i]->bpf_func;
3632					if (put_user((u64) ksym_addr,
3633						     &user_ksyms[i]))
3634						return -EFAULT;
3635				}
3636			} else {
3637				ksym_addr = (unsigned long) prog->bpf_func;
3638				if (put_user((u64) ksym_addr, &user_ksyms[0]))
3639					return -EFAULT;
3640			}
3641		} else {
3642			info.jited_ksyms = 0;
3643		}
3644	}
3645
3646	ulen = info.nr_jited_func_lens;
3647	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3648	if (ulen) {
3649		if (bpf_dump_raw_ok(file->f_cred)) {
3650			u32 __user *user_lens;
3651			u32 func_len, i;
3652
3653			/* copy the JITed image lengths for each function */
3654			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3655			user_lens = u64_to_user_ptr(info.jited_func_lens);
3656			if (prog->aux->func_cnt) {
3657				for (i = 0; i < ulen; i++) {
3658					func_len =
3659						prog->aux->func[i]->jited_len;
3660					if (put_user(func_len, &user_lens[i]))
3661						return -EFAULT;
3662				}
3663			} else {
3664				func_len = prog->jited_len;
3665				if (put_user(func_len, &user_lens[0]))
3666					return -EFAULT;
3667			}
3668		} else {
3669			info.jited_func_lens = 0;
3670		}
3671	}
3672
3673	if (prog->aux->btf)
3674		info.btf_id = btf_obj_id(prog->aux->btf);
3675
3676	ulen = info.nr_func_info;
3677	info.nr_func_info = prog->aux->func_info_cnt;
3678	if (info.nr_func_info && ulen) {
3679		char __user *user_finfo;
3680
3681		user_finfo = u64_to_user_ptr(info.func_info);
3682		ulen = min_t(u32, info.nr_func_info, ulen);
3683		if (copy_to_user(user_finfo, prog->aux->func_info,
3684				 info.func_info_rec_size * ulen))
3685			return -EFAULT;
3686	}
3687
3688	ulen = info.nr_line_info;
3689	info.nr_line_info = prog->aux->nr_linfo;
3690	if (info.nr_line_info && ulen) {
3691		__u8 __user *user_linfo;
3692
3693		user_linfo = u64_to_user_ptr(info.line_info);
3694		ulen = min_t(u32, info.nr_line_info, ulen);
3695		if (copy_to_user(user_linfo, prog->aux->linfo,
3696				 info.line_info_rec_size * ulen))
3697			return -EFAULT;
3698	}
3699
3700	ulen = info.nr_jited_line_info;
3701	if (prog->aux->jited_linfo)
3702		info.nr_jited_line_info = prog->aux->nr_linfo;
3703	else
3704		info.nr_jited_line_info = 0;
3705	if (info.nr_jited_line_info && ulen) {
3706		if (bpf_dump_raw_ok(file->f_cred)) {
3707			__u64 __user *user_linfo;
3708			u32 i;
3709
3710			user_linfo = u64_to_user_ptr(info.jited_line_info);
3711			ulen = min_t(u32, info.nr_jited_line_info, ulen);
3712			for (i = 0; i < ulen; i++) {
3713				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3714					     &user_linfo[i]))
3715					return -EFAULT;
3716			}
3717		} else {
3718			info.jited_line_info = 0;
3719		}
3720	}
3721
3722	ulen = info.nr_prog_tags;
3723	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3724	if (ulen) {
3725		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3726		u32 i;
3727
3728		user_prog_tags = u64_to_user_ptr(info.prog_tags);
3729		ulen = min_t(u32, info.nr_prog_tags, ulen);
3730		if (prog->aux->func_cnt) {
3731			for (i = 0; i < ulen; i++) {
3732				if (copy_to_user(user_prog_tags[i],
3733						 prog->aux->func[i]->tag,
3734						 BPF_TAG_SIZE))
3735					return -EFAULT;
3736			}
3737		} else {
3738			if (copy_to_user(user_prog_tags[0],
3739					 prog->tag, BPF_TAG_SIZE))
3740				return -EFAULT;
3741		}
3742	}
3743
3744done:
3745	if (copy_to_user(uinfo, &info, info_len) ||
3746	    put_user(info_len, &uattr->info.info_len))
3747		return -EFAULT;
3748
3749	return 0;
3750}
3751
3752static int bpf_map_get_info_by_fd(struct file *file,
3753				  struct bpf_map *map,
3754				  const union bpf_attr *attr,
3755				  union bpf_attr __user *uattr)
3756{
3757	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3758	struct bpf_map_info info;
3759	u32 info_len = attr->info.info_len;
3760	int err;
3761
3762	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3763	if (err)
3764		return err;
3765	info_len = min_t(u32, sizeof(info), info_len);
3766
3767	memset(&info, 0, sizeof(info));
3768	info.type = map->map_type;
3769	info.id = map->id;
3770	info.key_size = map->key_size;
3771	info.value_size = map->value_size;
3772	info.max_entries = map->max_entries;
3773	info.map_flags = map->map_flags;
3774	memcpy(info.name, map->name, sizeof(map->name));
3775
3776	if (map->btf) {
3777		info.btf_id = btf_obj_id(map->btf);
3778		info.btf_key_type_id = map->btf_key_type_id;
3779		info.btf_value_type_id = map->btf_value_type_id;
3780	}
3781	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3782
3783	if (bpf_map_is_dev_bound(map)) {
3784		err = bpf_map_offload_info_fill(&info, map);
3785		if (err)
3786			return err;
3787	}
3788
3789	if (copy_to_user(uinfo, &info, info_len) ||
3790	    put_user(info_len, &uattr->info.info_len))
3791		return -EFAULT;
3792
3793	return 0;
3794}
3795
3796static int bpf_btf_get_info_by_fd(struct file *file,
3797				  struct btf *btf,
3798				  const union bpf_attr *attr,
3799				  union bpf_attr __user *uattr)
3800{
3801	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3802	u32 info_len = attr->info.info_len;
3803	int err;
3804
3805	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
3806	if (err)
3807		return err;
3808
3809	return btf_get_info_by_fd(btf, attr, uattr);
3810}
3811
3812static int bpf_link_get_info_by_fd(struct file *file,
3813				  struct bpf_link *link,
3814				  const union bpf_attr *attr,
3815				  union bpf_attr __user *uattr)
3816{
3817	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3818	struct bpf_link_info info;
3819	u32 info_len = attr->info.info_len;
3820	int err;
3821
3822	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3823	if (err)
3824		return err;
3825	info_len = min_t(u32, sizeof(info), info_len);
3826
3827	memset(&info, 0, sizeof(info));
3828	if (copy_from_user(&info, uinfo, info_len))
3829		return -EFAULT;
3830
3831	info.type = link->type;
3832	info.id = link->id;
3833	info.prog_id = link->prog->aux->id;
3834
3835	if (link->ops->fill_link_info) {
3836		err = link->ops->fill_link_info(link, &info);
3837		if (err)
3838			return err;
3839	}
3840
3841	if (copy_to_user(uinfo, &info, info_len) ||
3842	    put_user(info_len, &uattr->info.info_len))
3843		return -EFAULT;
3844
3845	return 0;
3846}
3847
3848
3849#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3850
3851static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3852				  union bpf_attr __user *uattr)
3853{
3854	int ufd = attr->info.bpf_fd;
3855	struct fd f;
3856	int err;
3857
3858	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3859		return -EINVAL;
3860
3861	f = fdget(ufd);
3862	if (!f.file)
3863		return -EBADFD;
3864
3865	if (f.file->f_op == &bpf_prog_fops)
3866		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
3867					      uattr);
3868	else if (f.file->f_op == &bpf_map_fops)
3869		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
3870					     uattr);
3871	else if (f.file->f_op == &btf_fops)
3872		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
3873	else if (f.file->f_op == &bpf_link_fops)
3874		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
3875					      attr, uattr);
3876	else
3877		err = -EINVAL;
3878
3879	fdput(f);
3880	return err;
3881}
3882
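/* Illustrative user-space sketch of BPF_OBJ_GET_INFO_BY_FD for a program
 * fd ("prog_fd" is an assumption; maps, BTF objects and links are queried
 * the same way with struct bpf_map_info etc.):
 *
 *    struct bpf_prog_info info = {};
 *    union bpf_attr attr = {};
 *    int err;
 *
 *    attr.info.bpf_fd   = prog_fd;
 *    attr.info.info_len = sizeof(info);
 *    attr.info.info     = (__u64)(unsigned long)&info;
 *    err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *
 * On success fields such as info.id, info.type, info.tag and info.name are
 * filled in, and the kernel writes the length it actually used back to
 * attr.info.info_len.
 */
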
3883#define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3884
3885static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
3886{
3887	if (CHECK_ATTR(BPF_BTF_LOAD))
3888		return -EINVAL;
3889
3890	if (!bpf_capable())
3891		return -EPERM;
3892
3893	return btf_new_fd(attr, uattr);
3894}
3895
3896#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3897
3898static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3899{
3900	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3901		return -EINVAL;
3902
3903	if (!capable(CAP_SYS_ADMIN))
3904		return -EPERM;
3905
3906	return btf_get_fd_by_id(attr->btf_id);
3907}
3908
3909static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3910				    union bpf_attr __user *uattr,
3911				    u32 prog_id, u32 fd_type,
3912				    const char *buf, u64 probe_offset,
3913				    u64 probe_addr)
3914{
3915	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3916	u32 len = buf ? strlen(buf) : 0, input_len;
3917	int err = 0;
3918
3919	if (put_user(len, &uattr->task_fd_query.buf_len))
3920		return -EFAULT;
3921	input_len = attr->task_fd_query.buf_len;
3922	if (input_len && ubuf) {
3923		if (!len) {
3924			/* nothing to copy, just make ubuf NULL terminated */
3925			char zero = '\0';
3926
3927			if (put_user(zero, ubuf))
3928				return -EFAULT;
3929		} else if (input_len >= len + 1) {
3930			/* ubuf can hold the string with NULL terminator */
3931			if (copy_to_user(ubuf, buf, len + 1))
3932				return -EFAULT;
3933		} else {
3934			/* ubuf cannot hold the string with NULL terminator,
3935			 * do a partial copy with NULL terminator.
3936			 */
3937			char zero = '\0';
3938
3939			err = -ENOSPC;
3940			if (copy_to_user(ubuf, buf, input_len - 1))
3941				return -EFAULT;
3942			if (put_user(zero, ubuf + input_len - 1))
3943				return -EFAULT;
3944		}
3945	}
3946
3947	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3948	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3949	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3950	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3951		return -EFAULT;
3952
3953	return err;
3954}
3955
3956#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3957
3958static int bpf_task_fd_query(const union bpf_attr *attr,
3959			     union bpf_attr __user *uattr)
3960{
3961	pid_t pid = attr->task_fd_query.pid;
3962	u32 fd = attr->task_fd_query.fd;
3963	const struct perf_event *event;
3964	struct task_struct *task;
3965	struct file *file;
3966	int err;
3967
3968	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3969		return -EINVAL;
3970
3971	if (!capable(CAP_SYS_ADMIN))
3972		return -EPERM;
3973
3974	if (attr->task_fd_query.flags != 0)
3975		return -EINVAL;
3976
3977	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3978	if (!task)
3979		return -ENOENT;
3980
3981	err = 0;
3982	file = fget_task(task, fd);
3983	put_task_struct(task);
3984	if (!file)
3985		return -EBADF;
3986
3987	if (file->f_op == &bpf_link_fops) {
3988		struct bpf_link *link = file->private_data;
3989
3990		if (link->ops == &bpf_raw_tp_link_lops) {
3991			struct bpf_raw_tp_link *raw_tp =
3992				container_of(link, struct bpf_raw_tp_link, link);
3993			struct bpf_raw_event_map *btp = raw_tp->btp;
3994
3995			err = bpf_task_fd_query_copy(attr, uattr,
3996						     raw_tp->link.prog->aux->id,
3997						     BPF_FD_TYPE_RAW_TRACEPOINT,
3998						     btp->tp->name, 0, 0);
3999			goto put_file;
4000		}
4001		goto out_not_supp;
4002	}
4003
4004	event = perf_get_event(file);
4005	if (!IS_ERR(event)) {
4006		u64 probe_offset, probe_addr;
4007		u32 prog_id, fd_type;
4008		const char *buf;
4009
4010		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
4011					      &buf, &probe_offset,
4012					      &probe_addr);
4013		if (!err)
4014			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
4015						     fd_type, buf,
4016						     probe_offset,
4017						     probe_addr);
4018		goto put_file;
4019	}
4020
4021out_not_supp:
4022	err = -ENOTSUPP;
4023put_file:
4024	fput(file);
4025	return err;
4026}
4027
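/* Illustrative user-space sketch of BPF_TASK_FD_QUERY (CAP_SYS_ADMIN
 * required).  "pid" and "fd" are assumed to name a perf_event or raw
 * tracepoint link fd in that task:
 *
 *    char buf[256];
 *    union bpf_attr attr = {};
 *    int err;
 *
 *    attr.task_fd_query.pid     = pid;
 *    attr.task_fd_query.fd      = fd;
 *    attr.task_fd_query.buf     = (__u64)(unsigned long)buf;
 *    attr.task_fd_query.buf_len = sizeof(buf);
 *    err = syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 *
 * On success attr.task_fd_query.prog_id, .fd_type, .probe_offset and
 * .probe_addr describe the attachment, and buf holds the tracepoint,
 * kprobe or uprobe name.
 */
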
4028#define BPF_MAP_BATCH_LAST_FIELD batch.flags
4029
4030#define BPF_DO_BATCH(fn)			\
4031	do {					\
4032		if (!fn) {			\
4033			err = -ENOTSUPP;	\
4034			goto err_put;		\
4035		}				\
4036		err = fn(map, attr, uattr);	\
4037	} while (0)
4038
4039static int bpf_map_do_batch(const union bpf_attr *attr,
4040			    union bpf_attr __user *uattr,
4041			    int cmd)
4042{
4043	struct bpf_map *map;
4044	int err, ufd;
4045	struct fd f;
4046
4047	if (CHECK_ATTR(BPF_MAP_BATCH))
4048		return -EINVAL;
4049
4050	ufd = attr->batch.map_fd;
4051	f = fdget(ufd);
4052	map = __bpf_map_get(f);
4053	if (IS_ERR(map))
4054		return PTR_ERR(map);
4055
4056	if ((cmd == BPF_MAP_LOOKUP_BATCH ||
4057	     cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
4058	    !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4059		err = -EPERM;
4060		goto err_put;
4061	}
4062
4063	if (cmd != BPF_MAP_LOOKUP_BATCH &&
4064	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4065		err = -EPERM;
4066		goto err_put;
4067	}
4068
4069	if (cmd == BPF_MAP_LOOKUP_BATCH)
4070		BPF_DO_BATCH(map->ops->map_lookup_batch);
4071	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
4072		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
4073	else if (cmd == BPF_MAP_UPDATE_BATCH)
4074		BPF_DO_BATCH(map->ops->map_update_batch);
4075	else
4076		BPF_DO_BATCH(map->ops->map_delete_batch);
4077
4078err_put:
4079	fdput(f);
4080	return err;
4081}
4082
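/* Illustrative user-space sketch of BPF_MAP_LOOKUP_BATCH.  "map_fd" is an
 * assumption, as are the 4-byte keys and 8-byte values; "cursor" is opaque
 * resume state whose exact size depends on the map type, so it is sized
 * generously here (libbpf's bpf_map_lookup_batch() hides these details):
 *
 *    __u32 keys[64];
 *    __u64 values[64];
 *    char cursor[64];
 *    union bpf_attr attr = {};
 *    int err;
 *
 *    attr.batch.map_fd    = map_fd;
 *    attr.batch.keys      = (__u64)(unsigned long)keys;
 *    attr.batch.values    = (__u64)(unsigned long)values;
 *    attr.batch.count     = 64;
 *    attr.batch.out_batch = (__u64)(unsigned long)cursor;
 *    err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *
 * On return attr.batch.count holds the number of elements copied; feeding
 * the cursor back through attr.batch.in_batch continues the walk, and an
 * ENOENT failure marks the final batch.
 */
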
4083static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
4084				   struct bpf_prog *prog)
4085{
4086	if (attr->link_create.attach_type != prog->expected_attach_type)
4087		return -EINVAL;
4088
4089	if (prog->expected_attach_type == BPF_TRACE_ITER)
4090		return bpf_iter_link_attach(attr, uattr, prog);
4091	else if (prog->type == BPF_PROG_TYPE_EXT)
4092		return bpf_tracing_prog_attach(prog,
4093					       attr->link_create.target_fd,
4094					       attr->link_create.target_btf_id);
4095	return -EINVAL;
4096}
4097
4098#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
4099static int link_create(union bpf_attr *attr, bpfptr_t uattr)
4100{
4101	enum bpf_prog_type ptype;
4102	struct bpf_prog *prog;
4103	int ret;
4104
4105	if (CHECK_ATTR(BPF_LINK_CREATE))
4106		return -EINVAL;
4107
4108	prog = bpf_prog_get(attr->link_create.prog_fd);
4109	if (IS_ERR(prog))
4110		return PTR_ERR(prog);
4111
4112	ret = bpf_prog_attach_check_attach_type(prog,
4113						attr->link_create.attach_type);
4114	if (ret)
4115		goto out;
4116
4117	if (prog->type == BPF_PROG_TYPE_EXT) {
4118		ret = tracing_bpf_link_attach(attr, uattr, prog);
4119		goto out;
4120	}
4121
4122	ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4123	if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4124		ret = -EINVAL;
4125		goto out;
4126	}
4127
4128	switch (ptype) {
4129	case BPF_PROG_TYPE_CGROUP_SKB:
4130	case BPF_PROG_TYPE_CGROUP_SOCK:
4131	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4132	case BPF_PROG_TYPE_SOCK_OPS:
4133	case BPF_PROG_TYPE_CGROUP_DEVICE:
4134	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4135	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4136		ret = cgroup_bpf_link_attach(attr, prog);
4137		break;
4138	case BPF_PROG_TYPE_TRACING:
4139		ret = tracing_bpf_link_attach(attr, uattr, prog);
4140		break;
4141	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4142	case BPF_PROG_TYPE_SK_LOOKUP:
4143		ret = netns_bpf_link_create(attr, prog);
4144		break;
4145#ifdef CONFIG_NET
4146	case BPF_PROG_TYPE_XDP:
4147		ret = bpf_xdp_link_attach(attr, prog);
4148		break;
4149#endif
4150	default:
4151		ret = -EINVAL;
4152	}
4153
4154out:
4155	if (ret < 0)
4156		bpf_prog_put(prog);
4157	return ret;
4158}
4159
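/* Illustrative user-space sketch of BPF_LINK_CREATE for a cgroup program
 * ("cgroup_fd" and "prog_fd" are assumptions; other program types use
 * different target_fd/attach_type values):
 *
 *    union bpf_attr attr = {};
 *    int link_fd;
 *
 *    attr.link_create.prog_fd     = prog_fd;
 *    attr.link_create.target_fd   = cgroup_fd;
 *    attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
 *    link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *
 * Unlike BPF_PROG_ATTACH, the attachment lasts only as long as the
 * returned link fd (or a bpffs pin of it) exists, and the underlying
 * program can later be swapped with BPF_LINK_UPDATE below.
 */
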
4160#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4161
4162static int link_update(union bpf_attr *attr)
4163{
4164	struct bpf_prog *old_prog = NULL, *new_prog;
4165	struct bpf_link *link;
4166	u32 flags;
4167	int ret;
4168
4169	if (CHECK_ATTR(BPF_LINK_UPDATE))
4170		return -EINVAL;
4171
4172	flags = attr->link_update.flags;
4173	if (flags & ~BPF_F_REPLACE)
4174		return -EINVAL;
4175
4176	link = bpf_link_get_from_fd(attr->link_update.link_fd);
4177	if (IS_ERR(link))
4178		return PTR_ERR(link);
4179
4180	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4181	if (IS_ERR(new_prog)) {
4182		ret = PTR_ERR(new_prog);
4183		goto out_put_link;
4184	}
4185
4186	if (flags & BPF_F_REPLACE) {
4187		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4188		if (IS_ERR(old_prog)) {
4189			ret = PTR_ERR(old_prog);
4190			old_prog = NULL;
4191			goto out_put_progs;
4192		}
4193	} else if (attr->link_update.old_prog_fd) {
4194		ret = -EINVAL;
4195		goto out_put_progs;
4196	}
4197
4198	if (link->ops->update_prog)
4199		ret = link->ops->update_prog(link, new_prog, old_prog);
4200	else
4201		ret = -EINVAL;
4202
4203out_put_progs:
4204	if (old_prog)
4205		bpf_prog_put(old_prog);
4206	if (ret)
4207		bpf_prog_put(new_prog);
4208out_put_link:
4209	bpf_link_put(link);
4210	return ret;
4211}
4212
4213#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4214
4215static int link_detach(union bpf_attr *attr)
4216{
4217	struct bpf_link *link;
4218	int ret;
4219
4220	if (CHECK_ATTR(BPF_LINK_DETACH))
4221		return -EINVAL;
4222
4223	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4224	if (IS_ERR(link))
4225		return PTR_ERR(link);
4226
4227	if (link->ops->detach)
4228		ret = link->ops->detach(link);
4229	else
4230		ret = -EOPNOTSUPP;
4231
4232	bpf_link_put(link);
4233	return ret;
4234}
4235
4236static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4237{
4238	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4239}
4240
4241struct bpf_link *bpf_link_by_id(u32 id)
4242{
4243	struct bpf_link *link;
4244
4245	if (!id)
4246		return ERR_PTR(-ENOENT);
4247
4248	spin_lock_bh(&link_idr_lock);
4249	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
4250	link = idr_find(&link_idr, id);
4251	if (link) {
4252		if (link->id)
4253			link = bpf_link_inc_not_zero(link);
4254		else
4255			link = ERR_PTR(-EAGAIN);
4256	} else {
4257		link = ERR_PTR(-ENOENT);
4258	}
4259	spin_unlock_bh(&link_idr_lock);
4260	return link;
4261}
4262
4263#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4264
4265static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4266{
4267	struct bpf_link *link;
4268	u32 id = attr->link_id;
4269	int fd;
4270
4271	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4272		return -EINVAL;
4273
4274	if (!capable(CAP_SYS_ADMIN))
4275		return -EPERM;
4276
4277	link = bpf_link_by_id(id);
4278	if (IS_ERR(link))
4279		return PTR_ERR(link);
4280
4281	fd = bpf_link_new_fd(link);
4282	if (fd < 0)
4283		bpf_link_put(link);
4284
4285	return fd;
4286}
4287
4288DEFINE_MUTEX(bpf_stats_enabled_mutex);
4289
4290static int bpf_stats_release(struct inode *inode, struct file *file)
4291{
4292	mutex_lock(&bpf_stats_enabled_mutex);
4293	static_key_slow_dec(&bpf_stats_enabled_key.key);
4294	mutex_unlock(&bpf_stats_enabled_mutex);
4295	return 0;
4296}
4297
4298static const struct file_operations bpf_stats_fops = {
4299	.release = bpf_stats_release,
4300};
4301
4302static int bpf_enable_runtime_stats(void)
4303{
4304	int fd;
4305
4306	mutex_lock(&bpf_stats_enabled_mutex);
4307
4308	/* Set a very high limit to avoid overflow */
4309	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4310		mutex_unlock(&bpf_stats_enabled_mutex);
4311		return -EBUSY;
4312	}
4313
4314	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4315	if (fd >= 0)
4316		static_key_slow_inc(&bpf_stats_enabled_key.key);
4317
4318	mutex_unlock(&bpf_stats_enabled_mutex);
4319	return fd;
4320}
4321
4322#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4323
4324static int bpf_enable_stats(union bpf_attr *attr)
4325{
4326
4327	if (CHECK_ATTR(BPF_ENABLE_STATS))
4328		return -EINVAL;
4329
4330	if (!capable(CAP_SYS_ADMIN))
4331		return -EPERM;
4332
4333	switch (attr->enable_stats.type) {
4334	case BPF_STATS_RUN_TIME:
4335		return bpf_enable_runtime_stats();
4336	default:
4337		break;
4338	}
4339	return -EINVAL;
4340}
4341
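/* Illustrative user-space sketch: run-time statistics stay enabled for as
 * long as the fd returned by BPF_ENABLE_STATS is kept open, since the
 * release handler above decrements the static key (CAP_SYS_ADMIN
 * required):
 *
 *    union bpf_attr attr = {};
 *    int stats_fd;
 *
 *    attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *    stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *
 * While stats_fd is open, the run_time_ns and run_cnt values reported
 * through bpf_prog_get_info_by_fd() accumulate.
 */
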
4342#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4343
4344static int bpf_iter_create(union bpf_attr *attr)
4345{
4346	struct bpf_link *link;
4347	int err;
4348
4349	if (CHECK_ATTR(BPF_ITER_CREATE))
4350		return -EINVAL;
4351
4352	if (attr->iter_create.flags)
4353		return -EINVAL;
4354
4355	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4356	if (IS_ERR(link))
4357		return PTR_ERR(link);
4358
4359	err = bpf_iter_new_fd(link);
4360	bpf_link_put(link);
4361
4362	return err;
4363}
4364
4365#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4366
4367static int bpf_prog_bind_map(union bpf_attr *attr)
4368{
4369	struct bpf_prog *prog;
4370	struct bpf_map *map;
4371	struct bpf_map **used_maps_old, **used_maps_new;
4372	int i, ret = 0;
4373
4374	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4375		return -EINVAL;
4376
4377	if (attr->prog_bind_map.flags)
4378		return -EINVAL;
4379
4380	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4381	if (IS_ERR(prog))
4382		return PTR_ERR(prog);
4383
4384	map = bpf_map_get(attr->prog_bind_map.map_fd);
4385	if (IS_ERR(map)) {
4386		ret = PTR_ERR(map);
4387		goto out_prog_put;
4388	}
4389
4390	mutex_lock(&prog->aux->used_maps_mutex);
4391
4392	used_maps_old = prog->aux->used_maps;
4393
4394	for (i = 0; i < prog->aux->used_map_cnt; i++)
4395		if (used_maps_old[i] == map) {
4396			bpf_map_put(map);
4397			goto out_unlock;
4398		}
4399
4400	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4401				      sizeof(used_maps_new[0]),
4402				      GFP_KERNEL);
4403	if (!used_maps_new) {
4404		ret = -ENOMEM;
4405		goto out_unlock;
4406	}
4407
4408	memcpy(used_maps_new, used_maps_old,
4409	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4410	used_maps_new[prog->aux->used_map_cnt] = map;
4411
4412	prog->aux->used_map_cnt++;
4413	prog->aux->used_maps = used_maps_new;
4414
4415	kfree(used_maps_old);
4416
4417out_unlock:
4418	mutex_unlock(&prog->aux->used_maps_mutex);
4419
4420	if (ret)
4421		bpf_map_put(map);
4422out_prog_put:
4423	bpf_prog_put(prog);
4424	return ret;
4425}
4426
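/* Illustrative user-space sketch of BPF_PROG_BIND_MAP, which ties a map
 * that the program never references in its instructions (for example a
 * metadata map) to the program's lifetime ("prog_fd" and "map_fd" are
 * assumptions):
 *
 *    union bpf_attr attr = {};
 *    int err;
 *
 *    attr.prog_bind_map.prog_fd = prog_fd;
 *    attr.prog_bind_map.map_fd  = map_fd;
 *    err = syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 */
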
4427static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
4428{
4429	union bpf_attr attr;
4430	int err;
4431
4432	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4433		return -EPERM;
4434
4435	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4436	if (err)
4437		return err;
4438	size = min_t(u32, size, sizeof(attr));
4439
4440	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
4441	memset(&attr, 0, sizeof(attr));
4442	if (copy_from_bpfptr(&attr, uattr, size) != 0)
4443		return -EFAULT;
4444
4445	err = security_bpf(cmd, &attr, size);
4446	if (err < 0)
4447		return err;
4448
4449	switch (cmd) {
4450	case BPF_MAP_CREATE:
4451		err = map_create(&attr);
4452		break;
4453	case BPF_MAP_LOOKUP_ELEM:
4454		err = map_lookup_elem(&attr);
4455		break;
4456	case BPF_MAP_UPDATE_ELEM:
4457		err = map_update_elem(&attr, uattr);
4458		break;
4459	case BPF_MAP_DELETE_ELEM:
4460		err = map_delete_elem(&attr);
4461		break;
4462	case BPF_MAP_GET_NEXT_KEY:
4463		err = map_get_next_key(&attr);
4464		break;
4465	case BPF_MAP_FREEZE:
4466		err = map_freeze(&attr);
4467		break;
4468	case BPF_PROG_LOAD:
4469		err = bpf_prog_load(&attr, uattr);
4470		break;
4471	case BPF_OBJ_PIN:
4472		err = bpf_obj_pin(&attr);
4473		break;
4474	case BPF_OBJ_GET:
4475		err = bpf_obj_get(&attr);
4476		break;
4477	case BPF_PROG_ATTACH:
4478		err = bpf_prog_attach(&attr);
4479		break;
4480	case BPF_PROG_DETACH:
4481		err = bpf_prog_detach(&attr);
4482		break;
4483	case BPF_PROG_QUERY:
4484		err = bpf_prog_query(&attr, uattr.user);
4485		break;
4486	case BPF_PROG_TEST_RUN:
4487		err = bpf_prog_test_run(&attr, uattr.user);
4488		break;
4489	case BPF_PROG_GET_NEXT_ID:
4490		err = bpf_obj_get_next_id(&attr, uattr.user,
4491					  &prog_idr, &prog_idr_lock);
4492		break;
4493	case BPF_MAP_GET_NEXT_ID:
4494		err = bpf_obj_get_next_id(&attr, uattr.user,
4495					  &map_idr, &map_idr_lock);
4496		break;
4497	case BPF_BTF_GET_NEXT_ID:
4498		err = bpf_obj_get_next_id(&attr, uattr.user,
4499					  &btf_idr, &btf_idr_lock);
4500		break;
4501	case BPF_PROG_GET_FD_BY_ID:
4502		err = bpf_prog_get_fd_by_id(&attr);
4503		break;
4504	case BPF_MAP_GET_FD_BY_ID:
4505		err = bpf_map_get_fd_by_id(&attr);
4506		break;
4507	case BPF_OBJ_GET_INFO_BY_FD:
4508		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
4509		break;
4510	case BPF_RAW_TRACEPOINT_OPEN:
4511		err = bpf_raw_tracepoint_open(&attr);
4512		break;
4513	case BPF_BTF_LOAD:
4514		err = bpf_btf_load(&attr, uattr);
4515		break;
4516	case BPF_BTF_GET_FD_BY_ID:
4517		err = bpf_btf_get_fd_by_id(&attr);
4518		break;
4519	case BPF_TASK_FD_QUERY:
4520		err = bpf_task_fd_query(&attr, uattr.user);
4521		break;
4522	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4523		err = map_lookup_and_delete_elem(&attr);
4524		break;
4525	case BPF_MAP_LOOKUP_BATCH:
4526		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
4527		break;
4528	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4529		err = bpf_map_do_batch(&attr, uattr.user,
4530				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4531		break;
4532	case BPF_MAP_UPDATE_BATCH:
4533		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
4534		break;
4535	case BPF_MAP_DELETE_BATCH:
4536		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
4537		break;
4538	case BPF_LINK_CREATE:
4539		err = link_create(&attr, uattr);
4540		break;
4541	case BPF_LINK_UPDATE:
4542		err = link_update(&attr);
4543		break;
4544	case BPF_LINK_GET_FD_BY_ID:
4545		err = bpf_link_get_fd_by_id(&attr);
4546		break;
4547	case BPF_LINK_GET_NEXT_ID:
4548		err = bpf_obj_get_next_id(&attr, uattr.user,
4549					  &link_idr, &link_idr_lock);
4550		break;
4551	case BPF_ENABLE_STATS:
4552		err = bpf_enable_stats(&attr);
4553		break;
4554	case BPF_ITER_CREATE:
4555		err = bpf_iter_create(&attr);
4556		break;
4557	case BPF_LINK_DETACH:
4558		err = link_detach(&attr);
4559		break;
4560	case BPF_PROG_BIND_MAP:
4561		err = bpf_prog_bind_map(&attr);
4562		break;
4563	default:
4564		err = -EINVAL;
4565		break;
4566	}
4567
4568	return err;
4569}
4570
4571SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4572{
4573	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
4574}
4575
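/* libc typically provides no bpf() wrapper, so user space invokes the
 * syscall directly; a minimal sketch, assuming <unistd.h>,
 * <sys/syscall.h> and <linux/bpf.h>:
 *
 *    static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *                              unsigned int size)
 *    {
 *        return syscall(__NR_bpf, cmd, attr, size);
 *    }
 */
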
4576static bool syscall_prog_is_valid_access(int off, int size,
4577					 enum bpf_access_type type,
4578					 const struct bpf_prog *prog,
4579					 struct bpf_insn_access_aux *info)
4580{
4581	if (off < 0 || off >= U16_MAX)
4582		return false;
4583	if (off % size != 0)
4584		return false;
4585	return true;
4586}
4587
4588BPF_CALL_3(bpf_sys_bpf, int, cmd, void *, attr, u32, attr_size)
4589{
4590	switch (cmd) {
4591	case BPF_MAP_CREATE:
4592	case BPF_MAP_UPDATE_ELEM:
4593	case BPF_MAP_FREEZE:
4594	case BPF_PROG_LOAD:
4595	case BPF_BTF_LOAD:
4596		break;
4597	/* case BPF_PROG_TEST_RUN:
4598	 * is not part of this list to prevent recursive test_run
4599	 */
4600	default:
4601		return -EINVAL;
4602	}
4603	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
4604}
4605
4606static const struct bpf_func_proto bpf_sys_bpf_proto = {
4607	.func		= bpf_sys_bpf,
4608	.gpl_only	= false,
4609	.ret_type	= RET_INTEGER,
4610	.arg1_type	= ARG_ANYTHING,
4611	.arg2_type	= ARG_PTR_TO_MEM,
4612	.arg3_type	= ARG_CONST_SIZE,
4613};
4614
4615const struct bpf_func_proto * __weak
4616tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4617{
4618	return bpf_base_func_proto(func_id);
4619}
4620
4621BPF_CALL_1(bpf_sys_close, u32, fd)
4622{
4623	/* When bpf program calls this helper there should not be
4624	 * an fdget() without matching completed fdput().
4625	 * This helper is allowed in the following callchain only:
4626	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
4627	 */
4628	return close_fd(fd);
4629}
4630
4631static const struct bpf_func_proto bpf_sys_close_proto = {
4632	.func		= bpf_sys_close,
4633	.gpl_only	= false,
4634	.ret_type	= RET_INTEGER,
4635	.arg1_type	= ARG_ANYTHING,
4636};
4637
4638static const struct bpf_func_proto *
4639syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4640{
4641	switch (func_id) {
4642	case BPF_FUNC_sys_bpf:
4643		return &bpf_sys_bpf_proto;
4644	case BPF_FUNC_btf_find_by_name_kind:
4645		return &bpf_btf_find_by_name_kind_proto;
4646	case BPF_FUNC_sys_close:
4647		return &bpf_sys_close_proto;
4648	default:
4649		return tracing_prog_func_proto(func_id, prog);
4650	}
4651}
4652
4653const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
4654	.get_func_proto  = syscall_prog_func_proto,
4655	.is_valid_access = syscall_prog_is_valid_access,
4656};
4657
4658const struct bpf_prog_ops bpf_syscall_prog_ops = {
4659	.test_run = bpf_prog_test_run_syscall,
4660};