/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
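
/* Editor's example (not part of this file): a map implementation
 * registers itself at boot via the function above; a minimal sketch
 * modeled on kernel/bpf/arraymap.c of this era, where array_ops is
 * that file's struct bpf_map_ops instance:
 *
 *	static struct bpf_map_type_list array_type __read_mostly = {
 *		.ops = &array_ops,
 *		.type = BPF_MAP_TYPE_ARRAY,
 *	};
 *
 *	static int __init register_array_map(void)
 *	{
 *		bpf_register_map_type(&array_type);
 *		return 0;
 *	}
 *	late_initcall(register_array_map);
 */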

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
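
/* Editor's note: the limit is in pages. For example, with the common
 * default RLIMIT_MEMLOCK of 64 KiB and 4 KiB pages, memlock_limit is 16,
 * so a request that would push the user's locked_vm past 16 pages fails
 * with -EPERM unless the rlimit is raised (e.g. via setrlimit()).
 */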

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	bpf_map_put_with_uref(filp->private_data);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
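
/* Editor's note: for BPF_MAP_CREATE, whose last used field is map_flags
 * (see below), CHECK_ATTR(BPF_MAP_CREATE) expands to a memchr_inv() scan
 * of every byte of 'union bpf_attr' past that field, i.e. roughly:
 *
 *	memchr_inv((void *) &attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 *
 * so the check is true (and the command rejected) iff user space set any
 * attribute this command does not understand.
 */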

#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
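
/* Editor's example (a user-space sketch, not part of this file): there is
 * no libc wrapper for bpf(2) at this point, so the raw syscall is used;
 * assumes the UAPI <linux/bpf.h>, <sys/syscall.h> and <unistd.h>:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	if (map_fd < 0)
 *		perror("BPF_MAP_CREATE");
 */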

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
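
/* Editor's example (user-space sketch): key/value pointers travel in
 * __aligned_u64 fields, hence the double cast; lookup of an absent key
 * fails with -ENOENT:
 *
 *	__u32 key = 1;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	if (syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)) == 0)
 *		printf("value = %llu\n", value);
 */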

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete, otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
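
/* Editor's example (user-space sketch): attr->flags selects the update
 * semantics: BPF_ANY (create or overwrite), BPF_NOEXIST (create only) or
 * BPF_EXIST (overwrite only):
 *
 *	__u32 key = 1;
 *	__u64 value = 42;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *		.flags  = BPF_ANY,
 *	};
 *	int err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */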

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
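
/* Editor's example (user-space sketch): the canonical way to walk a map
 * is to start from a key assumed not to be in the map and loop until
 * -ENOENT signals the end:
 *
 *	__u32 key = -1, next_key;	// assumed absent from the map
 *	union bpf_attr attr = {
 *		.map_fd   = map_fd,
 *		.key      = (__u64)(unsigned long)&key,
 *		.next_key = (__u64)(unsigned long)&next_key,
 *	};
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0) {
 *		// ... use next_key ...
 *		key = next_key;
 *	}
 */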

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program has passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call
			 * instructions and it passed bpf_check(), which means
			 * that ops->get_func_proto must have been supplied;
			 * check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
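
/* Editor's note: e.g. a verifier-approved call to bpf_map_lookup_elem()
 * arrives here as a (BPF_JMP | BPF_CALL) insn with
 * imm == BPF_FUNC_map_lookup_elem, and leaves with imm rewritten to the
 * helper's address relative to __bpf_call_base, so the interpreter can
 * compute the target with one addition instead of a per-call lookup.
 */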

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}

static void __prog_put_common(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __prog_put_common);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		__prog_put_common(&prog->aux->rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

655
656static struct bpf_prog *__bpf_prog_get(struct fd f)
657{
658	if (!f.file)
659		return ERR_PTR(-EBADF);
660	if (f.file->f_op != &bpf_prog_fops) {
661		fdput(f);
662		return ERR_PTR(-EINVAL);
663	}
664
665	return f.file->private_data;
666}
667
668struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
669{
670	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
671		atomic_dec(&prog->aux->refcnt);
672		return ERR_PTR(-EBUSY);
673	}
674	return prog;
675}
676
677/* called by sockets/tracing/seccomp before attaching program to an event
678 * pairs with bpf_prog_put()
679 */
680struct bpf_prog *bpf_prog_get(u32 ufd)
681{
682	struct fd f = fdget(ufd);
683	struct bpf_prog *prog;
684
685	prog = __bpf_prog_get(f);
686	if (IS_ERR(prog))
687		return prog;
688
689	prog = bpf_prog_inc(prog);
690	fdput(f);
691
692	return prog;
693}
694EXPORT_SYMBOL_GPL(bpf_prog_get);
695
696/* last field in 'union bpf_attr' used by this command */
697#define	BPF_PROG_LOAD_LAST_FIELD kern_version
698
699static int bpf_prog_load(union bpf_attr *attr)
700{
701	enum bpf_prog_type type = attr->prog_type;
702	struct bpf_prog *prog;
703	int err;
704	char license[128];
705	bool is_gpl;
706
707	if (CHECK_ATTR(BPF_PROG_LOAD))
708		return -EINVAL;
709
710	/* copy eBPF program license from user space */
711	if (strncpy_from_user(license, u64_to_ptr(attr->license),
712			      sizeof(license) - 1) < 0)
713		return -EFAULT;
714	license[sizeof(license) - 1] = 0;
715
716	/* eBPF programs must be GPL compatible to use GPL-ed functions */
717	is_gpl = license_is_gpl_compatible(license);
718
719	if (attr->insn_cnt >= BPF_MAXINSNS)
720		return -EINVAL;
721
722	if (type == BPF_PROG_TYPE_KPROBE &&
723	    attr->kern_version != LINUX_VERSION_CODE)
724		return -EINVAL;
725
726	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
727		return -EPERM;
728
729	/* plain bpf_prog allocation */
730	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
731	if (!prog)
732		return -ENOMEM;
733
734	err = bpf_prog_charge_memlock(prog);
735	if (err)
736		goto free_prog_nouncharge;
737
738	prog->len = attr->insn_cnt;
739
740	err = -EFAULT;
741	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
742			   prog->len * sizeof(struct bpf_insn)) != 0)
743		goto free_prog;
744
745	prog->orig_prog = NULL;
746	prog->jited = 0;
747
748	atomic_set(&prog->aux->refcnt, 1);
749	prog->gpl_compatible = is_gpl ? 1 : 0;
750
751	/* find program type: socket_filter vs tracing_filter */
752	err = find_prog_type(type, prog);
753	if (err < 0)
754		goto free_prog;
755
756	/* run eBPF verifier */
757	err = bpf_check(&prog, attr);
758	if (err < 0)
759		goto free_used_maps;
760
761	/* fixup BPF_CALL->imm field */
762	fixup_bpf_calls(prog);
763
764	/* eBPF program is ready to be JITed */
765	err = bpf_prog_select_runtime(prog);
766	if (err < 0)
767		goto free_used_maps;
768
769	err = bpf_prog_new_fd(prog);
770	if (err < 0)
771		/* failed to allocate fd */
772		goto free_used_maps;
773
774	return err;
775
776free_used_maps:
777	free_used_maps(prog->aux);
778free_prog:
779	bpf_prog_uncharge_memlock(prog);
780free_prog_nouncharge:
781	bpf_prog_free(prog);
782	return err;
783}
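
/* Editor's example (user-space sketch): loading a minimal "return 0"
 * socket filter; the two instructions are spelled out by hand since the
 * BPF_MOV64_IMM()/BPF_EXIT_INSN() convenience macros live in the
 * non-UAPI <linux/filter.h>:
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */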

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
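
/* Editor's note: since libc ships no bpf(2) wrapper at this point, user
 * space conventionally defines a thin helper around the raw syscall,
 * much as tools/lib/bpf does:
 *
 *	static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *				  unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 */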