kernel/bpf/syscall.c (v4.6)
  1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  2 *
  3 * This program is free software; you can redistribute it and/or
  4 * modify it under the terms of version 2 of the GNU General Public
  5 * License as published by the Free Software Foundation.
  6 *
  7 * This program is distributed in the hope that it will be useful, but
  8 * WITHOUT ANY WARRANTY; without even the implied warranty of
  9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 10 * General Public License for more details.
 11 */
 12#include <linux/bpf.h>
 13#include <linux/syscalls.h>
 14#include <linux/slab.h>
 15#include <linux/anon_inodes.h>
 16#include <linux/file.h>
 17#include <linux/license.h>
 18#include <linux/filter.h>
 19#include <linux/version.h>
 20
 21DEFINE_PER_CPU(int, bpf_prog_active);
 22
 23int sysctl_unprivileged_bpf_disabled __read_mostly;
 24
 25static LIST_HEAD(bpf_map_types);
 26
 27static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 28{
 29	struct bpf_map_type_list *tl;
 30	struct bpf_map *map;
 31
 32	list_for_each_entry(tl, &bpf_map_types, list_node) {
 33		if (tl->type == attr->map_type) {
 34			map = tl->ops->map_alloc(attr);
 35			if (IS_ERR(map))
 36				return map;
 37			map->ops = tl->ops;
 38			map->map_type = attr->map_type;
 39			return map;
 40		}
 41	}
 42	return ERR_PTR(-EINVAL);
 43}
 44
 45/* boot time registration of different map implementations */
 46void bpf_register_map_type(struct bpf_map_type_list *tl)
 47{
 48	list_add(&tl->list_node, &bpf_map_types);
 49}
 50
 51int bpf_map_precharge_memlock(u32 pages)
 52{
 53	struct user_struct *user = get_current_user();
 54	unsigned long memlock_limit, cur;
 55
 56	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 57	cur = atomic_long_read(&user->locked_vm);
 58	free_uid(user);
 59	if (cur + pages > memlock_limit)
 60		return -EPERM;
 61	return 0;
 62}
 63
 64static int bpf_map_charge_memlock(struct bpf_map *map)
 65{
 66	struct user_struct *user = get_current_user();
 67	unsigned long memlock_limit;
 68
 69	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 70
 71	atomic_long_add(map->pages, &user->locked_vm);
 72
 73	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
 74		atomic_long_sub(map->pages, &user->locked_vm);
 75		free_uid(user);
 76		return -EPERM;
 77	}
 78	map->user = user;
 79	return 0;
 80}
 81
 82static void bpf_map_uncharge_memlock(struct bpf_map *map)
 83{
 84	struct user_struct *user = map->user;
 85
 86	atomic_long_sub(map->pages, &user->locked_vm);
 87	free_uid(user);
 88}
 89
 90/* called from workqueue */
 91static void bpf_map_free_deferred(struct work_struct *work)
 92{
 93	struct bpf_map *map = container_of(work, struct bpf_map, work);
 94
 95	bpf_map_uncharge_memlock(map);
 96	/* implementation dependent freeing */
 97	map->ops->map_free(map);
 98}
 99
100static void bpf_map_put_uref(struct bpf_map *map)
101{
102	if (atomic_dec_and_test(&map->usercnt)) {
103		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
104			bpf_fd_array_map_clear(map);
105	}
106}
107
108/* decrement map refcnt and schedule it for freeing via workqueue
109 * (underlying map implementation ops->map_free() might sleep)
110 */
111void bpf_map_put(struct bpf_map *map)
112{
113	if (atomic_dec_and_test(&map->refcnt)) {
114		INIT_WORK(&map->work, bpf_map_free_deferred);
115		schedule_work(&map->work);
116	}
117}
118
119void bpf_map_put_with_uref(struct bpf_map *map)
120{
121	bpf_map_put_uref(map);
122	bpf_map_put(map);
123}
124
125static int bpf_map_release(struct inode *inode, struct file *filp)
126{
127	bpf_map_put_with_uref(filp->private_data);
128	return 0;
129}
130
131#ifdef CONFIG_PROC_FS
132static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
133{
134	const struct bpf_map *map = filp->private_data;
135
136	seq_printf(m,
137		   "map_type:\t%u\n"
138		   "key_size:\t%u\n"
139		   "value_size:\t%u\n"
140		   "max_entries:\t%u\n"
141		   "map_flags:\t%#x\n",
142		   map->map_type,
143		   map->key_size,
144		   map->value_size,
145		   map->max_entries,
146		   map->map_flags);
147}
148#endif
149
150static const struct file_operations bpf_map_fops = {
151#ifdef CONFIG_PROC_FS
152	.show_fdinfo	= bpf_map_show_fdinfo,
153#endif
154	.release	= bpf_map_release,
155};
156
157int bpf_map_new_fd(struct bpf_map *map)
158{
159	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
160				O_RDWR | O_CLOEXEC);
161}
162
163/* helper macro to check that unused fields of 'union bpf_attr' are zero */
164#define CHECK_ATTR(CMD) \
165	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
166		   sizeof(attr->CMD##_LAST_FIELD), 0, \
167		   sizeof(*attr) - \
168		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
169		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
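/* For a concrete instance: CHECK_ATTR(BPF_MAP_CREATE) expands CMD##_LAST_FIELD
 * to map_flags, so memchr_inv() scans every byte of 'union bpf_attr' that lies
 * after map_flags and the macro evaluates to true (the command is rejected
 * with -EINVAL) if any of those trailing bytes is non-zero, i.e. user space
 * filled in a field this kernel does not know about.
 */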
170
171#define BPF_MAP_CREATE_LAST_FIELD map_flags
172/* called via syscall */
173static int map_create(union bpf_attr *attr)
174{
175	struct bpf_map *map;
176	int err;
177
178	err = CHECK_ATTR(BPF_MAP_CREATE);
179	if (err)
180		return -EINVAL;
181
182	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
183	map = find_and_alloc_map(attr);
184	if (IS_ERR(map))
185		return PTR_ERR(map);
186
187	atomic_set(&map->refcnt, 1);
188	atomic_set(&map->usercnt, 1);
189
190	err = bpf_map_charge_memlock(map);
191	if (err)
192		goto free_map;
193
194	err = bpf_map_new_fd(map);
195	if (err < 0)
196		/* failed to allocate fd */
197		goto free_map;
198
199	return err;
200
201free_map:
202	map->ops->map_free(map);
203	return err;
204}
205
206/* if error is returned, fd is released.
207 * On success caller should complete fd access with matching fdput()
208 */
209struct bpf_map *__bpf_map_get(struct fd f)
210{
211	if (!f.file)
212		return ERR_PTR(-EBADF);
213	if (f.file->f_op != &bpf_map_fops) {
214		fdput(f);
215		return ERR_PTR(-EINVAL);
216	}
217
218	return f.file->private_data;
219}
220
221/* prog's and map's refcnt limit */
222#define BPF_MAX_REFCNT 32768
223
224struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
225{
226	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
227		atomic_dec(&map->refcnt);
228		return ERR_PTR(-EBUSY);
229	}
230	if (uref)
231		atomic_inc(&map->usercnt);
232	return map;
233}
234
235struct bpf_map *bpf_map_get_with_uref(u32 ufd)
236{
237	struct fd f = fdget(ufd);
238	struct bpf_map *map;
239
240	map = __bpf_map_get(f);
241	if (IS_ERR(map))
242		return map;
243
244	map = bpf_map_inc(map, true);
245	fdput(f);
246
247	return map;
248}
249
250/* helper to convert user pointers passed inside __aligned_u64 fields */
251static void __user *u64_to_ptr(__u64 val)
252{
253	return (void __user *) (unsigned long) val;
254}
255
256int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
257{
258	return -ENOTSUPP;
259}
260
261/* last field in 'union bpf_attr' used by this command */
262#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
263
264static int map_lookup_elem(union bpf_attr *attr)
265{
266	void __user *ukey = u64_to_ptr(attr->key);
267	void __user *uvalue = u64_to_ptr(attr->value);
268	int ufd = attr->map_fd;
269	struct bpf_map *map;
270	void *key, *value, *ptr;
271	u32 value_size;
272	struct fd f;
273	int err;
274
275	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
276		return -EINVAL;
277
278	f = fdget(ufd);
279	map = __bpf_map_get(f);
280	if (IS_ERR(map))
281		return PTR_ERR(map);
282
283	err = -ENOMEM;
284	key = kmalloc(map->key_size, GFP_USER);
285	if (!key)
286		goto err_put;
287
288	err = -EFAULT;
289	if (copy_from_user(key, ukey, map->key_size) != 0)
290		goto free_key;
291
292	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
293	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
294		value_size = round_up(map->value_size, 8) * num_possible_cpus();
295	else
296		value_size = map->value_size;
297
298	err = -ENOMEM;
299	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
300	if (!value)
301		goto free_key;
302
303	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
304		err = bpf_percpu_hash_copy(map, key, value);
305	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
306		err = bpf_percpu_array_copy(map, key, value);
307	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
308		err = bpf_stackmap_copy(map, key, value);
309	} else {
310		rcu_read_lock();
311		ptr = map->ops->map_lookup_elem(map, key);
312		if (ptr)
313			memcpy(value, ptr, value_size);
314		rcu_read_unlock();
315		err = ptr ? 0 : -ENOENT;
316	}
317
318	if (err)
319		goto free_value;
320
321	err = -EFAULT;
322	if (copy_to_user(uvalue, value, value_size) != 0)
323		goto free_value;
324
325	err = 0;
326
327free_value:
328	kfree(value);
329free_key:
330	kfree(key);
331err_put:
332	fdput(f);
333	return err;
334}
335
336#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
337
338static int map_update_elem(union bpf_attr *attr)
339{
340	void __user *ukey = u64_to_ptr(attr->key);
341	void __user *uvalue = u64_to_ptr(attr->value);
342	int ufd = attr->map_fd;
343	struct bpf_map *map;
344	void *key, *value;
345	u32 value_size;
346	struct fd f;
347	int err;
348
349	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
350		return -EINVAL;
351
352	f = fdget(ufd);
353	map = __bpf_map_get(f);
354	if (IS_ERR(map))
355		return PTR_ERR(map);
356
357	err = -ENOMEM;
358	key = kmalloc(map->key_size, GFP_USER);
359	if (!key)
360		goto err_put;
361
362	err = -EFAULT;
363	if (copy_from_user(key, ukey, map->key_size) != 0)
364		goto free_key;
365
366	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
367	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
368		value_size = round_up(map->value_size, 8) * num_possible_cpus();
369	else
370		value_size = map->value_size;
371
372	err = -ENOMEM;
373	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
374	if (!value)
375		goto free_key;
376
377	err = -EFAULT;
378	if (copy_from_user(value, uvalue, value_size) != 0)
379		goto free_value;
380
381	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
382	 * inside bpf map update or delete otherwise deadlocks are possible
383	 */
384	preempt_disable();
385	__this_cpu_inc(bpf_prog_active);
386	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
387		err = bpf_percpu_hash_update(map, key, value, attr->flags);
388	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
389		err = bpf_percpu_array_update(map, key, value, attr->flags);
390	} else {
391		rcu_read_lock();
392		err = map->ops->map_update_elem(map, key, value, attr->flags);
393		rcu_read_unlock();
394	}
395	__this_cpu_dec(bpf_prog_active);
396	preempt_enable();
397
398free_value:
399	kfree(value);
400free_key:
401	kfree(key);
402err_put:
403	fdput(f);
404	return err;
405}
406
407#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
408
409static int map_delete_elem(union bpf_attr *attr)
410{
411	void __user *ukey = u64_to_ptr(attr->key);
412	int ufd = attr->map_fd;
413	struct bpf_map *map;
414	struct fd f;
415	void *key;
416	int err;
417
418	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
419		return -EINVAL;
420
421	f = fdget(ufd);
422	map = __bpf_map_get(f);
423	if (IS_ERR(map))
424		return PTR_ERR(map);
425
426	err = -ENOMEM;
427	key = kmalloc(map->key_size, GFP_USER);
428	if (!key)
429		goto err_put;
430
431	err = -EFAULT;
432	if (copy_from_user(key, ukey, map->key_size) != 0)
433		goto free_key;
434
435	preempt_disable();
436	__this_cpu_inc(bpf_prog_active);
437	rcu_read_lock();
438	err = map->ops->map_delete_elem(map, key);
439	rcu_read_unlock();
440	__this_cpu_dec(bpf_prog_active);
441	preempt_enable();
442
443free_key:
444	kfree(key);
445err_put:
446	fdput(f);
447	return err;
448}
449
450/* last field in 'union bpf_attr' used by this command */
451#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
452
453static int map_get_next_key(union bpf_attr *attr)
454{
455	void __user *ukey = u64_to_ptr(attr->key);
456	void __user *unext_key = u64_to_ptr(attr->next_key);
457	int ufd = attr->map_fd;
458	struct bpf_map *map;
459	void *key, *next_key;
460	struct fd f;
461	int err;
462
463	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
464		return -EINVAL;
465
466	f = fdget(ufd);
467	map = __bpf_map_get(f);
468	if (IS_ERR(map))
469		return PTR_ERR(map);
470
471	err = -ENOMEM;
472	key = kmalloc(map->key_size, GFP_USER);
473	if (!key)
474		goto err_put;
475
476	err = -EFAULT;
477	if (copy_from_user(key, ukey, map->key_size) != 0)
478		goto free_key;
479
480	err = -ENOMEM;
481	next_key = kmalloc(map->key_size, GFP_USER);
482	if (!next_key)
483		goto free_key;
484
485	rcu_read_lock();
486	err = map->ops->map_get_next_key(map, key, next_key);
487	rcu_read_unlock();
488	if (err)
489		goto free_next_key;
490
491	err = -EFAULT;
492	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
493		goto free_next_key;
494
495	err = 0;
496
497free_next_key:
498	kfree(next_key);
499free_key:
500	kfree(key);
501err_put:
502	fdput(f);
503	return err;
504}
505
506static LIST_HEAD(bpf_prog_types);
507
508static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
509{
510	struct bpf_prog_type_list *tl;
511
512	list_for_each_entry(tl, &bpf_prog_types, list_node) {
513		if (tl->type == type) {
514			prog->aux->ops = tl->ops;
515			prog->type = type;
516			return 0;
517		}
518	}
519
520	return -EINVAL;
521}
522
523void bpf_register_prog_type(struct bpf_prog_type_list *tl)
524{
525	list_add(&tl->list_node, &bpf_prog_types);
526}
527
528/* fixup insn->imm field of bpf_call instructions:
529 * if (insn->imm == BPF_FUNC_map_lookup_elem)
530 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
531 * else if (insn->imm == BPF_FUNC_map_update_elem)
532 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
533 * else ...
534 *
535 * this function is called after eBPF program passed verification
536 */
537static void fixup_bpf_calls(struct bpf_prog *prog)
538{
539	const struct bpf_func_proto *fn;
540	int i;
541
542	for (i = 0; i < prog->len; i++) {
543		struct bpf_insn *insn = &prog->insnsi[i];
544
545		if (insn->code == (BPF_JMP | BPF_CALL)) {
546			/* we reach here when program has bpf_call instructions
547			 * and it passed bpf_check(), means that
548			 * ops->get_func_proto must have been supplied, check it
549			 */
550			BUG_ON(!prog->aux->ops->get_func_proto);
551
552			if (insn->imm == BPF_FUNC_get_route_realm)
553				prog->dst_needed = 1;
554			if (insn->imm == BPF_FUNC_get_prandom_u32)
555				bpf_user_rnd_init_once();
556			if (insn->imm == BPF_FUNC_tail_call) {
557				/* mark bpf_tail_call as different opcode
558				 * to avoid conditional branch in
559				 * interpreter for every normal call
560				 * and to prevent accidental JITing by
561				 * JIT compiler that doesn't support
562				 * bpf_tail_call yet
563				 */
564				insn->imm = 0;
565				insn->code |= BPF_X;
566				continue;
567			}
568
569			fn = prog->aux->ops->get_func_proto(insn->imm);
570			/* all functions that have prototype and verifier allowed
571			 * programs to call them, must be real in-kernel functions
572			 */
573			BUG_ON(!fn->func);
574			insn->imm = fn->func - __bpf_call_base;
575		}
576	}
577}
578
579/* drop refcnt on maps used by eBPF program and free auxiliary data */
580static void free_used_maps(struct bpf_prog_aux *aux)
581{
582	int i;
583
584	for (i = 0; i < aux->used_map_cnt; i++)
585		bpf_map_put(aux->used_maps[i]);
586
587	kfree(aux->used_maps);
588}
589
590static int bpf_prog_charge_memlock(struct bpf_prog *prog)
591{
592	struct user_struct *user = get_current_user();
593	unsigned long memlock_limit;
594
595	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
596
597	atomic_long_add(prog->pages, &user->locked_vm);
598	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
599		atomic_long_sub(prog->pages, &user->locked_vm);
600		free_uid(user);
601		return -EPERM;
602	}
603	prog->aux->user = user;
604	return 0;
605}
606
607static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
608{
609	struct user_struct *user = prog->aux->user;
610
611	atomic_long_sub(prog->pages, &user->locked_vm);
612	free_uid(user);
613}
614
615static void __prog_put_common(struct rcu_head *rcu)
616{
617	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
618
619	free_used_maps(aux);
620	bpf_prog_uncharge_memlock(aux->prog);
621	bpf_prog_free(aux->prog);
622}
623
624/* version of bpf_prog_put() that is called after a grace period */
625void bpf_prog_put_rcu(struct bpf_prog *prog)
626{
627	if (atomic_dec_and_test(&prog->aux->refcnt))
628		call_rcu(&prog->aux->rcu, __prog_put_common);
629}
630
631void bpf_prog_put(struct bpf_prog *prog)
632{
633	if (atomic_dec_and_test(&prog->aux->refcnt))
634		__prog_put_common(&prog->aux->rcu);
635}
636EXPORT_SYMBOL_GPL(bpf_prog_put);
637
638static int bpf_prog_release(struct inode *inode, struct file *filp)
639{
640	struct bpf_prog *prog = filp->private_data;
641
642	bpf_prog_put_rcu(prog);
643	return 0;
644}
645
646static const struct file_operations bpf_prog_fops = {
647        .release = bpf_prog_release,
648};
649
650int bpf_prog_new_fd(struct bpf_prog *prog)
651{
652	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
653				O_RDWR | O_CLOEXEC);
654}
655
656static struct bpf_prog *__bpf_prog_get(struct fd f)
657{
658	if (!f.file)
659		return ERR_PTR(-EBADF);
660	if (f.file->f_op != &bpf_prog_fops) {
661		fdput(f);
662		return ERR_PTR(-EINVAL);
663	}
664
665	return f.file->private_data;
666}
667
668struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
669{
670	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
671		atomic_dec(&prog->aux->refcnt);
672		return ERR_PTR(-EBUSY);
673	}
674	return prog;
675}
676
677/* called by sockets/tracing/seccomp before attaching program to an event
678 * pairs with bpf_prog_put()
679 */
680struct bpf_prog *bpf_prog_get(u32 ufd)
681{
682	struct fd f = fdget(ufd);
683	struct bpf_prog *prog;
684
685	prog = __bpf_prog_get(f);
686	if (IS_ERR(prog))
687		return prog;
688
689	prog = bpf_prog_inc(prog);
690	fdput(f);
691
692	return prog;
693}
694EXPORT_SYMBOL_GPL(bpf_prog_get);
695
696/* last field in 'union bpf_attr' used by this command */
697#define	BPF_PROG_LOAD_LAST_FIELD kern_version
698
699static int bpf_prog_load(union bpf_attr *attr)
700{
701	enum bpf_prog_type type = attr->prog_type;
702	struct bpf_prog *prog;
703	int err;
704	char license[128];
705	bool is_gpl;
706
707	if (CHECK_ATTR(BPF_PROG_LOAD))
708		return -EINVAL;
709
710	/* copy eBPF program license from user space */
711	if (strncpy_from_user(license, u64_to_ptr(attr->license),
712			      sizeof(license) - 1) < 0)
713		return -EFAULT;
714	license[sizeof(license) - 1] = 0;
715
716	/* eBPF programs must be GPL compatible to use GPL-ed functions */
717	is_gpl = license_is_gpl_compatible(license);
718
719	if (attr->insn_cnt >= BPF_MAXINSNS)
720		return -EINVAL;
721
722	if (type == BPF_PROG_TYPE_KPROBE &&
723	    attr->kern_version != LINUX_VERSION_CODE)
724		return -EINVAL;
725
726	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
727		return -EPERM;
728
729	/* plain bpf_prog allocation */
730	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
731	if (!prog)
732		return -ENOMEM;
733
734	err = bpf_prog_charge_memlock(prog);
735	if (err)
736		goto free_prog_nouncharge;
737
738	prog->len = attr->insn_cnt;
739
740	err = -EFAULT;
741	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
742			   prog->len * sizeof(struct bpf_insn)) != 0)
743		goto free_prog;
744
745	prog->orig_prog = NULL;
746	prog->jited = 0;
747
748	atomic_set(&prog->aux->refcnt, 1);
749	prog->gpl_compatible = is_gpl ? 1 : 0;
750
751	/* find program type: socket_filter vs tracing_filter */
752	err = find_prog_type(type, prog);
753	if (err < 0)
754		goto free_prog;
755
756	/* run eBPF verifier */
757	err = bpf_check(&prog, attr);
758	if (err < 0)
759		goto free_used_maps;
760
761	/* fixup BPF_CALL->imm field */
762	fixup_bpf_calls(prog);
763
764	/* eBPF program is ready to be JITed */
765	err = bpf_prog_select_runtime(prog);
766	if (err < 0)
767		goto free_used_maps;
768
769	err = bpf_prog_new_fd(prog);
770	if (err < 0)
771		/* failed to allocate fd */
772		goto free_used_maps;
773
774	return err;
775
776free_used_maps:
777	free_used_maps(prog->aux);
778free_prog:
779	bpf_prog_uncharge_memlock(prog);
780free_prog_nouncharge:
781	bpf_prog_free(prog);
782	return err;
783}
784
785#define BPF_OBJ_LAST_FIELD bpf_fd
786
787static int bpf_obj_pin(const union bpf_attr *attr)
788{
789	if (CHECK_ATTR(BPF_OBJ))
790		return -EINVAL;
791
792	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
793}
794
795static int bpf_obj_get(const union bpf_attr *attr)
796{
797	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
798		return -EINVAL;
799
800	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
801}
802
803SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
804{
805	union bpf_attr attr = {};
806	int err;
807
808	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
809		return -EPERM;
810
811	if (!access_ok(VERIFY_READ, uattr, 1))
812		return -EFAULT;
813
814	if (size > PAGE_SIZE)	/* silly large */
815		return -E2BIG;
816
817	/* If we're handed a bigger struct than we know of,
818	 * ensure all the unknown bits are 0 - i.e. new
819	 * user-space does not rely on any kernel feature
820	 * extensions we don't know about yet.
821	 */
822	if (size > sizeof(attr)) {
823		unsigned char __user *addr;
824		unsigned char __user *end;
825		unsigned char val;
826
827		addr = (void __user *)uattr + sizeof(attr);
828		end  = (void __user *)uattr + size;
829
830		for (; addr < end; addr++) {
831			err = get_user(val, addr);
832			if (err)
833				return err;
834			if (val)
835				return -E2BIG;
836		}
837		size = sizeof(attr);
838	}
839
840	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
841	if (copy_from_user(&attr, uattr, size) != 0)
842		return -EFAULT;
843
844	switch (cmd) {
845	case BPF_MAP_CREATE:
846		err = map_create(&attr);
847		break;
848	case BPF_MAP_LOOKUP_ELEM:
849		err = map_lookup_elem(&attr);
850		break;
851	case BPF_MAP_UPDATE_ELEM:
852		err = map_update_elem(&attr);
853		break;
854	case BPF_MAP_DELETE_ELEM:
855		err = map_delete_elem(&attr);
856		break;
857	case BPF_MAP_GET_NEXT_KEY:
858		err = map_get_next_key(&attr);
859		break;
860	case BPF_PROG_LOAD:
861		err = bpf_prog_load(&attr);
862		break;
863	case BPF_OBJ_PIN:
864		err = bpf_obj_pin(&attr);
865		break;
866	case BPF_OBJ_GET:
867		err = bpf_obj_get(&attr);
868		break;
869	default:
870		err = -EINVAL;
871		break;
872	}
873
874	return err;
875}
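Every map command above is reached from user space through the bpf(2) syscall with a zero-initialized 'union bpf_attr' in which only the fields up to the command's *_LAST_FIELD are filled in. Map commands work for unprivileged users unless the kernel.unprivileged_bpf_disabled sysctl is set, per the check at the top of SYSCALL_DEFINE3(bpf, ...). The following is a minimal user-space sketch, not part of the kernel source; it assumes the uapi <linux/bpf.h> matching this kernel and a libc that exposes __NR_bpf but provides no bpf() wrapper, so the raw syscall is used.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* thin wrapper around the raw syscall; mirrors SYSCALL_DEFINE3(bpf, ...) above */
static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	union bpf_attr attr;
	__u32 key = 0, value = 42, readback = 0;
	int map_fd;

	/* BPF_MAP_CREATE: only the fields up to map_flags may be non-zero,
	 * matching CHECK_ATTR(BPF_MAP_CREATE) in map_create() */
	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(key);
	attr.value_size  = sizeof(value);
	attr.max_entries = 1;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	/* BPF_MAP_UPDATE_ELEM: key/value are passed as __aligned_u64 pointers,
	 * which map_update_elem() converts back with u64_to_ptr() */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&key;
	attr.value  = (__u64)(unsigned long)&value;
	attr.flags  = BPF_ANY;
	if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
		perror("BPF_MAP_UPDATE_ELEM");

	/* BPF_MAP_LOOKUP_ELEM copies the element back into 'readback' */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&key;
	attr.value  = (__u64)(unsigned long)&readback;
	if (sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)))
		perror("BPF_MAP_LOOKUP_ELEM");

	printf("value = %u\n", readback);
	close(map_fd);
	return 0;
}

The same pattern (zero the whole union, set only the fields the command defines, pass sizeof(attr)) applies to every command dispatched by the switch at the end of the file.
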
kernel/bpf/syscall.c (v4.10.11)
   1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   2 *
   3 * This program is free software; you can redistribute it and/or
   4 * modify it under the terms of version 2 of the GNU General Public
   5 * License as published by the Free Software Foundation.
   6 *
   7 * This program is distributed in the hope that it will be useful, but
   8 * WITHOUT ANY WARRANTY; without even the implied warranty of
   9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  10 * General Public License for more details.
  11 */
  12#include <linux/bpf.h>
  13#include <linux/syscalls.h>
  14#include <linux/slab.h>
  15#include <linux/vmalloc.h>
  16#include <linux/mmzone.h>
  17#include <linux/anon_inodes.h>
  18#include <linux/file.h>
  19#include <linux/license.h>
  20#include <linux/filter.h>
  21#include <linux/version.h>
  22#include <linux/kernel.h>
  23
  24DEFINE_PER_CPU(int, bpf_prog_active);
  25
  26int sysctl_unprivileged_bpf_disabled __read_mostly;
  27
  28static LIST_HEAD(bpf_map_types);
  29
  30static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
  31{
  32	struct bpf_map_type_list *tl;
  33	struct bpf_map *map;
  34
  35	list_for_each_entry(tl, &bpf_map_types, list_node) {
  36		if (tl->type == attr->map_type) {
  37			map = tl->ops->map_alloc(attr);
  38			if (IS_ERR(map))
  39				return map;
  40			map->ops = tl->ops;
  41			map->map_type = attr->map_type;
  42			return map;
  43		}
  44	}
  45	return ERR_PTR(-EINVAL);
  46}
  47
  48/* boot time registration of different map implementations */
  49void bpf_register_map_type(struct bpf_map_type_list *tl)
  50{
  51	list_add(&tl->list_node, &bpf_map_types);
  52}
  53
  54void *bpf_map_area_alloc(size_t size)
  55{
  56	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
  57	 * trigger under memory pressure as we really just want to
  58	 * fail instead.
  59	 */
  60	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
  61	void *area;
  62
  63	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
  64		area = kmalloc(size, GFP_USER | flags);
  65		if (area != NULL)
  66			return area;
  67	}
  68
  69	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
  70			 PAGE_KERNEL);
  71}
  72
  73void bpf_map_area_free(void *area)
  74{
  75	kvfree(area);
  76}
  77
  78int bpf_map_precharge_memlock(u32 pages)
  79{
  80	struct user_struct *user = get_current_user();
  81	unsigned long memlock_limit, cur;
  82
  83	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  84	cur = atomic_long_read(&user->locked_vm);
  85	free_uid(user);
  86	if (cur + pages > memlock_limit)
  87		return -EPERM;
  88	return 0;
  89}
  90
  91static int bpf_map_charge_memlock(struct bpf_map *map)
  92{
  93	struct user_struct *user = get_current_user();
  94	unsigned long memlock_limit;
  95
  96	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  97
  98	atomic_long_add(map->pages, &user->locked_vm);
  99
 100	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
 101		atomic_long_sub(map->pages, &user->locked_vm);
 102		free_uid(user);
 103		return -EPERM;
 104	}
 105	map->user = user;
 106	return 0;
 107}
 108
 109static void bpf_map_uncharge_memlock(struct bpf_map *map)
 110{
 111	struct user_struct *user = map->user;
 112
 113	atomic_long_sub(map->pages, &user->locked_vm);
 114	free_uid(user);
 115}
 116
 117/* called from workqueue */
 118static void bpf_map_free_deferred(struct work_struct *work)
 119{
 120	struct bpf_map *map = container_of(work, struct bpf_map, work);
 121
 122	bpf_map_uncharge_memlock(map);
 123	/* implementation dependent freeing */
 124	map->ops->map_free(map);
 125}
 126
 127static void bpf_map_put_uref(struct bpf_map *map)
 128{
 129	if (atomic_dec_and_test(&map->usercnt)) {
 130		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
 131			bpf_fd_array_map_clear(map);
 132	}
 133}
 134
 135/* decrement map refcnt and schedule it for freeing via workqueue
 136 * (underlying map implementation ops->map_free() might sleep)
 137 */
 138void bpf_map_put(struct bpf_map *map)
 139{
 140	if (atomic_dec_and_test(&map->refcnt)) {
 141		INIT_WORK(&map->work, bpf_map_free_deferred);
 142		schedule_work(&map->work);
 143	}
 144}
 145
 146void bpf_map_put_with_uref(struct bpf_map *map)
 147{
 148	bpf_map_put_uref(map);
 149	bpf_map_put(map);
 150}
 151
 152static int bpf_map_release(struct inode *inode, struct file *filp)
 153{
 154	struct bpf_map *map = filp->private_data;
 155
 156	if (map->ops->map_release)
 157		map->ops->map_release(map, filp);
 158
 159	bpf_map_put_with_uref(map);
 160	return 0;
 161}
 162
 163#ifdef CONFIG_PROC_FS
 164static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 165{
 166	const struct bpf_map *map = filp->private_data;
 167	const struct bpf_array *array;
 168	u32 owner_prog_type = 0;
 169
 170	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
 171		array = container_of(map, struct bpf_array, map);
 172		owner_prog_type = array->owner_prog_type;
 173	}
 174
 175	seq_printf(m,
 176		   "map_type:\t%u\n"
 177		   "key_size:\t%u\n"
 178		   "value_size:\t%u\n"
 179		   "max_entries:\t%u\n"
 180		   "map_flags:\t%#x\n"
 181		   "memlock:\t%llu\n",
 182		   map->map_type,
 183		   map->key_size,
 184		   map->value_size,
 185		   map->max_entries,
 186		   map->map_flags,
 187		   map->pages * 1ULL << PAGE_SHIFT);
 188
 189	if (owner_prog_type)
 190		seq_printf(m, "owner_prog_type:\t%u\n",
 191			   owner_prog_type);
 192}
 193#endif
 194
 195static const struct file_operations bpf_map_fops = {
 196#ifdef CONFIG_PROC_FS
 197	.show_fdinfo	= bpf_map_show_fdinfo,
 198#endif
 199	.release	= bpf_map_release,
 200};
 201
 202int bpf_map_new_fd(struct bpf_map *map)
 203{
 204	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
 205				O_RDWR | O_CLOEXEC);
 206}
 207
 208/* helper macro to check that unused fields of 'union bpf_attr' are zero */
 209#define CHECK_ATTR(CMD) \
 210	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
 211		   sizeof(attr->CMD##_LAST_FIELD), 0, \
 212		   sizeof(*attr) - \
 213		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 214		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 215
 216#define BPF_MAP_CREATE_LAST_FIELD map_flags
 217/* called via syscall */
 218static int map_create(union bpf_attr *attr)
 219{
 220	struct bpf_map *map;
 221	int err;
 222
 223	err = CHECK_ATTR(BPF_MAP_CREATE);
 224	if (err)
 225		return -EINVAL;
 226
 227	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
 228	map = find_and_alloc_map(attr);
 229	if (IS_ERR(map))
 230		return PTR_ERR(map);
 231
 232	atomic_set(&map->refcnt, 1);
 233	atomic_set(&map->usercnt, 1);
 234
 235	err = bpf_map_charge_memlock(map);
 236	if (err)
 237		goto free_map_nouncharge;
 238
 239	err = bpf_map_new_fd(map);
 240	if (err < 0)
 241		/* failed to allocate fd */
 242		goto free_map;
 243
 244	return err;
 245
 246free_map:
 247	bpf_map_uncharge_memlock(map);
 248free_map_nouncharge:
 249	map->ops->map_free(map);
 250	return err;
 251}
 252
 253/* if error is returned, fd is released.
 254 * On success caller should complete fd access with matching fdput()
 255 */
 256struct bpf_map *__bpf_map_get(struct fd f)
 257{
 258	if (!f.file)
 259		return ERR_PTR(-EBADF);
 260	if (f.file->f_op != &bpf_map_fops) {
 261		fdput(f);
 262		return ERR_PTR(-EINVAL);
 263	}
 264
 265	return f.file->private_data;
 266}
 267
 268/* prog's and map's refcnt limit */
 269#define BPF_MAX_REFCNT 32768
 270
 271struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 272{
 273	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
 274		atomic_dec(&map->refcnt);
 275		return ERR_PTR(-EBUSY);
 276	}
 277	if (uref)
 278		atomic_inc(&map->usercnt);
 279	return map;
 280}
 281
 282struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 283{
 284	struct fd f = fdget(ufd);
 285	struct bpf_map *map;
 286
 287	map = __bpf_map_get(f);
 288	if (IS_ERR(map))
 289		return map;
 290
 291	map = bpf_map_inc(map, true);
 292	fdput(f);
 293
 294	return map;
 295}
 296
 297int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 298{
 299	return -ENOTSUPP;
 300}
 301
 302/* last field in 'union bpf_attr' used by this command */
 303#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
 304
 305static int map_lookup_elem(union bpf_attr *attr)
 306{
 307	void __user *ukey = u64_to_user_ptr(attr->key);
 308	void __user *uvalue = u64_to_user_ptr(attr->value);
 309	int ufd = attr->map_fd;
 310	struct bpf_map *map;
 311	void *key, *value, *ptr;
 312	u32 value_size;
 313	struct fd f;
 314	int err;
 315
 316	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
 317		return -EINVAL;
 318
 319	f = fdget(ufd);
 320	map = __bpf_map_get(f);
 321	if (IS_ERR(map))
 322		return PTR_ERR(map);
 323
 324	err = -ENOMEM;
 325	key = kmalloc(map->key_size, GFP_USER);
 326	if (!key)
 327		goto err_put;
 328
 329	err = -EFAULT;
 330	if (copy_from_user(key, ukey, map->key_size) != 0)
 331		goto free_key;
 332
 333	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 334	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 335	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 336		value_size = round_up(map->value_size, 8) * num_possible_cpus();
 337	else
 338		value_size = map->value_size;
 339
 340	err = -ENOMEM;
 341	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
 342	if (!value)
 343		goto free_key;
 344
 345	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 346	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 347		err = bpf_percpu_hash_copy(map, key, value);
 348	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 349		err = bpf_percpu_array_copy(map, key, value);
 350	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
 351		err = bpf_stackmap_copy(map, key, value);
 352	} else {
 353		rcu_read_lock();
 354		ptr = map->ops->map_lookup_elem(map, key);
 355		if (ptr)
 356			memcpy(value, ptr, value_size);
 357		rcu_read_unlock();
 358		err = ptr ? 0 : -ENOENT;
 359	}
 360
 361	if (err)
 362		goto free_value;
 363
 364	err = -EFAULT;
 365	if (copy_to_user(uvalue, value, value_size) != 0)
 366		goto free_value;
 367
 368	err = 0;
 369
 370free_value:
 371	kfree(value);
 372free_key:
 373	kfree(key);
 374err_put:
 375	fdput(f);
 376	return err;
 377}
 378
 379#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
 380
 381static int map_update_elem(union bpf_attr *attr)
 382{
 383	void __user *ukey = u64_to_user_ptr(attr->key);
 384	void __user *uvalue = u64_to_user_ptr(attr->value);
 385	int ufd = attr->map_fd;
 386	struct bpf_map *map;
 387	void *key, *value;
 388	u32 value_size;
 389	struct fd f;
 390	int err;
 391
 392	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
 393		return -EINVAL;
 394
 395	f = fdget(ufd);
 396	map = __bpf_map_get(f);
 397	if (IS_ERR(map))
 398		return PTR_ERR(map);
 399
 400	err = -ENOMEM;
 401	key = kmalloc(map->key_size, GFP_USER);
 402	if (!key)
 403		goto err_put;
 404
 405	err = -EFAULT;
 406	if (copy_from_user(key, ukey, map->key_size) != 0)
 407		goto free_key;
 408
 409	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 410	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 411	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 412		value_size = round_up(map->value_size, 8) * num_possible_cpus();
 413	else
 414		value_size = map->value_size;
 415
 416	err = -ENOMEM;
 417	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
 418	if (!value)
 419		goto free_key;
 420
 421	err = -EFAULT;
 422	if (copy_from_user(value, uvalue, value_size) != 0)
 423		goto free_value;
 424
 425	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
 426	 * inside bpf map update or delete otherwise deadlocks are possible
 427	 */
 428	preempt_disable();
 429	__this_cpu_inc(bpf_prog_active);
 430	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 431	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 432		err = bpf_percpu_hash_update(map, key, value, attr->flags);
 433	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 434		err = bpf_percpu_array_update(map, key, value, attr->flags);
 435	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
 436		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
 437		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
 438		rcu_read_lock();
 439		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
 440						   attr->flags);
 441		rcu_read_unlock();
 442	} else {
 443		rcu_read_lock();
 444		err = map->ops->map_update_elem(map, key, value, attr->flags);
 445		rcu_read_unlock();
 446	}
 447	__this_cpu_dec(bpf_prog_active);
 448	preempt_enable();
 449
 450free_value:
 451	kfree(value);
 452free_key:
 453	kfree(key);
 454err_put:
 455	fdput(f);
 456	return err;
 457}
 458
 459#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
 460
 461static int map_delete_elem(union bpf_attr *attr)
 462{
 463	void __user *ukey = u64_to_user_ptr(attr->key);
 464	int ufd = attr->map_fd;
 465	struct bpf_map *map;
 466	struct fd f;
 467	void *key;
 468	int err;
 469
 470	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
 471		return -EINVAL;
 472
 473	f = fdget(ufd);
 474	map = __bpf_map_get(f);
 475	if (IS_ERR(map))
 476		return PTR_ERR(map);
 477
 478	err = -ENOMEM;
 479	key = kmalloc(map->key_size, GFP_USER);
 480	if (!key)
 481		goto err_put;
 482
 483	err = -EFAULT;
 484	if (copy_from_user(key, ukey, map->key_size) != 0)
 485		goto free_key;
 486
 487	preempt_disable();
 488	__this_cpu_inc(bpf_prog_active);
 489	rcu_read_lock();
 490	err = map->ops->map_delete_elem(map, key);
 491	rcu_read_unlock();
 492	__this_cpu_dec(bpf_prog_active);
 493	preempt_enable();
 494
 495free_key:
 496	kfree(key);
 497err_put:
 498	fdput(f);
 499	return err;
 500}
 501
 502/* last field in 'union bpf_attr' used by this command */
 503#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
 504
 505static int map_get_next_key(union bpf_attr *attr)
 506{
 507	void __user *ukey = u64_to_user_ptr(attr->key);
 508	void __user *unext_key = u64_to_user_ptr(attr->next_key);
 509	int ufd = attr->map_fd;
 510	struct bpf_map *map;
 511	void *key, *next_key;
 512	struct fd f;
 513	int err;
 514
 515	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
 516		return -EINVAL;
 517
 518	f = fdget(ufd);
 519	map = __bpf_map_get(f);
 520	if (IS_ERR(map))
 521		return PTR_ERR(map);
 522
 523	err = -ENOMEM;
 524	key = kmalloc(map->key_size, GFP_USER);
 525	if (!key)
 526		goto err_put;
 527
 528	err = -EFAULT;
 529	if (copy_from_user(key, ukey, map->key_size) != 0)
 530		goto free_key;
 531
 532	err = -ENOMEM;
 533	next_key = kmalloc(map->key_size, GFP_USER);
 534	if (!next_key)
 535		goto free_key;
 536
 537	rcu_read_lock();
 538	err = map->ops->map_get_next_key(map, key, next_key);
 539	rcu_read_unlock();
 540	if (err)
 541		goto free_next_key;
 542
 543	err = -EFAULT;
 544	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
 545		goto free_next_key;
 546
 547	err = 0;
 548
 549free_next_key:
 550	kfree(next_key);
 551free_key:
 552	kfree(key);
 553err_put:
 554	fdput(f);
 555	return err;
 556}
 557
 558static LIST_HEAD(bpf_prog_types);
 559
 560static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
 561{
 562	struct bpf_prog_type_list *tl;
 563
 564	list_for_each_entry(tl, &bpf_prog_types, list_node) {
 565		if (tl->type == type) {
 566			prog->aux->ops = tl->ops;
 567			prog->type = type;
 568			return 0;
 569		}
 570	}
 571
 572	return -EINVAL;
 573}
 574
 575void bpf_register_prog_type(struct bpf_prog_type_list *tl)
 576{
 577	list_add(&tl->list_node, &bpf_prog_types);
 578}
 579
 580/* fixup insn->imm field of bpf_call instructions:
 581 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 582 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 583 * else if (insn->imm == BPF_FUNC_map_update_elem)
 584 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 585 * else ...
 586 *
 587 * this function is called after eBPF program passed verification
 588 */
 589static void fixup_bpf_calls(struct bpf_prog *prog)
 590{
 591	const struct bpf_func_proto *fn;
 592	int i;
 593
 594	for (i = 0; i < prog->len; i++) {
 595		struct bpf_insn *insn = &prog->insnsi[i];
 596
 597		if (insn->code == (BPF_JMP | BPF_CALL)) {
 598			/* we reach here when program has bpf_call instructions
 599			 * and it passed bpf_check(), means that
 600			 * ops->get_func_proto must have been supplied, check it
 601			 */
 602			BUG_ON(!prog->aux->ops->get_func_proto);
 603
 604			if (insn->imm == BPF_FUNC_get_route_realm)
 605				prog->dst_needed = 1;
 606			if (insn->imm == BPF_FUNC_get_prandom_u32)
 607				bpf_user_rnd_init_once();
 608			if (insn->imm == BPF_FUNC_xdp_adjust_head)
 609				prog->xdp_adjust_head = 1;
 610			if (insn->imm == BPF_FUNC_tail_call) {
 611				/* mark bpf_tail_call as different opcode
 612				 * to avoid conditional branch in
 613				 * interpreter for every normal call
 614				 * and to prevent accidental JITing by
 615				 * JIT compiler that doesn't support
 616				 * bpf_tail_call yet
 617				 */
 618				insn->imm = 0;
 619				insn->code |= BPF_X;
 620				continue;
 621			}
 622
 623			fn = prog->aux->ops->get_func_proto(insn->imm);
 624			/* all functions that have prototype and verifier allowed
 625			 * programs to call them, must be real in-kernel functions
 626			 */
 627			BUG_ON(!fn->func);
 628			insn->imm = fn->func - __bpf_call_base;
 629		}
 630	}
 631}
 632
 633/* drop refcnt on maps used by eBPF program and free auxiliary data */
 634static void free_used_maps(struct bpf_prog_aux *aux)
 635{
 636	int i;
 637
 638	for (i = 0; i < aux->used_map_cnt; i++)
 639		bpf_map_put(aux->used_maps[i]);
 640
 641	kfree(aux->used_maps);
 642}
 643
 644int __bpf_prog_charge(struct user_struct *user, u32 pages)
 645{
 646	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 647	unsigned long user_bufs;
 648
 649	if (user) {
 650		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
 651		if (user_bufs > memlock_limit) {
 652			atomic_long_sub(pages, &user->locked_vm);
 653			return -EPERM;
 654		}
 655	}
 656
 657	return 0;
 658}
 659
 660void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
 661{
 662	if (user)
 663		atomic_long_sub(pages, &user->locked_vm);
 664}
 665
 666static int bpf_prog_charge_memlock(struct bpf_prog *prog)
 667{
 668	struct user_struct *user = get_current_user();
 669	int ret;
 670
 671	ret = __bpf_prog_charge(user, prog->pages);
 672	if (ret) {
 673		free_uid(user);
 674		return ret;
 675	}
 676
 677	prog->aux->user = user;
 678	return 0;
 679}
 680
 681static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
 682{
 683	struct user_struct *user = prog->aux->user;
 684
 685	__bpf_prog_uncharge(user, prog->pages);
 686	free_uid(user);
 687}
 688
 689static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 690{
 691	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 692
 693	free_used_maps(aux);
 694	bpf_prog_uncharge_memlock(aux->prog);
 695	bpf_prog_free(aux->prog);
 696}
 697
 698void bpf_prog_put(struct bpf_prog *prog)
 699{
 700	if (atomic_dec_and_test(&prog->aux->refcnt))
 701		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 702}
 703EXPORT_SYMBOL_GPL(bpf_prog_put);
 704
 705static int bpf_prog_release(struct inode *inode, struct file *filp)
 706{
 707	struct bpf_prog *prog = filp->private_data;
 708
 709	bpf_prog_put(prog);
 710	return 0;
 711}
 712
 713#ifdef CONFIG_PROC_FS
 714static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
 715{
 716	const struct bpf_prog *prog = filp->private_data;
 717	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
 718
 719	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
 720	seq_printf(m,
 721		   "prog_type:\t%u\n"
 722		   "prog_jited:\t%u\n"
 723		   "prog_tag:\t%s\n"
 724		   "memlock:\t%llu\n",
 725		   prog->type,
 726		   prog->jited,
 727		   prog_tag,
 728		   prog->pages * 1ULL << PAGE_SHIFT);
 729}
 730#endif
 731
 732static const struct file_operations bpf_prog_fops = {
 733#ifdef CONFIG_PROC_FS
 734	.show_fdinfo	= bpf_prog_show_fdinfo,
 735#endif
 736	.release	= bpf_prog_release,
 737};
 738
 739int bpf_prog_new_fd(struct bpf_prog *prog)
 740{
 741	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
 742				O_RDWR | O_CLOEXEC);
 743}
 744
 745static struct bpf_prog *____bpf_prog_get(struct fd f)
 746{
 747	if (!f.file)
 748		return ERR_PTR(-EBADF);
 749	if (f.file->f_op != &bpf_prog_fops) {
 750		fdput(f);
 751		return ERR_PTR(-EINVAL);
 752	}
 753
 754	return f.file->private_data;
 755}
 756
 757struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
 758{
 759	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
 760		atomic_sub(i, &prog->aux->refcnt);
 761		return ERR_PTR(-EBUSY);
 762	}
 763	return prog;
 764}
 765EXPORT_SYMBOL_GPL(bpf_prog_add);
 766
 767void bpf_prog_sub(struct bpf_prog *prog, int i)
 768{
 769	/* Only to be used for undoing previous bpf_prog_add() in some
 770	 * error path. We still know that another entity in our call
 771	 * path holds a reference to the program, thus atomic_sub() can
 772	 * be safely used in such cases!
 773	 */
 774	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
 775}
 776EXPORT_SYMBOL_GPL(bpf_prog_sub);
 777
 778struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
 779{
 780	return bpf_prog_add(prog, 1);
 781}
 782EXPORT_SYMBOL_GPL(bpf_prog_inc);
 783
 784static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
 785{
 786	struct fd f = fdget(ufd);
 787	struct bpf_prog *prog;
 788
 789	prog = ____bpf_prog_get(f);
 790	if (IS_ERR(prog))
 791		return prog;
 792	if (type && prog->type != *type) {
 793		prog = ERR_PTR(-EINVAL);
 794		goto out;
 795	}
 796
 797	prog = bpf_prog_inc(prog);
 798out:
 799	fdput(f);
 800	return prog;
 801}
 802
 803struct bpf_prog *bpf_prog_get(u32 ufd)
 804{
 805	return __bpf_prog_get(ufd, NULL);
 806}
 807
 808struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
 809{
 810	return __bpf_prog_get(ufd, &type);
 811}
 812EXPORT_SYMBOL_GPL(bpf_prog_get_type);
 813
 814/* last field in 'union bpf_attr' used by this command */
 815#define	BPF_PROG_LOAD_LAST_FIELD kern_version
 816
 817static int bpf_prog_load(union bpf_attr *attr)
 818{
 819	enum bpf_prog_type type = attr->prog_type;
 820	struct bpf_prog *prog;
 821	int err;
 822	char license[128];
 823	bool is_gpl;
 824
 825	if (CHECK_ATTR(BPF_PROG_LOAD))
 826		return -EINVAL;
 827
 828	/* copy eBPF program license from user space */
 829	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
 830			      sizeof(license) - 1) < 0)
 831		return -EFAULT;
 832	license[sizeof(license) - 1] = 0;
 833
 834	/* eBPF programs must be GPL compatible to use GPL-ed functions */
 835	is_gpl = license_is_gpl_compatible(license);
 836
 837	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
 838		return -E2BIG;
 839
 840	if (type == BPF_PROG_TYPE_KPROBE &&
 841	    attr->kern_version != LINUX_VERSION_CODE)
 842		return -EINVAL;
 843
 844	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
 845		return -EPERM;
 846
 847	/* plain bpf_prog allocation */
 848	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
 849	if (!prog)
 850		return -ENOMEM;
 851
 852	err = bpf_prog_charge_memlock(prog);
 853	if (err)
 854		goto free_prog_nouncharge;
 855
 856	prog->len = attr->insn_cnt;
 857
 858	err = -EFAULT;
 859	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
 860			   bpf_prog_insn_size(prog)) != 0)
 861		goto free_prog;
 862
 863	prog->orig_prog = NULL;
 864	prog->jited = 0;
 865
 866	atomic_set(&prog->aux->refcnt, 1);
 867	prog->gpl_compatible = is_gpl ? 1 : 0;
 868
 869	/* find program type: socket_filter vs tracing_filter */
 870	err = find_prog_type(type, prog);
 871	if (err < 0)
 872		goto free_prog;
 873
 874	/* run eBPF verifier */
 875	err = bpf_check(&prog, attr);
 876	if (err < 0)
 877		goto free_used_maps;
 878
 879	/* fixup BPF_CALL->imm field */
 880	fixup_bpf_calls(prog);
 881
 882	/* eBPF program is ready to be JITed */
 883	prog = bpf_prog_select_runtime(prog, &err);
 884	if (err < 0)
 885		goto free_used_maps;
 886
 887	err = bpf_prog_new_fd(prog);
 888	if (err < 0)
 889		/* failed to allocate fd */
 890		goto free_used_maps;
 891
 892	return err;
 893
 894free_used_maps:
 895	free_used_maps(prog->aux);
 896free_prog:
 897	bpf_prog_uncharge_memlock(prog);
 898free_prog_nouncharge:
 899	bpf_prog_free(prog);
 900	return err;
 901}
 902
 903#define BPF_OBJ_LAST_FIELD bpf_fd
 904
 905static int bpf_obj_pin(const union bpf_attr *attr)
 906{
 907	if (CHECK_ATTR(BPF_OBJ))
 908		return -EINVAL;
 909
 910	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
 911}
 912
 913static int bpf_obj_get(const union bpf_attr *attr)
 914{
 915	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
 916		return -EINVAL;
 917
 918	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
 919}
 920
 921#ifdef CONFIG_CGROUP_BPF
 922
 923#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 924
 925static int bpf_prog_attach(const union bpf_attr *attr)
 926{
 927	enum bpf_prog_type ptype;
 928	struct bpf_prog *prog;
 929	struct cgroup *cgrp;
 930	int ret;
 931
 932	if (!capable(CAP_NET_ADMIN))
 933		return -EPERM;
 934
 935	if (CHECK_ATTR(BPF_PROG_ATTACH))
 936		return -EINVAL;
 937
 938	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
 939		return -EINVAL;
 940
 941	switch (attr->attach_type) {
 942	case BPF_CGROUP_INET_INGRESS:
 943	case BPF_CGROUP_INET_EGRESS:
 944		ptype = BPF_PROG_TYPE_CGROUP_SKB;
 945		break;
 946	case BPF_CGROUP_INET_SOCK_CREATE:
 947		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
 948		break;
 949	default:
 950		return -EINVAL;
 951	}
 952
 953	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
 954	if (IS_ERR(prog))
 955		return PTR_ERR(prog);
 956
 957	cgrp = cgroup_get_from_fd(attr->target_fd);
 958	if (IS_ERR(cgrp)) {
 959		bpf_prog_put(prog);
 960		return PTR_ERR(cgrp);
 961	}
 962
 963	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
 964				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
 965	if (ret)
 966		bpf_prog_put(prog);
 967	cgroup_put(cgrp);
 968
 969	return ret;
 970}
 971
 972#define BPF_PROG_DETACH_LAST_FIELD attach_type
 973
 974static int bpf_prog_detach(const union bpf_attr *attr)
 975{
 976	struct cgroup *cgrp;
 977	int ret;
 978
 979	if (!capable(CAP_NET_ADMIN))
 980		return -EPERM;
 981
 982	if (CHECK_ATTR(BPF_PROG_DETACH))
 983		return -EINVAL;
 984
 985	switch (attr->attach_type) {
 986	case BPF_CGROUP_INET_INGRESS:
 987	case BPF_CGROUP_INET_EGRESS:
 988	case BPF_CGROUP_INET_SOCK_CREATE:
 989		cgrp = cgroup_get_from_fd(attr->target_fd);
 990		if (IS_ERR(cgrp))
 991			return PTR_ERR(cgrp);
 992
 993		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
 994		cgroup_put(cgrp);
 995		break;
 996
 997	default:
 998		return -EINVAL;
 999	}
1000
1001	return ret;
1002}
1003#endif /* CONFIG_CGROUP_BPF */
1004
1005SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
1006{
1007	union bpf_attr attr = {};
1008	int err;
1009
1010	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
1011		return -EPERM;
1012
1013	if (!access_ok(VERIFY_READ, uattr, 1))
1014		return -EFAULT;
1015
1016	if (size > PAGE_SIZE)	/* silly large */
1017		return -E2BIG;
1018
1019	/* If we're handed a bigger struct than we know of,
1020	 * ensure all the unknown bits are 0 - i.e. new
1021	 * user-space does not rely on any kernel feature
1022	 * extensions we don't know about yet.
1023	 */
1024	if (size > sizeof(attr)) {
1025		unsigned char __user *addr;
1026		unsigned char __user *end;
1027		unsigned char val;
1028
1029		addr = (void __user *)uattr + sizeof(attr);
1030		end  = (void __user *)uattr + size;
1031
1032		for (; addr < end; addr++) {
1033			err = get_user(val, addr);
1034			if (err)
1035				return err;
1036			if (val)
1037				return -E2BIG;
1038		}
1039		size = sizeof(attr);
1040	}
1041
1042	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
1043	if (copy_from_user(&attr, uattr, size) != 0)
1044		return -EFAULT;
1045
1046	switch (cmd) {
1047	case BPF_MAP_CREATE:
1048		err = map_create(&attr);
1049		break;
1050	case BPF_MAP_LOOKUP_ELEM:
1051		err = map_lookup_elem(&attr);
1052		break;
1053	case BPF_MAP_UPDATE_ELEM:
1054		err = map_update_elem(&attr);
1055		break;
1056	case BPF_MAP_DELETE_ELEM:
1057		err = map_delete_elem(&attr);
1058		break;
1059	case BPF_MAP_GET_NEXT_KEY:
1060		err = map_get_next_key(&attr);
1061		break;
1062	case BPF_PROG_LOAD:
1063		err = bpf_prog_load(&attr);
1064		break;
1065	case BPF_OBJ_PIN:
1066		err = bpf_obj_pin(&attr);
1067		break;
1068	case BPF_OBJ_GET:
1069		err = bpf_obj_get(&attr);
1070		break;
1071
1072#ifdef CONFIG_CGROUP_BPF
1073	case BPF_PROG_ATTACH:
1074		err = bpf_prog_attach(&attr);
1075		break;
1076	case BPF_PROG_DETACH:
1077		err = bpf_prog_detach(&attr);
1078		break;
1079#endif
1080
1081	default:
1082		err = -EINVAL;
1083		break;
1084	}
1085
1086	return err;
1087}
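
Relative to v4.6, this version adds the BPF_PROG_ATTACH and BPF_PROG_DETACH commands, handled by bpf_prog_attach() and bpf_prog_detach() above when CONFIG_CGROUP_BPF is enabled: target_fd is an open file descriptor of a cgroup v2 directory and attach_bpf_fd is a program fd of the matching type. Below is a hypothetical user-space sketch, assuming uapi headers from this kernel, root privileges (CAP_SYS_ADMIN for the load, CAP_NET_ADMIN for the attach) and a cgroup v2 hierarchy mounted at /sys/fs/cgroup/unified; the mount path is an assumption, not something the kernel dictates. The trivial program just returns 1, i.e. accepts every packet on the egress hook.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	/* "r0 = 1; exit" -- accept every packet seen on the cgroup egress hook */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 1 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;
	int prog_fd, cgrp_fd;

	/* BPF_PROG_LOAD: the type must be BPF_PROG_TYPE_CGROUP_SKB so that
	 * bpf_prog_attach() accepts it for BPF_CGROUP_INET_EGRESS */
	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_CGROUP_SKB;
	attr.insns     = (__u64)(unsigned long)insns;
	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
	attr.license   = (__u64)(unsigned long)"GPL";
	prog_fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (prog_fd < 0) {
		perror("BPF_PROG_LOAD");
		return 1;
	}

	/* target_fd is simply an open fd of the cgroup directory; the path
	 * below assumes where cgroup v2 happens to be mounted */
	cgrp_fd = open("/sys/fs/cgroup/unified", O_RDONLY | O_DIRECTORY);
	if (cgrp_fd < 0) {
		perror("open cgroup");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.target_fd     = cgrp_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
	attr.attach_flags  = BPF_F_ALLOW_OVERRIDE; /* let child cgroups override */
	if (sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)))
		perror("BPF_PROG_ATTACH");

	/* BPF_PROG_DETACH only needs the cgroup fd and the attach type */
	memset(&attr, 0, sizeof(attr));
	attr.target_fd   = cgrp_fd;
	attr.attach_type = BPF_CGROUP_INET_EGRESS;
	if (sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)))
		perror("BPF_PROG_DETACH");

	close(cgrp_fd);
	close(prog_fd);
	return 0;
}

Detaching does not take a program fd: bpf_prog_detach() passes a NULL program to cgroup_bpf_update() for the given attach type.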