// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>

#include <net/netfilter/nf_bpf_link.h>
#include <net/netkit.h>
#include <net/tcx.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
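
/* Usage sketch (assumed caller, modeled on __sys_bpf()): the tail-zero
 * check is paired with a size-clamped copy, so that a newer, larger
 * bpf_attr from user space is accepted as long as the unknown tail is
 * all zeroes.
 *
 *	union bpf_attr attr = {};
 *	int err;
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	if (copy_from_bpfptr(&attr, uattr, size))
 *		return -EFAULT;
 */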

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = bpf_map_offload_map_mem_usage,
};

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
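
/* Worked example (illustrative): for a BPF_MAP_TYPE_PERCPU_ARRAY with
 * value_size = 12 on a machine with 4 possible CPUs, the syscall-visible
 * value buffer is round_up(12, 8) * 4 = 16 * 4 = 64 bytes - one 8-byte
 * aligned slot per possible CPU, which is what user space must allocate
 * for lookup and update.
 */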

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running non-sleepable BPF programs to complete so that
	 * userspace, when we return to it, knows that all non-sleepable
	 * programs that could be running use the new map value. For sleepable
	 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
	 * for their completion, but that wait can be very long and userspace
	 * may think the syscall is hanging, so sleepable BPF programs are not
	 * handled for now.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_ARENA ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();

	return err;
}

/* Please, do not use this function outside of the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}
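
/* Sizing note (illustrative): with 4 KiB pages and the typical
 * PAGE_ALLOC_COSTLY_ORDER of 3, non-mmapable requests up to
 * 4096 << 3 = 32 KiB try kmalloc_node() first and only fall back to
 * vmalloc on failure; anything larger, or anything mmapable, goes
 * straight to __vmalloc_node_range().
 */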

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, lets clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
			unsigned long nr_pages, struct page **pages)
{
	unsigned long i, j;
	struct page *pg;
	int ret = 0;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg, *old_memcg;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
#endif
	for (i = 0; i < nr_pages; i++) {
		pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);

		if (pg) {
			pages[i] = pg;
			continue;
		}
		for (j = 0; j < i; j++)
			__free_page(pages[j]);
		ret = -ENOMEM;
		break;
	}

#ifdef CONFIG_MEMCG_KMEM
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
#endif
	return ret;
}


static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
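	/* rec->fields is kept sorted by offset and 'offset' is the first
	 * member of struct btf_field, so passing a pointer to the bare u32
	 * works as the bsearch() key for btf_field_cmp() below.
	 */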
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}

void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				migrate_disable();
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
							pointee_struct_meta->record : NULL,
						    fields[i].type == BPF_KPTR_PERCPU);
				migrate_enable();
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct btf_record *rec = map->record;
	struct btf *btf = map->btf;

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	/* Delay freeing of btf_record for maps, as map_free
	 * callback usually needs access to them. It is better to do it here
	 * than require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback in the map-in-map case,
	 * which eventually calls bpf_map_free_meta, since inner_map_meta is
	 * only a template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
	/* Delay freeing of btf for maps, as map_free callback may need
	 * struct_meta info which will be freed with btf_put().
	 */
	btf_put(btf);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

static void bpf_map_free_in_work(struct bpf_map *map)
{
	INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex like slab_mutex.
	 */
	queue_work(system_unbound_wq, &map->work);
}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}

static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_map_free_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_map_free_rcu_gp);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);

		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
		if (READ_ONCE(map->free_after_mult_rcu_gp))
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		else if (READ_ONCE(map->free_after_rcu_gp))
			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
		else
			bpf_map_free_in_work(map);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type  = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}
/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open		= bpf_map_mmap_open,
	.close		= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allow user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}
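
/* Userspace sketch (illustrative, not part of this file): mapping a
 * map created with BPF_F_MMAPABLE. 'map_fd' is assumed to come from a
 * successful BPF_MAP_CREATE, and 'len' must be page-aligned and cover
 * the value area.
 *
 *	void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 map_fd, 0);
 *	if (mem == MAP_FAILED)
 *		// handle error; e.g. EACCES for a BPF_F_RDONLY_PROG map
 *		// mapped writable, EPERM for a frozen map
 *	...stores through 'mem' are visible to programs using the map.
 */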

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
					   unsigned long len, unsigned long pgoff,
					   unsigned long flags)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_get_unmapped_area)
		return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
#ifdef CONFIG_MMU
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
#else
	return addr;
#endif
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
	.get_unmapped_area = bpf_get_unmapped_area,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
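
/* Illustrative expansion: for BPF_MAP_CREATE, whose last used field is
 * map_token_fd, CHECK_ATTR(BPF_MAP_CREATE) scans every byte of
 * 'union bpf_attr' past attr->map_token_fd - that is,
 * sizeof(*attr) - offsetofend(union bpf_attr, map_token_fd) bytes -
 * with memchr_inv() and evaluates to true if any of them is non-zero.
 */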

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
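
/* Examples (illustrative): "my_map.v2" copies fine and returns 9;
 * "bad-name" fails with -EINVAL because '-' is neither alphanumeric nor
 * '_' / '.'; a name that fills all 'size' bytes without a terminating
 * '\0' also fails with -EINVAL.
 */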

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_token_capable(token, CAP_BPF)) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_KPTR_PERCPU:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}

static bool bpf_net_capable(void)
{
	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
}

#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	struct bpf_token *token = NULL;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	bool token_flag;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
	 * to avoid per-map type checks tripping on unknown flag
	 */
	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
	attr->map_flags &= ~BPF_F_TOKEN_FD;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_type != BPF_MAP_TYPE_ARENA &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;

	if (token_flag) {
		token = bpf_token_get_from_fd(attr->map_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);

		/* if current token doesn't grant map creation permissions,
		 * then we can't use this token, so ignore it and rely on
		 * system-wide capabilities checks
		 */
		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
		    !bpf_token_allow_map_type(token, attr->map_type)) {
			bpf_token_put(token);
			token = NULL;
		}
	}

	err = -EPERM;

	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
		goto put_token;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_ARENA:
		if (!bpf_token_capable(token, CAP_BPF))
			goto put_token;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!bpf_token_capable(token, CAP_NET_ADMIN))
			goto put_token;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		goto put_token;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto put_token;
	}
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct, the bpf_prog.o
	     * must have BTF to begin with to figure out the corresponding
	     * kernel counterpart. Thus, attr->btf_fd has to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_create(map, attr, token);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);
	bpf_token_put(token);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
put_token:
	bpf_token_put(token);
	return err;
}
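
/* Userspace sketch (illustrative): the syscall path that lands in
 * map_create(). Attribute values are examples only; unused bpf_attr
 * fields must be zero, which the designated initializer guarantees.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	if (map_fd < 0)
 *		...on failure errno carries the error, e.g. EPERM when
 *		unprivileged BPF is disabled and no capable token is given.
 */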

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}


#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, f.file, key, value, attr->flags);
	if (!err)
		maybe_wait_bpf_programs(map);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	if (!err)
		maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_offloaded(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	return err;
}

int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	void *key, *value;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, map_file, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);

	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}
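
/* Userspace sketch (illustrative, field usage hedged against the uapi):
 * driving the batch interface. in_batch starts as zero/NULL for the
 * first call; the caller resubmits out_batch as in_batch to continue
 * iterating. Buffer sizes are examples.
 *
 *	union bpf_attr attr = {
 *		.batch.map_fd    = map_fd,
 *		.batch.keys      = (__u64)(unsigned long)keys,
 *		.batch.values    = (__u64)(unsigned long)values,
 *		.batch.count     = 64,
 *		.batch.out_batch = (__u64)(unsigned long)&next_key,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *	...on return attr.batch.count holds the number of elements copied,
 *	and errno is ENOENT once the map has been fully traversed.
 */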
1994
1995#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
1996
1997static int map_lookup_and_delete_elem(union bpf_attr *attr)
1998{
1999 void __user *ukey = u64_to_user_ptr(attr->key);
2000 void __user *uvalue = u64_to_user_ptr(attr->value);
2001 int ufd = attr->map_fd;
2002 struct bpf_map *map;
2003 void *key, *value;
2004 u32 value_size;
2005 struct fd f;
2006 int err;
2007
2008 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
2009 return -EINVAL;
2010
2011 if (attr->flags & ~BPF_F_LOCK)
2012 return -EINVAL;
2013
2014 f = fdget(ufd);
2015 map = __bpf_map_get(f);
2016 if (IS_ERR(map))
2017 return PTR_ERR(map);
2018 bpf_map_write_active_inc(map);
2019 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
2020 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
2021 err = -EPERM;
2022 goto err_put;
2023 }
2024
2025 if (attr->flags &&
2026 (map->map_type == BPF_MAP_TYPE_QUEUE ||
2027 map->map_type == BPF_MAP_TYPE_STACK)) {
2028 err = -EINVAL;
2029 goto err_put;
2030 }
2031
2032 if ((attr->flags & BPF_F_LOCK) &&
2033 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
2034 err = -EINVAL;
2035 goto err_put;
2036 }
2037
2038 key = __bpf_copy_key(ukey, map->key_size);
2039 if (IS_ERR(key)) {
2040 err = PTR_ERR(key);
2041 goto err_put;
2042 }
2043
2044 value_size = bpf_map_value_size(map);
2045
2046 err = -ENOMEM;
2047 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
2048 if (!value)
2049 goto free_key;
2050
2051 err = -ENOTSUPP;
2052 if (map->map_type == BPF_MAP_TYPE_QUEUE ||
2053 map->map_type == BPF_MAP_TYPE_STACK) {
2054 err = map->ops->map_pop_elem(map, value);
2055 } else if (map->map_type == BPF_MAP_TYPE_HASH ||
2056 map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2057 map->map_type == BPF_MAP_TYPE_LRU_HASH ||
2058 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2059 if (!bpf_map_is_offloaded(map)) {
2060 bpf_disable_instrumentation();
2061 rcu_read_lock();
2062 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
2063 rcu_read_unlock();
2064 bpf_enable_instrumentation();
2065 }
2066 }
2067
2068 if (err)
2069 goto free_value;
2070
2071 if (copy_to_user(uvalue, value, value_size) != 0) {
2072 err = -EFAULT;
2073 goto free_value;
2074 }
2075
2076 err = 0;
2077
2078free_value:
2079 kvfree(value);
2080free_key:
2081 kvfree(key);
2082err_put:
2083 bpf_map_write_active_dec(map);
2084 fdput(f);
2085 return err;
2086}
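
/* Editor's illustrative sketch (not part of this file): popping one element
 * from a BPF_MAP_TYPE_QUEUE via the command above. The fd must allow both
 * read and write, since the value is read back and the element deleted;
 * queue/stack maps take no key, and a 4-byte value is assumed here:
 *
 *	union bpf_attr attr = {};
 *	__u32 val;
 *	int err;
 *
 *	attr.map_fd = queue_fd;
 *	attr.value = (__u64)(unsigned long)&val;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
 */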
2087
2088#define BPF_MAP_FREEZE_LAST_FIELD map_fd
2089
2090static int map_freeze(const union bpf_attr *attr)
2091{
2092 int err = 0, ufd = attr->map_fd;
2093 struct bpf_map *map;
2094 struct fd f;
2095
2096 if (CHECK_ATTR(BPF_MAP_FREEZE))
2097 return -EINVAL;
2098
2099 f = fdget(ufd);
2100 map = __bpf_map_get(f);
2101 if (IS_ERR(map))
2102 return PTR_ERR(map);
2103
2104 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
2105 fdput(f);
2106 return -ENOTSUPP;
2107 }
2108
2109 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
2110 fdput(f);
2111 return -EPERM;
2112 }
2113
2114 mutex_lock(&map->freeze_mutex);
2115 if (bpf_map_write_active(map)) {
2116 err = -EBUSY;
2117 goto err_put;
2118 }
2119 if (READ_ONCE(map->frozen)) {
2120 err = -EBUSY;
2121 goto err_put;
2122 }
2123
2124 WRITE_ONCE(map->frozen, true);
2125err_put:
2126 mutex_unlock(&map->freeze_mutex);
2127 fdput(f);
2128 return err;
2129}
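
/* Editor's note: once BPF_MAP_FREEZE succeeds, syscall-side writes to the
 * map fail, which is what lets the verifier treat frozen (e.g. .rodata)
 * map values as constants. A minimal user-space sketch, fd assumed:
 *
 *	union bpf_attr attr = { .map_fd = map_fd };
 *
 *	syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));	// 0 on success
 *	// a subsequent BPF_MAP_UPDATE_ELEM on map_fd now fails with EPERM
 */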
2130
2131static const struct bpf_prog_ops * const bpf_prog_types[] = {
2132#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2133 [_id] = & _name ## _prog_ops,
2134#define BPF_MAP_TYPE(_id, _ops)
2135#define BPF_LINK_TYPE(_id, _name)
2136#include <linux/bpf_types.h>
2137#undef BPF_PROG_TYPE
2138#undef BPF_MAP_TYPE
2139#undef BPF_LINK_TYPE
2140};
2141
2142static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
2143{
2144 const struct bpf_prog_ops *ops;
2145
2146 if (type >= ARRAY_SIZE(bpf_prog_types))
2147 return -EINVAL;
2148 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
2149 ops = bpf_prog_types[type];
2150 if (!ops)
2151 return -EINVAL;
2152
2153 if (!bpf_prog_is_offloaded(prog->aux))
2154 prog->aux->ops = ops;
2155 else
2156 prog->aux->ops = &bpf_offload_prog_ops;
2157 prog->type = type;
2158 return 0;
2159}
2160
2161enum bpf_audit {
2162 BPF_AUDIT_LOAD,
2163 BPF_AUDIT_UNLOAD,
2164 BPF_AUDIT_MAX,
2165};
2166
2167static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
2168 [BPF_AUDIT_LOAD] = "LOAD",
2169 [BPF_AUDIT_UNLOAD] = "UNLOAD",
2170};
2171
2172static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
2173{
2174 struct audit_context *ctx = NULL;
2175 struct audit_buffer *ab;
2176
2177 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
2178 return;
2179 if (audit_enabled == AUDIT_OFF)
2180 return;
2181 if (!in_irq() && !irqs_disabled())
2182 ctx = audit_context();
2183 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
2184 if (unlikely(!ab))
2185 return;
2186 audit_log_format(ab, "prog-id=%u op=%s",
2187 prog->aux->id, bpf_audit_str[op]);
2188 audit_log_end(ab);
2189}
2190
2191static int bpf_prog_alloc_id(struct bpf_prog *prog)
2192{
2193 int id;
2194
2195 idr_preload(GFP_KERNEL);
2196 spin_lock_bh(&prog_idr_lock);
2197 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
2198 if (id > 0)
2199 prog->aux->id = id;
2200 spin_unlock_bh(&prog_idr_lock);
2201 idr_preload_end();
2202
2203 /* id is in [1, INT_MAX) */
2204 if (WARN_ON_ONCE(!id))
2205 return -ENOSPC;
2206
2207 return id > 0 ? 0 : id;
2208}
2209
2210void bpf_prog_free_id(struct bpf_prog *prog)
2211{
2212 unsigned long flags;
2213
2214 /* cBPF to eBPF migrations are currently not in the idr store.
2215 * Offloaded programs are removed from the store when their device
2216 * disappears - even if someone grabs an fd to them they are unusable,
2217 * simply waiting for refcnt to drop to be freed.
2218 */
2219 if (!prog->aux->id)
2220 return;
2221
2222 spin_lock_irqsave(&prog_idr_lock, flags);
2223 idr_remove(&prog_idr, prog->aux->id);
2224 prog->aux->id = 0;
2225 spin_unlock_irqrestore(&prog_idr_lock, flags);
2226}
2227
2228static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2229{
2230 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2231
2232 kvfree(aux->func_info);
2233 kfree(aux->func_info_aux);
2234 free_uid(aux->user);
2235 security_bpf_prog_free(aux->prog);
2236 bpf_prog_free(aux->prog);
2237}
2238
2239static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2240{
2241 bpf_prog_kallsyms_del_all(prog);
2242 btf_put(prog->aux->btf);
2243 module_put(prog->aux->mod);
2244 kvfree(prog->aux->jited_linfo);
2245 kvfree(prog->aux->linfo);
2246 kfree(prog->aux->kfunc_tab);
2247 if (prog->aux->attach_btf)
2248 btf_put(prog->aux->attach_btf);
2249
2250 if (deferred) {
2251 if (prog->sleepable)
2252 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2253 else
2254 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2255 } else {
2256 __bpf_prog_put_rcu(&prog->aux->rcu);
2257 }
2258}
2259
2260static void bpf_prog_put_deferred(struct work_struct *work)
2261{
2262 struct bpf_prog_aux *aux;
2263 struct bpf_prog *prog;
2264
2265 aux = container_of(work, struct bpf_prog_aux, work);
2266 prog = aux->prog;
2267 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2268 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2269 bpf_prog_free_id(prog);
2270 __bpf_prog_put_noref(prog, true);
2271}
2272
2273static void __bpf_prog_put(struct bpf_prog *prog)
2274{
2275 struct bpf_prog_aux *aux = prog->aux;
2276
2277 if (atomic64_dec_and_test(&aux->refcnt)) {
2278 if (in_irq() || irqs_disabled()) {
2279 INIT_WORK(&aux->work, bpf_prog_put_deferred);
2280 schedule_work(&aux->work);
2281 } else {
2282 bpf_prog_put_deferred(&aux->work);
2283 }
2284 }
2285}
2286
2287void bpf_prog_put(struct bpf_prog *prog)
2288{
2289 __bpf_prog_put(prog);
2290}
2291EXPORT_SYMBOL_GPL(bpf_prog_put);
2292
2293static int bpf_prog_release(struct inode *inode, struct file *filp)
2294{
2295 struct bpf_prog *prog = filp->private_data;
2296
2297 bpf_prog_put(prog);
2298 return 0;
2299}
2300
2301struct bpf_prog_kstats {
2302 u64 nsecs;
2303 u64 cnt;
2304 u64 misses;
2305};
2306
2307void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2308{
2309 struct bpf_prog_stats *stats;
2310 unsigned int flags;
2311
2312 stats = this_cpu_ptr(prog->stats);
2313 flags = u64_stats_update_begin_irqsave(&stats->syncp);
2314 u64_stats_inc(&stats->misses);
2315 u64_stats_update_end_irqrestore(&stats->syncp, flags);
2316}
2317
2318static void bpf_prog_get_stats(const struct bpf_prog *prog,
2319 struct bpf_prog_kstats *stats)
2320{
2321 u64 nsecs = 0, cnt = 0, misses = 0;
2322 int cpu;
2323
2324 for_each_possible_cpu(cpu) {
2325 const struct bpf_prog_stats *st;
2326 unsigned int start;
2327 u64 tnsecs, tcnt, tmisses;
2328
2329 st = per_cpu_ptr(prog->stats, cpu);
2330 do {
2331 start = u64_stats_fetch_begin(&st->syncp);
2332 tnsecs = u64_stats_read(&st->nsecs);
2333 tcnt = u64_stats_read(&st->cnt);
2334 tmisses = u64_stats_read(&st->misses);
2335 } while (u64_stats_fetch_retry(&st->syncp, start));
2336 nsecs += tnsecs;
2337 cnt += tcnt;
2338 misses += tmisses;
2339 }
2340 stats->nsecs = nsecs;
2341 stats->cnt = cnt;
2342 stats->misses = misses;
2343}
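
/* Editor's note: these per-CPU totals back both the fdinfo output below and
 * BPF_OBJ_GET_INFO_BY_FD. They only advance while runtime stats collection
 * is enabled (the kernel.bpf_stats_enabled sysctl or the BPF_ENABLE_STATS
 * command). A consumer deriving the average run time would guard the
 * division, e.g.:
 *
 *	u64 avg_ns = stats.cnt ? stats.nsecs / stats.cnt : 0;
 */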
2344
2345#ifdef CONFIG_PROC_FS
2346static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2347{
2348 const struct bpf_prog *prog = filp->private_data;
2349 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2350 struct bpf_prog_kstats stats;
2351
2352 bpf_prog_get_stats(prog, &stats);
2353 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2354 seq_printf(m,
2355 "prog_type:\t%u\n"
2356 "prog_jited:\t%u\n"
2357 "prog_tag:\t%s\n"
2358 "memlock:\t%llu\n"
2359 "prog_id:\t%u\n"
2360 "run_time_ns:\t%llu\n"
2361 "run_cnt:\t%llu\n"
2362 "recursion_misses:\t%llu\n"
2363 "verified_insns:\t%u\n",
2364 prog->type,
2365 prog->jited,
2366 prog_tag,
2367 prog->pages * 1ULL << PAGE_SHIFT,
2368 prog->aux->id,
2369 stats.nsecs,
2370 stats.cnt,
2371 stats.misses,
2372 prog->aux->verified_insns);
2373}
2374#endif
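
/* Editor's note: a hypothetical rendering of the fdinfo above, with all
 * values invented for illustration:
 *
 *	$ cat /proc/self/fdinfo/42
 *	prog_type:	1
 *	prog_jited:	1
 *	prog_tag:	9a3c01f8e5ec39d2
 *	memlock:	4096
 *	prog_id:	17
 *	run_time_ns:	0
 *	run_cnt:	0
 *	recursion_misses:	0
 *	verified_insns:	2
 */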
2375
2376const struct file_operations bpf_prog_fops = {
2377#ifdef CONFIG_PROC_FS
2378 .show_fdinfo = bpf_prog_show_fdinfo,
2379#endif
2380 .release = bpf_prog_release,
2381 .read = bpf_dummy_read,
2382 .write = bpf_dummy_write,
2383};
2384
2385int bpf_prog_new_fd(struct bpf_prog *prog)
2386{
2387 int ret;
2388
2389 ret = security_bpf_prog(prog);
2390 if (ret < 0)
2391 return ret;
2392
2393 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2394 O_RDWR | O_CLOEXEC);
2395}
2396
2397static struct bpf_prog *____bpf_prog_get(struct fd f)
2398{
2399 if (!f.file)
2400 return ERR_PTR(-EBADF);
2401 if (f.file->f_op != &bpf_prog_fops) {
2402 fdput(f);
2403 return ERR_PTR(-EINVAL);
2404 }
2405
2406 return f.file->private_data;
2407}
2408
2409void bpf_prog_add(struct bpf_prog *prog, int i)
2410{
2411 atomic64_add(i, &prog->aux->refcnt);
2412}
2413EXPORT_SYMBOL_GPL(bpf_prog_add);
2414
2415void bpf_prog_sub(struct bpf_prog *prog, int i)
2416{
2417 /* Only to be used for undoing previous bpf_prog_add() in some
2418 * error path. We still know that another entity in our call
2419 * path holds a reference to the program, thus atomic_sub() can
2420 * be safely used in such cases!
2421 */
2422 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2423}
2424EXPORT_SYMBOL_GPL(bpf_prog_sub);
2425
2426void bpf_prog_inc(struct bpf_prog *prog)
2427{
2428 atomic64_inc(&prog->aux->refcnt);
2429}
2430EXPORT_SYMBOL_GPL(bpf_prog_inc);
2431
2432/* prog_idr_lock should have been held */
2433struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2434{
2435 int refold;
2436
2437 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2438
2439 if (!refold)
2440 return ERR_PTR(-ENOENT);
2441
2442 return prog;
2443}
2444EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2445
2446bool bpf_prog_get_ok(struct bpf_prog *prog,
2447 enum bpf_prog_type *attach_type, bool attach_drv)
2448{
2449 /* not an attachment, just a refcount inc, always allow */
2450 if (!attach_type)
2451 return true;
2452
2453 if (prog->type != *attach_type)
2454 return false;
2455 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2456 return false;
2457
2458 return true;
2459}
2460
2461static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2462 bool attach_drv)
2463{
2464 struct fd f = fdget(ufd);
2465 struct bpf_prog *prog;
2466
2467 prog = ____bpf_prog_get(f);
2468 if (IS_ERR(prog))
2469 return prog;
2470 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
2471 prog = ERR_PTR(-EINVAL);
2472 goto out;
2473 }
2474
2475 bpf_prog_inc(prog);
2476out:
2477 fdput(f);
2478 return prog;
2479}
2480
2481struct bpf_prog *bpf_prog_get(u32 ufd)
2482{
2483 return __bpf_prog_get(ufd, NULL, false);
2484}
2485
2486struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2487 bool attach_drv)
2488{
2489 return __bpf_prog_get(ufd, &type, attach_drv);
2490}
2491EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2492
2493/* Initially all BPF programs could be loaded w/o specifying
2494 * expected_attach_type. Later for some of them specifying expected_attach_type
2495 * at load time became required so that program could be validated properly.
2496 * Programs of types that are allowed to be loaded both w/ and w/o (for
2497  * backward compatibility) expected_attach_type should have the default attach
2498 * type assigned to expected_attach_type for the latter case, so that it can be
2499 * validated later at attach time.
2500 *
2501 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
2502 * prog type requires it but has some attach types that have to be backward
2503 * compatible.
2504 */
2505static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2506{
2507 switch (attr->prog_type) {
2508 case BPF_PROG_TYPE_CGROUP_SOCK:
2509 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2510 * exist so checking for non-zero is the way to go here.
2511 */
2512 if (!attr->expected_attach_type)
2513 attr->expected_attach_type =
2514 BPF_CGROUP_INET_SOCK_CREATE;
2515 break;
2516 case BPF_PROG_TYPE_SK_REUSEPORT:
2517 if (!attr->expected_attach_type)
2518 attr->expected_attach_type =
2519 BPF_SK_REUSEPORT_SELECT;
2520 break;
2521 }
2522}
2523
2524static int
2525bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2526 enum bpf_attach_type expected_attach_type,
2527 struct btf *attach_btf, u32 btf_id,
2528 struct bpf_prog *dst_prog)
2529{
2530 if (btf_id) {
2531 if (btf_id > BTF_MAX_TYPE)
2532 return -EINVAL;
2533
2534 if (!attach_btf && !dst_prog)
2535 return -EINVAL;
2536
2537 switch (prog_type) {
2538 case BPF_PROG_TYPE_TRACING:
2539 case BPF_PROG_TYPE_LSM:
2540 case BPF_PROG_TYPE_STRUCT_OPS:
2541 case BPF_PROG_TYPE_EXT:
2542 break;
2543 default:
2544 return -EINVAL;
2545 }
2546 }
2547
2548 if (attach_btf && (!btf_id || dst_prog))
2549 return -EINVAL;
2550
2551 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2552 prog_type != BPF_PROG_TYPE_EXT)
2553 return -EINVAL;
2554
2555 switch (prog_type) {
2556 case BPF_PROG_TYPE_CGROUP_SOCK:
2557 switch (expected_attach_type) {
2558 case BPF_CGROUP_INET_SOCK_CREATE:
2559 case BPF_CGROUP_INET_SOCK_RELEASE:
2560 case BPF_CGROUP_INET4_POST_BIND:
2561 case BPF_CGROUP_INET6_POST_BIND:
2562 return 0;
2563 default:
2564 return -EINVAL;
2565 }
2566 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2567 switch (expected_attach_type) {
2568 case BPF_CGROUP_INET4_BIND:
2569 case BPF_CGROUP_INET6_BIND:
2570 case BPF_CGROUP_INET4_CONNECT:
2571 case BPF_CGROUP_INET6_CONNECT:
2572 case BPF_CGROUP_UNIX_CONNECT:
2573 case BPF_CGROUP_INET4_GETPEERNAME:
2574 case BPF_CGROUP_INET6_GETPEERNAME:
2575 case BPF_CGROUP_UNIX_GETPEERNAME:
2576 case BPF_CGROUP_INET4_GETSOCKNAME:
2577 case BPF_CGROUP_INET6_GETSOCKNAME:
2578 case BPF_CGROUP_UNIX_GETSOCKNAME:
2579 case BPF_CGROUP_UDP4_SENDMSG:
2580 case BPF_CGROUP_UDP6_SENDMSG:
2581 case BPF_CGROUP_UNIX_SENDMSG:
2582 case BPF_CGROUP_UDP4_RECVMSG:
2583 case BPF_CGROUP_UDP6_RECVMSG:
2584 case BPF_CGROUP_UNIX_RECVMSG:
2585 return 0;
2586 default:
2587 return -EINVAL;
2588 }
2589 case BPF_PROG_TYPE_CGROUP_SKB:
2590 switch (expected_attach_type) {
2591 case BPF_CGROUP_INET_INGRESS:
2592 case BPF_CGROUP_INET_EGRESS:
2593 return 0;
2594 default:
2595 return -EINVAL;
2596 }
2597 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2598 switch (expected_attach_type) {
2599 case BPF_CGROUP_SETSOCKOPT:
2600 case BPF_CGROUP_GETSOCKOPT:
2601 return 0;
2602 default:
2603 return -EINVAL;
2604 }
2605 case BPF_PROG_TYPE_SK_LOOKUP:
2606 if (expected_attach_type == BPF_SK_LOOKUP)
2607 return 0;
2608 return -EINVAL;
2609 case BPF_PROG_TYPE_SK_REUSEPORT:
2610 switch (expected_attach_type) {
2611 case BPF_SK_REUSEPORT_SELECT:
2612 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2613 return 0;
2614 default:
2615 return -EINVAL;
2616 }
2617 case BPF_PROG_TYPE_NETFILTER:
2618 if (expected_attach_type == BPF_NETFILTER)
2619 return 0;
2620 return -EINVAL;
2621 case BPF_PROG_TYPE_SYSCALL:
2622 case BPF_PROG_TYPE_EXT:
2623 if (expected_attach_type)
2624 return -EINVAL;
2625 fallthrough;
2626 default:
2627 return 0;
2628 }
2629}
2630
2631static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2632{
2633 switch (prog_type) {
2634 case BPF_PROG_TYPE_SCHED_CLS:
2635 case BPF_PROG_TYPE_SCHED_ACT:
2636 case BPF_PROG_TYPE_XDP:
2637 case BPF_PROG_TYPE_LWT_IN:
2638 case BPF_PROG_TYPE_LWT_OUT:
2639 case BPF_PROG_TYPE_LWT_XMIT:
2640 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2641 case BPF_PROG_TYPE_SK_SKB:
2642 case BPF_PROG_TYPE_SK_MSG:
2643 case BPF_PROG_TYPE_FLOW_DISSECTOR:
2644 case BPF_PROG_TYPE_CGROUP_DEVICE:
2645 case BPF_PROG_TYPE_CGROUP_SOCK:
2646 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2647 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2648 case BPF_PROG_TYPE_CGROUP_SYSCTL:
2649 case BPF_PROG_TYPE_SOCK_OPS:
2650 case BPF_PROG_TYPE_EXT: /* extends any prog */
2651 case BPF_PROG_TYPE_NETFILTER:
2652 return true;
2653 case BPF_PROG_TYPE_CGROUP_SKB:
2654 /* always unpriv */
2655 case BPF_PROG_TYPE_SK_REUSEPORT:
2656 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2657 default:
2658 return false;
2659 }
2660}
2661
2662static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2663{
2664 switch (prog_type) {
2665 case BPF_PROG_TYPE_KPROBE:
2666 case BPF_PROG_TYPE_TRACEPOINT:
2667 case BPF_PROG_TYPE_PERF_EVENT:
2668 case BPF_PROG_TYPE_RAW_TRACEPOINT:
2669 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2670 case BPF_PROG_TYPE_TRACING:
2671 case BPF_PROG_TYPE_LSM:
2672 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2673 case BPF_PROG_TYPE_EXT: /* extends any prog */
2674 return true;
2675 default:
2676 return false;
2677 }
2678}
2679
2680/* last field in 'union bpf_attr' used by this command */
2681#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
2682
2683static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2684{
2685 enum bpf_prog_type type = attr->prog_type;
2686 struct bpf_prog *prog, *dst_prog = NULL;
2687 struct btf *attach_btf = NULL;
2688 struct bpf_token *token = NULL;
2689 bool bpf_cap;
2690 int err;
2691 char license[128];
2692
2693 if (CHECK_ATTR(BPF_PROG_LOAD))
2694 return -EINVAL;
2695
2696 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2697 BPF_F_ANY_ALIGNMENT |
2698 BPF_F_TEST_STATE_FREQ |
2699 BPF_F_SLEEPABLE |
2700 BPF_F_TEST_RND_HI32 |
2701 BPF_F_XDP_HAS_FRAGS |
2702 BPF_F_XDP_DEV_BOUND_ONLY |
2703 BPF_F_TEST_REG_INVARIANTS |
2704 BPF_F_TOKEN_FD))
2705 return -EINVAL;
2706
2707 bpf_prog_load_fixup_attach_type(attr);
2708
2709 if (attr->prog_flags & BPF_F_TOKEN_FD) {
2710 token = bpf_token_get_from_fd(attr->prog_token_fd);
2711 if (IS_ERR(token))
2712 return PTR_ERR(token);
2713 /* if current token doesn't grant prog loading permissions,
2714 * then we can't use this token, so ignore it and rely on
2715 * system-wide capabilities checks
2716 */
2717 if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
2718 !bpf_token_allow_prog_type(token, attr->prog_type,
2719 attr->expected_attach_type)) {
2720 bpf_token_put(token);
2721 token = NULL;
2722 }
2723 }
2724
2725 bpf_cap = bpf_token_capable(token, CAP_BPF);
2726 err = -EPERM;
2727
2728 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2729 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2730 !bpf_cap)
2731 goto put_token;
2732
2733 /* Intent here is for unprivileged_bpf_disabled to block BPF program
2734 * creation for unprivileged users; other actions depend
2735 * on fd availability and access to bpffs, so are dependent on
2736 * object creation success. Even with unprivileged BPF disabled,
2737 * capability checks are still carried out for these
2738 * and other operations.
2739 */
2740 if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
2741 goto put_token;
2742
2743 if (attr->insn_cnt == 0 ||
2744 attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
2745 err = -E2BIG;
2746 goto put_token;
2747 }
2748 if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2749 type != BPF_PROG_TYPE_CGROUP_SKB &&
2750 !bpf_cap)
2751 goto put_token;
2752
2753 if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
2754 goto put_token;
2755 if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
2756 goto put_token;
2757
2758 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2759 * or btf, we need to check which one it is
2760  * or btf; we need to check which one it is
2761 if (attr->attach_prog_fd) {
2762 dst_prog = bpf_prog_get(attr->attach_prog_fd);
2763 if (IS_ERR(dst_prog)) {
2764 dst_prog = NULL;
2765 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2766 if (IS_ERR(attach_btf)) {
2767 err = -EINVAL;
2768 goto put_token;
2769 }
2770 if (!btf_is_kernel(attach_btf)) {
2771 /* attaching through specifying bpf_prog's BTF
2772 * objects directly might be supported eventually
2773 */
2774 btf_put(attach_btf);
2775 err = -ENOTSUPP;
2776 goto put_token;
2777 }
2778 }
2779 } else if (attr->attach_btf_id) {
2780 /* fall back to vmlinux BTF, if BTF type ID is specified */
2781 attach_btf = bpf_get_btf_vmlinux();
2782 if (IS_ERR(attach_btf)) {
2783 err = PTR_ERR(attach_btf);
2784 goto put_token;
2785 }
2786 if (!attach_btf) {
2787 err = -EINVAL;
2788 goto put_token;
2789 }
2790 btf_get(attach_btf);
2791 }
2792
2793 if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2794 attach_btf, attr->attach_btf_id,
2795 dst_prog)) {
2796 if (dst_prog)
2797 bpf_prog_put(dst_prog);
2798 if (attach_btf)
2799 btf_put(attach_btf);
2800 err = -EINVAL;
2801 goto put_token;
2802 }
2803
2804 /* plain bpf_prog allocation */
2805 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2806 if (!prog) {
2807 if (dst_prog)
2808 bpf_prog_put(dst_prog);
2809 if (attach_btf)
2810 btf_put(attach_btf);
2811 err = -ENOMEM;
2812 goto put_token;
2813 }
2814
2815 prog->expected_attach_type = attr->expected_attach_type;
2816 prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
2817 prog->aux->attach_btf = attach_btf;
2818 prog->aux->attach_btf_id = attr->attach_btf_id;
2819 prog->aux->dst_prog = dst_prog;
2820 prog->aux->dev_bound = !!attr->prog_ifindex;
2821 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2822
2823 /* move token into prog->aux, reuse taken refcnt */
2824 prog->aux->token = token;
2825 token = NULL;
2826
2827 prog->aux->user = get_current_user();
2828 prog->len = attr->insn_cnt;
2829
2830 err = -EFAULT;
2831 if (copy_from_bpfptr(prog->insns,
2832 make_bpfptr(attr->insns, uattr.is_kernel),
2833 bpf_prog_insn_size(prog)) != 0)
2834 goto free_prog;
2835 /* copy eBPF program license from user space */
2836 if (strncpy_from_bpfptr(license,
2837 make_bpfptr(attr->license, uattr.is_kernel),
2838 sizeof(license) - 1) < 0)
2839 goto free_prog;
2840 license[sizeof(license) - 1] = 0;
2841
2842 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2843 prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2844
2845 prog->orig_prog = NULL;
2846 prog->jited = 0;
2847
2848 atomic64_set(&prog->aux->refcnt, 1);
2849
2850 if (bpf_prog_is_dev_bound(prog->aux)) {
2851 err = bpf_prog_dev_bound_init(prog, attr);
2852 if (err)
2853 goto free_prog;
2854 }
2855
2856 if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2857 bpf_prog_is_dev_bound(dst_prog->aux)) {
2858 err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2859 if (err)
2860 goto free_prog;
2861 }
2862
2863 /*
2864 * Bookkeeping for managing the program attachment chain.
2865 *
2866  * It might be tempting to set the attach_tracing_prog flag at attachment
2867  * time, but that would not prevent loading a bunch of tracing programs
2868  * first and then attaching them to one another.
2869 *
2870 * The flag attach_tracing_prog is set for the whole program lifecycle, and
2871 * doesn't have to be cleared in bpf_tracing_link_release, since tracing
2872 * programs cannot change attachment target.
2873 */
2874 if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
2875 dst_prog->type == BPF_PROG_TYPE_TRACING) {
2876 prog->aux->attach_tracing_prog = true;
2877 }
2878
2879 /* find program type: socket_filter vs tracing_filter */
2880 err = find_prog_type(type, prog);
2881 if (err < 0)
2882 goto free_prog;
2883
2884 prog->aux->load_time = ktime_get_boottime_ns();
2885 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2886 sizeof(attr->prog_name));
2887 if (err < 0)
2888 goto free_prog;
2889
2890 err = security_bpf_prog_load(prog, attr, token);
2891 if (err)
2892 goto free_prog_sec;
2893
2894 /* run eBPF verifier */
2895 err = bpf_check(&prog, attr, uattr, uattr_size);
2896 if (err < 0)
2897 goto free_used_maps;
2898
2899 prog = bpf_prog_select_runtime(prog, &err);
2900 if (err < 0)
2901 goto free_used_maps;
2902
2903 err = bpf_prog_alloc_id(prog);
2904 if (err)
2905 goto free_used_maps;
2906
2907 /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2908 * effectively publicly exposed. However, retrieving via
2909 * bpf_prog_get_fd_by_id() will take another reference,
2910 * therefore it cannot be gone underneath us.
2911 *
2912 * Only for the time /after/ successful bpf_prog_new_fd()
2913 * and before returning to userspace, we might just hold
2914 * one reference and any parallel close on that fd could
2915 * rip everything out. Hence, below notifications must
2916 * happen before bpf_prog_new_fd().
2917 *
2918 * Also, any failure handling from this point onwards must
2919 * be using bpf_prog_put() given the program is exposed.
2920 */
2921 bpf_prog_kallsyms_add(prog);
2922 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2923 bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2924
2925 err = bpf_prog_new_fd(prog);
2926 if (err < 0)
2927 bpf_prog_put(prog);
2928 return err;
2929
2930free_used_maps:
2931 /* In case we have subprogs, we need to wait for a grace
2932 * period before we can tear down JIT memory since symbols
2933 * are already exposed under kallsyms.
2934 */
2935 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
2936 return err;
2937
2938free_prog_sec:
2939 security_bpf_prog_free(prog);
2940free_prog:
2941 free_uid(prog->aux->user);
2942 if (prog->aux->attach_btf)
2943 btf_put(prog->aux->attach_btf);
2944 bpf_prog_free(prog);
2945put_token:
2946 bpf_token_put(token);
2947 return err;
2948}
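
/* Editor's illustrative sketch (not part of this file): a minimal user-space
 * BPF_PROG_LOAD caller, loading a two-instruction socket filter that drops
 * every packet. The insn macros are the ones from
 * tools/include/linux/filter.h; error handling is omitted:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	// r0 = 0: drop
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {};
 *	int prog_fd;
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns = (__u64)(unsigned long)insns;
 *	attr.insn_cnt = 2;
 *	attr.license = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */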
2949
2950#define BPF_OBJ_LAST_FIELD path_fd
2951
2952static int bpf_obj_pin(const union bpf_attr *attr)
2953{
2954 int path_fd;
2955
2956 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
2957 return -EINVAL;
2958
2959 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2960 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2961 return -EINVAL;
2962
2963 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2964 return bpf_obj_pin_user(attr->bpf_fd, path_fd,
2965 u64_to_user_ptr(attr->pathname));
2966}
2967
2968static int bpf_obj_get(const union bpf_attr *attr)
2969{
2970 int path_fd;
2971
2972 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2973 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
2974 return -EINVAL;
2975
2976 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2977 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2978 return -EINVAL;
2979
2980 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2981 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
2982 attr->file_flags);
2983}
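
/* Editor's illustrative sketch (not part of this file): the pin/get pair
 * above from user space; the path is an example and must live on a bpffs
 * mount:
 *
 *	union bpf_attr attr = {};
 *	int new_fd;
 *
 *	attr.bpf_fd = prog_fd;
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	new_fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */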
2984
2985void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2986 const struct bpf_link_ops *ops, struct bpf_prog *prog)
2987{
2988 atomic64_set(&link->refcnt, 1);
2989 link->type = type;
2990 link->id = 0;
2991 link->ops = ops;
2992 link->prog = prog;
2993}
2994
2995static void bpf_link_free_id(int id)
2996{
2997 if (!id)
2998 return;
2999
3000 spin_lock_bh(&link_idr_lock);
3001 idr_remove(&link_idr, id);
3002 spin_unlock_bh(&link_idr_lock);
3003}
3004
3005/* Clean up bpf_link and corresponding anon_inode file and FD. After
3006 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
3007 * anon_inode's release() call. This helper marks bpf_link as
3008  * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
3009  * refcnt is not decremented; that is the responsibility of the calling code
3010  * that failed to complete bpf_link initialization.
3011 * This helper eventually calls link's dealloc callback, but does not call
3012 * link's release callback.
3013 */
3014void bpf_link_cleanup(struct bpf_link_primer *primer)
3015{
3016 primer->link->prog = NULL;
3017 bpf_link_free_id(primer->id);
3018 fput(primer->file);
3019 put_unused_fd(primer->fd);
3020}
3021
3022void bpf_link_inc(struct bpf_link *link)
3023{
3024 atomic64_inc(&link->refcnt);
3025}
3026
3027static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
3028{
3029 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
3030
3031 /* free bpf_link and its containing memory */
3032 link->ops->dealloc_deferred(link);
3033}
3034
3035static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
3036{
3037 if (rcu_trace_implies_rcu_gp())
3038 bpf_link_defer_dealloc_rcu_gp(rcu);
3039 else
3040 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
3041}
3042
3043/* bpf_link_free is guaranteed to be called from process context */
3044static void bpf_link_free(struct bpf_link *link)
3045{
3046 bool sleepable = false;
3047
3048 bpf_link_free_id(link->id);
3049 if (link->prog) {
3050 sleepable = link->prog->sleepable;
3051 /* detach BPF program, clean up used resources */
3052 link->ops->release(link);
3053 bpf_prog_put(link->prog);
3054 }
3055 if (link->ops->dealloc_deferred) {
3056 /* schedule BPF link deallocation; if underlying BPF program
3057 * is sleepable, we need to first wait for RCU tasks trace
3058 * sync, then go through "classic" RCU grace period; the link
3059 * may be freed any time after the callback is scheduled */
3060 if (sleepable)
3061 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
3062 else
3063 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
3064 } else if (link->ops->dealloc) {
3065 link->ops->dealloc(link);
3066 }
3067}
3068
3069static void bpf_link_put_deferred(struct work_struct *work)
3070{
3071 struct bpf_link *link = container_of(work, struct bpf_link, work);
3072
3073 bpf_link_free(link);
3074}
3075
3076 /* bpf_link_put() might be called from atomic context; freeing the link can
3077  * require sleeping locks, so defer the actual free to process context.
3078  */
3079void bpf_link_put(struct bpf_link *link)
3080{
3081 if (!atomic64_dec_and_test(&link->refcnt))
3082 return;
3083
3084 INIT_WORK(&link->work, bpf_link_put_deferred);
3085 schedule_work(&link->work);
3086}
3087EXPORT_SYMBOL(bpf_link_put);
3088
3089static void bpf_link_put_direct(struct bpf_link *link)
3090{
3091 if (!atomic64_dec_and_test(&link->refcnt))
3092 return;
3093 bpf_link_free(link);
3094}
3095
3096static int bpf_link_release(struct inode *inode, struct file *filp)
3097{
3098 struct bpf_link *link = filp->private_data;
3099
3100 bpf_link_put_direct(link);
3101 return 0;
3102}
3103
3104#ifdef CONFIG_PROC_FS
3105#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
3106#define BPF_MAP_TYPE(_id, _ops)
3107#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
3108static const char *bpf_link_type_strs[] = {
3109 [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
3110#include <linux/bpf_types.h>
3111};
3112#undef BPF_PROG_TYPE
3113#undef BPF_MAP_TYPE
3114#undef BPF_LINK_TYPE
3115
3116static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
3117{
3118 const struct bpf_link *link = filp->private_data;
3119 const struct bpf_prog *prog = link->prog;
3120 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
3121
3122 seq_printf(m,
3123 "link_type:\t%s\n"
3124 "link_id:\t%u\n",
3125 bpf_link_type_strs[link->type],
3126 link->id);
3127 if (prog) {
3128 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
3129 seq_printf(m,
3130 "prog_tag:\t%s\n"
3131 "prog_id:\t%u\n",
3132 prog_tag,
3133 prog->aux->id);
3134 }
3135 if (link->ops->show_fdinfo)
3136 link->ops->show_fdinfo(link, m);
3137}
3138#endif
3139
3140static const struct file_operations bpf_link_fops = {
3141#ifdef CONFIG_PROC_FS
3142 .show_fdinfo = bpf_link_show_fdinfo,
3143#endif
3144 .release = bpf_link_release,
3145 .read = bpf_dummy_read,
3146 .write = bpf_dummy_write,
3147};
3148
3149static int bpf_link_alloc_id(struct bpf_link *link)
3150{
3151 int id;
3152
3153 idr_preload(GFP_KERNEL);
3154 spin_lock_bh(&link_idr_lock);
3155 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
3156 spin_unlock_bh(&link_idr_lock);
3157 idr_preload_end();
3158
3159 return id;
3160}
3161
3162 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
3163  * reserving an unused FD and allocating an ID from link_idr. This is to be
3164  * paired with bpf_link_settle() to install the FD and ID and expose bpf_link
3165  * to user-space, if bpf_link is successfully attached. If not, bpf_link and
3166  * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
3167  * the transient state is passed around in struct bpf_link_primer.
3168  * This is the preferred way to create and initialize a bpf_link, especially
3169  * when there are complicated and expensive operations in between creating the
3170  * bpf_link itself and attaching it to the BPF hook. By using bpf_link_prime()
3171  * and bpf_link_settle(), kernel code using bpf_link doesn't have to perform
3172  * expensive (and potentially failing) roll-back operations in the rare case
3173  * that the file, FD, or ID can't be allocated.
3174  */
3175int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
3176{
3177 struct file *file;
3178 int fd, id;
3179
3180 fd = get_unused_fd_flags(O_CLOEXEC);
3181 if (fd < 0)
3182 return fd;
3183 
3185 id = bpf_link_alloc_id(link);
3186 if (id < 0) {
3187 put_unused_fd(fd);
3188 return id;
3189 }
3190
3191 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
3192 if (IS_ERR(file)) {
3193 bpf_link_free_id(id);
3194 put_unused_fd(fd);
3195 return PTR_ERR(file);
3196 }
3197
3198 primer->link = link;
3199 primer->file = file;
3200 primer->fd = fd;
3201 primer->id = id;
3202 return 0;
3203}
3204
3205int bpf_link_settle(struct bpf_link_primer *primer)
3206{
3207 /* make bpf_link fetchable by ID */
3208 spin_lock_bh(&link_idr_lock);
3209 primer->link->id = primer->id;
3210 spin_unlock_bh(&link_idr_lock);
3211 /* make bpf_link fetchable by FD */
3212 fd_install(primer->fd, primer->file);
3213 /* pass through installed FD */
3214 return primer->fd;
3215}
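
/* Editor's note: the typical shape of a link implementation built on the
 * prime/settle pair above, sketched with a hypothetical foo_attach():
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);		// not exposed yet, plain kfree is fine
 *		return err;
 *	}
 *	err = foo_attach(link);		// expensive step that may fail
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// do NOT kfree after priming
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// expose FD and ID to user space
 */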
3216
3217int bpf_link_new_fd(struct bpf_link *link)
3218{
3219 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
3220}
3221
3222struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3223{
3224 struct fd f = fdget(ufd);
3225 struct bpf_link *link;
3226
3227 if (!f.file)
3228 return ERR_PTR(-EBADF);
3229 if (f.file->f_op != &bpf_link_fops) {
3230 fdput(f);
3231 return ERR_PTR(-EINVAL);
3232 }
3233
3234 link = f.file->private_data;
3235 bpf_link_inc(link);
3236 fdput(f);
3237
3238 return link;
3239}
3240EXPORT_SYMBOL(bpf_link_get_from_fd);
3241
3242static void bpf_tracing_link_release(struct bpf_link *link)
3243{
3244 struct bpf_tracing_link *tr_link =
3245 container_of(link, struct bpf_tracing_link, link.link);
3246
3247 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3248 tr_link->trampoline));
3249
3250 bpf_trampoline_put(tr_link->trampoline);
3251
3252 /* tgt_prog is NULL if target is a kernel function */
3253 if (tr_link->tgt_prog)
3254 bpf_prog_put(tr_link->tgt_prog);
3255}
3256
3257static void bpf_tracing_link_dealloc(struct bpf_link *link)
3258{
3259 struct bpf_tracing_link *tr_link =
3260 container_of(link, struct bpf_tracing_link, link.link);
3261
3262 kfree(tr_link);
3263}
3264
3265static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3266 struct seq_file *seq)
3267{
3268 struct bpf_tracing_link *tr_link =
3269 container_of(link, struct bpf_tracing_link, link.link);
3270 u32 target_btf_id, target_obj_id;
3271
3272 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3273 &target_obj_id, &target_btf_id);
3274 seq_printf(seq,
3275 "attach_type:\t%d\n"
3276 "target_obj_id:\t%u\n"
3277 "target_btf_id:\t%u\n",
3278 tr_link->attach_type,
3279 target_obj_id,
3280 target_btf_id);
3281}
3282
3283static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3284 struct bpf_link_info *info)
3285{
3286 struct bpf_tracing_link *tr_link =
3287 container_of(link, struct bpf_tracing_link, link.link);
3288
3289 info->tracing.attach_type = tr_link->attach_type;
3290 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3291 &info->tracing.target_obj_id,
3292 &info->tracing.target_btf_id);
3293
3294 return 0;
3295}
3296
3297static const struct bpf_link_ops bpf_tracing_link_lops = {
3298 .release = bpf_tracing_link_release,
3299 .dealloc = bpf_tracing_link_dealloc,
3300 .show_fdinfo = bpf_tracing_link_show_fdinfo,
3301 .fill_link_info = bpf_tracing_link_fill_link_info,
3302};
3303
3304static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3305 int tgt_prog_fd,
3306 u32 btf_id,
3307 u64 bpf_cookie)
3308{
3309 struct bpf_link_primer link_primer;
3310 struct bpf_prog *tgt_prog = NULL;
3311 struct bpf_trampoline *tr = NULL;
3312 struct bpf_tracing_link *link;
3313 u64 key = 0;
3314 int err;
3315
3316 switch (prog->type) {
3317 case BPF_PROG_TYPE_TRACING:
3318 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3319 prog->expected_attach_type != BPF_TRACE_FEXIT &&
3320 prog->expected_attach_type != BPF_MODIFY_RETURN) {
3321 err = -EINVAL;
3322 goto out_put_prog;
3323 }
3324 break;
3325 case BPF_PROG_TYPE_EXT:
3326 if (prog->expected_attach_type != 0) {
3327 err = -EINVAL;
3328 goto out_put_prog;
3329 }
3330 break;
3331 case BPF_PROG_TYPE_LSM:
3332 if (prog->expected_attach_type != BPF_LSM_MAC) {
3333 err = -EINVAL;
3334 goto out_put_prog;
3335 }
3336 break;
3337 default:
3338 err = -EINVAL;
3339 goto out_put_prog;
3340 }
3341
3342 if (!!tgt_prog_fd != !!btf_id) {
3343 err = -EINVAL;
3344 goto out_put_prog;
3345 }
3346
3347 if (tgt_prog_fd) {
3348 /*
3349 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this
3350 		 * part is ever changed to implement the same for
3351 		 * BPF_PROG_TYPE_TRACING, do not forget to update how the
3352 		 * attach_tracing_prog flag is set.
3353 */
3354 if (prog->type != BPF_PROG_TYPE_EXT) {
3355 err = -EINVAL;
3356 goto out_put_prog;
3357 }
3358
3359 tgt_prog = bpf_prog_get(tgt_prog_fd);
3360 if (IS_ERR(tgt_prog)) {
3361 err = PTR_ERR(tgt_prog);
3362 tgt_prog = NULL;
3363 goto out_put_prog;
3364 }
3365
3366 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3367 }
3368
3369 link = kzalloc(sizeof(*link), GFP_USER);
3370 if (!link) {
3371 err = -ENOMEM;
3372 goto out_put_prog;
3373 }
3374 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3375 &bpf_tracing_link_lops, prog);
3376 link->attach_type = prog->expected_attach_type;
3377 link->link.cookie = bpf_cookie;
3378
3379 mutex_lock(&prog->aux->dst_mutex);
3380
3381 /* There are a few possible cases here:
3382 *
3383 * - if prog->aux->dst_trampoline is set, the program was just loaded
3384 * and not yet attached to anything, so we can use the values stored
3385 * in prog->aux
3386 *
3387 * - if prog->aux->dst_trampoline is NULL, the program has already been
3388 * attached to a target and its initial target was cleared (below)
3389 *
3390 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3391 * target_btf_id using the link_create API.
3392 *
3393 	 * - if tgt_prog == NULL, this function was called using the old
3394 	 *   raw_tracepoint_open API, and we need a target from prog->aux
3395 *
3396 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
3397 * was detached and is going for re-attachment.
3398 *
3399 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
3400 * are NULL, then program was already attached and user did not provide
3401 * tgt_prog_fd so we have no way to find out or create trampoline
3402 */
3403 if (!prog->aux->dst_trampoline && !tgt_prog) {
3404 /*
3405 * Allow re-attach for TRACING and LSM programs. If it's
3406 * currently linked, bpf_trampoline_link_prog will fail.
3407 * EXT programs need to specify tgt_prog_fd, so they
3408 		 * re-attach via a separate code path.
3409 */
3410 if (prog->type != BPF_PROG_TYPE_TRACING &&
3411 prog->type != BPF_PROG_TYPE_LSM) {
3412 err = -EINVAL;
3413 goto out_unlock;
3414 }
3415 /* We can allow re-attach only if we have valid attach_btf. */
3416 if (!prog->aux->attach_btf) {
3417 err = -EINVAL;
3418 goto out_unlock;
3419 }
3420 btf_id = prog->aux->attach_btf_id;
3421 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3422 }
3423
3424 if (!prog->aux->dst_trampoline ||
3425 (key && key != prog->aux->dst_trampoline->key)) {
3426 /* If there is no saved target, or the specified target is
3427 * different from the destination specified at load time, we
3428 * need a new trampoline and a check for compatibility
3429 */
3430 struct bpf_attach_target_info tgt_info = {};
3431
3432 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3433 &tgt_info);
3434 if (err)
3435 goto out_unlock;
3436
3437 if (tgt_info.tgt_mod) {
3438 module_put(prog->aux->mod);
3439 prog->aux->mod = tgt_info.tgt_mod;
3440 }
3441
3442 tr = bpf_trampoline_get(key, &tgt_info);
3443 if (!tr) {
3444 err = -ENOMEM;
3445 goto out_unlock;
3446 }
3447 } else {
3448 /* The caller didn't specify a target, or the target was the
3449 * same as the destination supplied during program load. This
3450 * means we can reuse the trampoline and reference from program
3451 * load time, and there is no need to allocate a new one. This
3452 * can only happen once for any program, as the saved values in
3453 * prog->aux are cleared below.
3454 */
3455 tr = prog->aux->dst_trampoline;
3456 tgt_prog = prog->aux->dst_prog;
3457 }
3458
3459 err = bpf_link_prime(&link->link.link, &link_primer);
3460 if (err)
3461 goto out_unlock;
3462
3463 err = bpf_trampoline_link_prog(&link->link, tr);
3464 if (err) {
3465 bpf_link_cleanup(&link_primer);
3466 link = NULL;
3467 goto out_unlock;
3468 }
3469
3470 link->tgt_prog = tgt_prog;
3471 link->trampoline = tr;
3472
3473 /* Always clear the trampoline and target prog from prog->aux to make
3474 * sure the original attach destination is not kept alive after a
3475 * program is (re-)attached to another target.
3476 */
3477 if (prog->aux->dst_prog &&
3478 (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3479 /* got extra prog ref from syscall, or attaching to different prog */
3480 bpf_prog_put(prog->aux->dst_prog);
3481 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3482 /* we allocated a new trampoline, so free the old one */
3483 bpf_trampoline_put(prog->aux->dst_trampoline);
3484
3485 prog->aux->dst_prog = NULL;
3486 prog->aux->dst_trampoline = NULL;
3487 mutex_unlock(&prog->aux->dst_mutex);
3488
3489 return bpf_link_settle(&link_primer);
3490out_unlock:
3491 if (tr && tr != prog->aux->dst_trampoline)
3492 bpf_trampoline_put(tr);
3493 mutex_unlock(&prog->aux->dst_mutex);
3494 kfree(link);
3495out_put_prog:
3496 if (tgt_prog_fd && tgt_prog)
3497 bpf_prog_put(tgt_prog);
3498 return err;
3499}
3500
3501struct bpf_raw_tp_link {
3502 struct bpf_link link;
3503 struct bpf_raw_event_map *btp;
3504};
3505
3506static void bpf_raw_tp_link_release(struct bpf_link *link)
3507{
3508 struct bpf_raw_tp_link *raw_tp =
3509 container_of(link, struct bpf_raw_tp_link, link);
3510
3511 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
3512 bpf_put_raw_tracepoint(raw_tp->btp);
3513}
3514
3515static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3516{
3517 struct bpf_raw_tp_link *raw_tp =
3518 container_of(link, struct bpf_raw_tp_link, link);
3519
3520 kfree(raw_tp);
3521}
3522
3523static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3524 struct seq_file *seq)
3525{
3526 struct bpf_raw_tp_link *raw_tp_link =
3527 container_of(link, struct bpf_raw_tp_link, link);
3528
3529 seq_printf(seq,
3530 "tp_name:\t%s\n",
3531 raw_tp_link->btp->tp->name);
3532}
3533
3534static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3535 u32 len)
3536{
3537 if (ulen >= len + 1) {
3538 if (copy_to_user(ubuf, buf, len + 1))
3539 return -EFAULT;
3540 } else {
3541 char zero = '\0';
3542
3543 if (copy_to_user(ubuf, buf, ulen - 1))
3544 return -EFAULT;
3545 if (put_user(zero, ubuf + ulen - 1))
3546 return -EFAULT;
3547 return -ENOSPC;
3548 }
3549
3550 return 0;
3551}
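
/* Editor's note: truncation semantics of the helper above, with invented
 * sizes: copying the 9-char name "sched_foo" into a 4-byte user buffer
 * stores "sch\0" and returns -ENOSPC, so callers can hand back a
 * NUL-terminated prefix while still reporting the full length separately.
 */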
3552
3553static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3554 struct bpf_link_info *info)
3555{
3556 struct bpf_raw_tp_link *raw_tp_link =
3557 container_of(link, struct bpf_raw_tp_link, link);
3558 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3559 const char *tp_name = raw_tp_link->btp->tp->name;
3560 u32 ulen = info->raw_tracepoint.tp_name_len;
3561 size_t tp_len = strlen(tp_name);
3562
3563 if (!ulen ^ !ubuf)
3564 return -EINVAL;
3565
3566 info->raw_tracepoint.tp_name_len = tp_len + 1;
3567
3568 if (!ubuf)
3569 return 0;
3570
3571 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3572}
3573
3574static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3575 .release = bpf_raw_tp_link_release,
3576 .dealloc_deferred = bpf_raw_tp_link_dealloc,
3577 .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3578 .fill_link_info = bpf_raw_tp_link_fill_link_info,
3579};
3580
3581#ifdef CONFIG_PERF_EVENTS
3582struct bpf_perf_link {
3583 struct bpf_link link;
3584 struct file *perf_file;
3585};
3586
3587static void bpf_perf_link_release(struct bpf_link *link)
3588{
3589 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3590 struct perf_event *event = perf_link->perf_file->private_data;
3591
3592 perf_event_free_bpf_prog(event);
3593 fput(perf_link->perf_file);
3594}
3595
3596static void bpf_perf_link_dealloc(struct bpf_link *link)
3597{
3598 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3599
3600 kfree(perf_link);
3601}
3602
3603static int bpf_perf_link_fill_common(const struct perf_event *event,
3604 char __user *uname, u32 ulen,
3605 u64 *probe_offset, u64 *probe_addr,
3606 u32 *fd_type, unsigned long *missed)
3607{
3608 const char *buf;
3609 u32 prog_id;
3610 size_t len;
3611 int err;
3612
3613 if (!ulen ^ !uname)
3614 return -EINVAL;
3615
3616 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3617 probe_offset, probe_addr, missed);
3618 if (err)
3619 return err;
3620 if (!uname)
3621 return 0;
3622 if (buf) {
3623 len = strlen(buf);
3624 err = bpf_copy_to_user(uname, buf, ulen, len);
3625 if (err)
3626 return err;
3627 } else {
3628 char zero = '\0';
3629
3630 if (put_user(zero, uname))
3631 return -EFAULT;
3632 }
3633 return 0;
3634}
3635
3636#ifdef CONFIG_KPROBE_EVENTS
3637static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3638 struct bpf_link_info *info)
3639{
3640 unsigned long missed;
3641 char __user *uname;
3642 u64 addr, offset;
3643 u32 ulen, type;
3644 int err;
3645
3646 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3647 ulen = info->perf_event.kprobe.name_len;
3648 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3649 &type, &missed);
3650 if (err)
3651 return err;
3652 if (type == BPF_FD_TYPE_KRETPROBE)
3653 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3654 else
3655 info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3656
3657 info->perf_event.kprobe.offset = offset;
3658 info->perf_event.kprobe.missed = missed;
3659 if (!kallsyms_show_value(current_cred()))
3660 addr = 0;
3661 info->perf_event.kprobe.addr = addr;
3662 info->perf_event.kprobe.cookie = event->bpf_cookie;
3663 return 0;
3664}
3665#endif
3666
3667#ifdef CONFIG_UPROBE_EVENTS
3668static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3669 struct bpf_link_info *info)
3670{
3671 char __user *uname;
3672 u64 addr, offset;
3673 u32 ulen, type;
3674 int err;
3675
3676 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3677 ulen = info->perf_event.uprobe.name_len;
3678 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3679 &type, NULL);
3680 if (err)
3681 return err;
3682
3683 if (type == BPF_FD_TYPE_URETPROBE)
3684 info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3685 else
3686 info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3687 info->perf_event.uprobe.offset = offset;
3688 info->perf_event.uprobe.cookie = event->bpf_cookie;
3689 return 0;
3690}
3691#endif
3692
3693static int bpf_perf_link_fill_probe(const struct perf_event *event,
3694 struct bpf_link_info *info)
3695{
3696#ifdef CONFIG_KPROBE_EVENTS
3697 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3698 return bpf_perf_link_fill_kprobe(event, info);
3699#endif
3700#ifdef CONFIG_UPROBE_EVENTS
3701 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3702 return bpf_perf_link_fill_uprobe(event, info);
3703#endif
3704 return -EOPNOTSUPP;
3705}
3706
3707static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3708 struct bpf_link_info *info)
3709{
3710 char __user *uname;
3711 u32 ulen;
3712
3713 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3714 ulen = info->perf_event.tracepoint.name_len;
3715 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3716 info->perf_event.tracepoint.cookie = event->bpf_cookie;
3717 return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
3718}
3719
3720static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3721 struct bpf_link_info *info)
3722{
3723 info->perf_event.event.type = event->attr.type;
3724 info->perf_event.event.config = event->attr.config;
3725 info->perf_event.event.cookie = event->bpf_cookie;
3726 info->perf_event.type = BPF_PERF_EVENT_EVENT;
3727 return 0;
3728}
3729
3730static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3731 struct bpf_link_info *info)
3732{
3733 struct bpf_perf_link *perf_link;
3734 const struct perf_event *event;
3735
3736 perf_link = container_of(link, struct bpf_perf_link, link);
3737 event = perf_get_event(perf_link->perf_file);
3738 if (IS_ERR(event))
3739 return PTR_ERR(event);
3740
3741 switch (event->prog->type) {
3742 case BPF_PROG_TYPE_PERF_EVENT:
3743 return bpf_perf_link_fill_perf_event(event, info);
3744 case BPF_PROG_TYPE_TRACEPOINT:
3745 return bpf_perf_link_fill_tracepoint(event, info);
3746 case BPF_PROG_TYPE_KPROBE:
3747 return bpf_perf_link_fill_probe(event, info);
3748 default:
3749 return -EOPNOTSUPP;
3750 }
3751}
3752
3753static const struct bpf_link_ops bpf_perf_link_lops = {
3754 .release = bpf_perf_link_release,
3755 .dealloc = bpf_perf_link_dealloc,
3756 .fill_link_info = bpf_perf_link_fill_link_info,
3757};
3758
3759static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3760{
3761 struct bpf_link_primer link_primer;
3762 struct bpf_perf_link *link;
3763 struct perf_event *event;
3764 struct file *perf_file;
3765 int err;
3766
3767 if (attr->link_create.flags)
3768 return -EINVAL;
3769
3770 perf_file = perf_event_get(attr->link_create.target_fd);
3771 if (IS_ERR(perf_file))
3772 return PTR_ERR(perf_file);
3773
3774 link = kzalloc(sizeof(*link), GFP_USER);
3775 if (!link) {
3776 err = -ENOMEM;
3777 goto out_put_file;
3778 }
3779 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3780 link->perf_file = perf_file;
3781
3782 err = bpf_link_prime(&link->link, &link_primer);
3783 if (err) {
3784 kfree(link);
3785 goto out_put_file;
3786 }
3787
3788 event = perf_file->private_data;
3789 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3790 if (err) {
3791 bpf_link_cleanup(&link_primer);
3792 goto out_put_file;
3793 }
3794 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3795 bpf_prog_inc(prog);
3796
3797 return bpf_link_settle(&link_primer);
3798
3799out_put_file:
3800 fput(perf_file);
3801 return err;
3802}
3803#else
3804static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3805{
3806 return -EOPNOTSUPP;
3807}
3808#endif /* CONFIG_PERF_EVENTS */
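
/* Editor's illustrative sketch (not part of this file): attaching a loaded
 * kprobe program to an existing perf event through the path above; the fds
 * and cookie value are invented:
 *
 *	union bpf_attr attr = {};
 *	int link_fd;
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.target_fd = perf_event_fd;	// from perf_event_open()
 *	attr.link_create.attach_type = BPF_PERF_EVENT;
 *	attr.link_create.perf_event.bpf_cookie = 0xc0ffee;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */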
3809
3810static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3811 const char __user *user_tp_name)
3812{
3813 struct bpf_link_primer link_primer;
3814 struct bpf_raw_tp_link *link;
3815 struct bpf_raw_event_map *btp;
3816 const char *tp_name;
3817 char buf[128];
3818 int err;
3819
3820 switch (prog->type) {
3821 case BPF_PROG_TYPE_TRACING:
3822 case BPF_PROG_TYPE_EXT:
3823 case BPF_PROG_TYPE_LSM:
3824 if (user_tp_name)
3825 /* The attach point for this category of programs
3826 * should be specified via btf_id during program load.
3827 */
3828 return -EINVAL;
3829 if (prog->type == BPF_PROG_TYPE_TRACING &&
3830 prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3831 tp_name = prog->aux->attach_func_name;
3832 break;
3833 }
3834 return bpf_tracing_prog_attach(prog, 0, 0, 0);
3835 case BPF_PROG_TYPE_RAW_TRACEPOINT:
3836 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3837 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3838 return -EFAULT;
3839 buf[sizeof(buf) - 1] = 0;
3840 tp_name = buf;
3841 break;
3842 default:
3843 return -EINVAL;
3844 }
3845
3846 btp = bpf_get_raw_tracepoint(tp_name);
3847 if (!btp)
3848 return -ENOENT;
3849
3850 link = kzalloc(sizeof(*link), GFP_USER);
3851 if (!link) {
3852 err = -ENOMEM;
3853 goto out_put_btp;
3854 }
3855 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3856 &bpf_raw_tp_link_lops, prog);
3857 link->btp = btp;
3858
3859 err = bpf_link_prime(&link->link, &link_primer);
3860 if (err) {
3861 kfree(link);
3862 goto out_put_btp;
3863 }
3864
3865 err = bpf_probe_register(link->btp, prog);
3866 if (err) {
3867 bpf_link_cleanup(&link_primer);
3868 goto out_put_btp;
3869 }
3870
3871 return bpf_link_settle(&link_primer);
3872
3873out_put_btp:
3874 bpf_put_raw_tracepoint(btp);
3875 return err;
3876}
3877
3878#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
3879
3880static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3881{
3882 struct bpf_prog *prog;
3883 int fd;
3884
3885 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
3886 return -EINVAL;
3887
3888 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
3889 if (IS_ERR(prog))
3890 return PTR_ERR(prog);
3891
3892 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
3893 if (fd < 0)
3894 bpf_prog_put(prog);
3895 return fd;
3896}
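
/* Editor's illustrative sketch (not part of this file): opening a raw
 * tracepoint for a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program; the
 * tracepoint name is an example:
 *
 *	union bpf_attr attr = {};
 *	int link_fd;
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */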
3897
3898static enum bpf_prog_type
3899attach_type_to_prog_type(enum bpf_attach_type attach_type)
3900{
3901 switch (attach_type) {
3902 case BPF_CGROUP_INET_INGRESS:
3903 case BPF_CGROUP_INET_EGRESS:
3904 return BPF_PROG_TYPE_CGROUP_SKB;
3905 case BPF_CGROUP_INET_SOCK_CREATE:
3906 case BPF_CGROUP_INET_SOCK_RELEASE:
3907 case BPF_CGROUP_INET4_POST_BIND:
3908 case BPF_CGROUP_INET6_POST_BIND:
3909 return BPF_PROG_TYPE_CGROUP_SOCK;
3910 case BPF_CGROUP_INET4_BIND:
3911 case BPF_CGROUP_INET6_BIND:
3912 case BPF_CGROUP_INET4_CONNECT:
3913 case BPF_CGROUP_INET6_CONNECT:
3914 case BPF_CGROUP_UNIX_CONNECT:
3915 case BPF_CGROUP_INET4_GETPEERNAME:
3916 case BPF_CGROUP_INET6_GETPEERNAME:
3917 case BPF_CGROUP_UNIX_GETPEERNAME:
3918 case BPF_CGROUP_INET4_GETSOCKNAME:
3919 case BPF_CGROUP_INET6_GETSOCKNAME:
3920 case BPF_CGROUP_UNIX_GETSOCKNAME:
3921 case BPF_CGROUP_UDP4_SENDMSG:
3922 case BPF_CGROUP_UDP6_SENDMSG:
3923 case BPF_CGROUP_UNIX_SENDMSG:
3924 case BPF_CGROUP_UDP4_RECVMSG:
3925 case BPF_CGROUP_UDP6_RECVMSG:
3926 case BPF_CGROUP_UNIX_RECVMSG:
3927 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3928 case BPF_CGROUP_SOCK_OPS:
3929 return BPF_PROG_TYPE_SOCK_OPS;
3930 case BPF_CGROUP_DEVICE:
3931 return BPF_PROG_TYPE_CGROUP_DEVICE;
3932 case BPF_SK_MSG_VERDICT:
3933 return BPF_PROG_TYPE_SK_MSG;
3934 case BPF_SK_SKB_STREAM_PARSER:
3935 case BPF_SK_SKB_STREAM_VERDICT:
3936 case BPF_SK_SKB_VERDICT:
3937 return BPF_PROG_TYPE_SK_SKB;
3938 case BPF_LIRC_MODE2:
3939 return BPF_PROG_TYPE_LIRC_MODE2;
3940 case BPF_FLOW_DISSECTOR:
3941 return BPF_PROG_TYPE_FLOW_DISSECTOR;
3942 case BPF_CGROUP_SYSCTL:
3943 return BPF_PROG_TYPE_CGROUP_SYSCTL;
3944 case BPF_CGROUP_GETSOCKOPT:
3945 case BPF_CGROUP_SETSOCKOPT:
3946 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3947 case BPF_TRACE_ITER:
3948 case BPF_TRACE_RAW_TP:
3949 case BPF_TRACE_FENTRY:
3950 case BPF_TRACE_FEXIT:
3951 case BPF_MODIFY_RETURN:
3952 return BPF_PROG_TYPE_TRACING;
3953 case BPF_LSM_MAC:
3954 return BPF_PROG_TYPE_LSM;
3955 case BPF_SK_LOOKUP:
3956 return BPF_PROG_TYPE_SK_LOOKUP;
3957 case BPF_XDP:
3958 return BPF_PROG_TYPE_XDP;
3959 case BPF_LSM_CGROUP:
3960 return BPF_PROG_TYPE_LSM;
3961 case BPF_TCX_INGRESS:
3962 case BPF_TCX_EGRESS:
3963 case BPF_NETKIT_PRIMARY:
3964 case BPF_NETKIT_PEER:
3965 return BPF_PROG_TYPE_SCHED_CLS;
3966 default:
3967 return BPF_PROG_TYPE_UNSPEC;
3968 }
3969}
3970
3971static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3972 enum bpf_attach_type attach_type)
3973{
3974 enum bpf_prog_type ptype;
3975
3976 switch (prog->type) {
3977 case BPF_PROG_TYPE_CGROUP_SOCK:
3978 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3979 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3980 case BPF_PROG_TYPE_SK_LOOKUP:
3981 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3982 case BPF_PROG_TYPE_CGROUP_SKB:
3983 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
3984 /* cg-skb progs can be loaded by unpriv user.
3985 * check permissions at attach time.
3986 */
3987 return -EPERM;
3988
3989 ptype = attach_type_to_prog_type(attach_type);
3990 if (prog->type != ptype)
3991 return -EINVAL;
3992
3993 return prog->enforce_expected_attach_type &&
3994 prog->expected_attach_type != attach_type ?
3995 -EINVAL : 0;
3996 case BPF_PROG_TYPE_EXT:
3997 return 0;
3998 case BPF_PROG_TYPE_NETFILTER:
3999 if (attach_type != BPF_NETFILTER)
4000 return -EINVAL;
4001 return 0;
4002 case BPF_PROG_TYPE_PERF_EVENT:
4003 case BPF_PROG_TYPE_TRACEPOINT:
4004 if (attach_type != BPF_PERF_EVENT)
4005 return -EINVAL;
4006 return 0;
4007 case BPF_PROG_TYPE_KPROBE:
4008 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
4009 attach_type != BPF_TRACE_KPROBE_MULTI)
4010 return -EINVAL;
4011 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
4012 attach_type != BPF_TRACE_UPROBE_MULTI)
4013 return -EINVAL;
4014 if (attach_type != BPF_PERF_EVENT &&
4015 attach_type != BPF_TRACE_KPROBE_MULTI &&
4016 attach_type != BPF_TRACE_UPROBE_MULTI)
4017 return -EINVAL;
4018 return 0;
4019 case BPF_PROG_TYPE_SCHED_CLS:
4020 if (attach_type != BPF_TCX_INGRESS &&
4021 attach_type != BPF_TCX_EGRESS &&
4022 attach_type != BPF_NETKIT_PRIMARY &&
4023 attach_type != BPF_NETKIT_PEER)
4024 return -EINVAL;
4025 return 0;
4026 default:
4027 ptype = attach_type_to_prog_type(attach_type);
4028 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
4029 return -EINVAL;
4030 return 0;
4031 }
4032}
4033
4034#define BPF_PROG_ATTACH_LAST_FIELD expected_revision
4035
4036#define BPF_F_ATTACH_MASK_BASE \
4037 (BPF_F_ALLOW_OVERRIDE | \
4038 BPF_F_ALLOW_MULTI | \
4039 BPF_F_REPLACE)
4040
4041#define BPF_F_ATTACH_MASK_MPROG \
4042 (BPF_F_REPLACE | \
4043 BPF_F_BEFORE | \
4044 BPF_F_AFTER | \
4045 BPF_F_ID | \
4046 BPF_F_LINK)
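
/*
 * BPF_F_ATTACH_MASK_BASE covers the legacy cgroup-style attach flags;
 * BPF_F_ATTACH_MASK_MPROG covers the multi-prog ordering flags used by
 * tcx and netkit. A minimal userspace sketch of an mprog attach that
 * inserts a program before an already attached one (illustrative only;
 * ifindex, prog_fd and anchor_prog_fd are assumed to exist, and real
 * callers normally go through libbpf rather than the raw syscall):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_ifindex = ifindex;
 *	attr.attach_bpf_fd  = prog_fd;
 *	attr.attach_type    = BPF_TCX_INGRESS;
 *	attr.attach_flags   = BPF_F_BEFORE;
 *	attr.relative_fd    = anchor_prog_fd;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */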
4047
4048static int bpf_prog_attach(const union bpf_attr *attr)
4049{
4050 enum bpf_prog_type ptype;
4051 struct bpf_prog *prog;
4052 int ret;
4053
4054 if (CHECK_ATTR(BPF_PROG_ATTACH))
4055 return -EINVAL;
4056
4057 ptype = attach_type_to_prog_type(attr->attach_type);
4058 if (ptype == BPF_PROG_TYPE_UNSPEC)
4059 return -EINVAL;
4060 if (bpf_mprog_supported(ptype)) {
4061 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4062 return -EINVAL;
4063 } else {
4064 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
4065 return -EINVAL;
4066 if (attr->relative_fd ||
4067 attr->expected_revision)
4068 return -EINVAL;
4069 }
4070
4071 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4072 if (IS_ERR(prog))
4073 return PTR_ERR(prog);
4074
4075 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
4076 bpf_prog_put(prog);
4077 return -EINVAL;
4078 }
4079
4080 switch (ptype) {
4081 case BPF_PROG_TYPE_SK_SKB:
4082 case BPF_PROG_TYPE_SK_MSG:
4083 ret = sock_map_get_from_fd(attr, prog);
4084 break;
4085 case BPF_PROG_TYPE_LIRC_MODE2:
4086 ret = lirc_prog_attach(attr, prog);
4087 break;
4088 case BPF_PROG_TYPE_FLOW_DISSECTOR:
4089 ret = netns_bpf_prog_attach(attr, prog);
4090 break;
4091 case BPF_PROG_TYPE_CGROUP_DEVICE:
4092 case BPF_PROG_TYPE_CGROUP_SKB:
4093 case BPF_PROG_TYPE_CGROUP_SOCK:
4094 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4095 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4096 case BPF_PROG_TYPE_CGROUP_SYSCTL:
4097 case BPF_PROG_TYPE_SOCK_OPS:
4098 case BPF_PROG_TYPE_LSM:
4099 if (ptype == BPF_PROG_TYPE_LSM &&
4100 prog->expected_attach_type != BPF_LSM_CGROUP)
4101 ret = -EINVAL;
4102 else
4103 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
4104 break;
4105 case BPF_PROG_TYPE_SCHED_CLS:
4106 if (attr->attach_type == BPF_TCX_INGRESS ||
4107 attr->attach_type == BPF_TCX_EGRESS)
4108 ret = tcx_prog_attach(attr, prog);
4109 else
4110 ret = netkit_prog_attach(attr, prog);
4111 break;
4112 default:
4113 ret = -EINVAL;
4114 }
4115
4116 if (ret)
4117 bpf_prog_put(prog);
4118 return ret;
4119}
4120
4121#define BPF_PROG_DETACH_LAST_FIELD expected_revision
4122
4123static int bpf_prog_detach(const union bpf_attr *attr)
4124{
4125 struct bpf_prog *prog = NULL;
4126 enum bpf_prog_type ptype;
4127 int ret;
4128
4129 if (CHECK_ATTR(BPF_PROG_DETACH))
4130 return -EINVAL;
4131
4132 ptype = attach_type_to_prog_type(attr->attach_type);
4133 if (bpf_mprog_supported(ptype)) {
4134 if (ptype == BPF_PROG_TYPE_UNSPEC)
4135 return -EINVAL;
4136 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4137 return -EINVAL;
4138 if (attr->attach_bpf_fd) {
4139 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4140 if (IS_ERR(prog))
4141 return PTR_ERR(prog);
4142 }
4143 } else if (attr->attach_flags ||
4144 attr->relative_fd ||
4145 attr->expected_revision) {
4146 return -EINVAL;
4147 }
4148
4149 switch (ptype) {
4150 case BPF_PROG_TYPE_SK_MSG:
4151 case BPF_PROG_TYPE_SK_SKB:
4152 ret = sock_map_prog_detach(attr, ptype);
4153 break;
4154 case BPF_PROG_TYPE_LIRC_MODE2:
4155 ret = lirc_prog_detach(attr);
4156 break;
4157 case BPF_PROG_TYPE_FLOW_DISSECTOR:
4158 ret = netns_bpf_prog_detach(attr, ptype);
4159 break;
4160 case BPF_PROG_TYPE_CGROUP_DEVICE:
4161 case BPF_PROG_TYPE_CGROUP_SKB:
4162 case BPF_PROG_TYPE_CGROUP_SOCK:
4163 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4164 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4165 case BPF_PROG_TYPE_CGROUP_SYSCTL:
4166 case BPF_PROG_TYPE_SOCK_OPS:
4167 case BPF_PROG_TYPE_LSM:
4168 ret = cgroup_bpf_prog_detach(attr, ptype);
4169 break;
4170 case BPF_PROG_TYPE_SCHED_CLS:
4171 if (attr->attach_type == BPF_TCX_INGRESS ||
4172 attr->attach_type == BPF_TCX_EGRESS)
4173 ret = tcx_prog_detach(attr, prog);
4174 else
4175 ret = netkit_prog_detach(attr, prog);
4176 break;
4177 default:
4178 ret = -EINVAL;
4179 }
4180
4181 if (prog)
4182 bpf_prog_put(prog);
4183 return ret;
4184}
4185
4186#define BPF_PROG_QUERY_LAST_FIELD query.revision
4187
4188static int bpf_prog_query(const union bpf_attr *attr,
4189 union bpf_attr __user *uattr)
4190{
4191 if (!bpf_net_capable())
4192 return -EPERM;
4193 if (CHECK_ATTR(BPF_PROG_QUERY))
4194 return -EINVAL;
4195 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
4196 return -EINVAL;
4197
4198 switch (attr->query.attach_type) {
4199 case BPF_CGROUP_INET_INGRESS:
4200 case BPF_CGROUP_INET_EGRESS:
4201 case BPF_CGROUP_INET_SOCK_CREATE:
4202 case BPF_CGROUP_INET_SOCK_RELEASE:
4203 case BPF_CGROUP_INET4_BIND:
4204 case BPF_CGROUP_INET6_BIND:
4205 case BPF_CGROUP_INET4_POST_BIND:
4206 case BPF_CGROUP_INET6_POST_BIND:
4207 case BPF_CGROUP_INET4_CONNECT:
4208 case BPF_CGROUP_INET6_CONNECT:
4209 case BPF_CGROUP_UNIX_CONNECT:
4210 case BPF_CGROUP_INET4_GETPEERNAME:
4211 case BPF_CGROUP_INET6_GETPEERNAME:
4212 case BPF_CGROUP_UNIX_GETPEERNAME:
4213 case BPF_CGROUP_INET4_GETSOCKNAME:
4214 case BPF_CGROUP_INET6_GETSOCKNAME:
4215 case BPF_CGROUP_UNIX_GETSOCKNAME:
4216 case BPF_CGROUP_UDP4_SENDMSG:
4217 case BPF_CGROUP_UDP6_SENDMSG:
4218 case BPF_CGROUP_UNIX_SENDMSG:
4219 case BPF_CGROUP_UDP4_RECVMSG:
4220 case BPF_CGROUP_UDP6_RECVMSG:
4221 case BPF_CGROUP_UNIX_RECVMSG:
4222 case BPF_CGROUP_SOCK_OPS:
4223 case BPF_CGROUP_DEVICE:
4224 case BPF_CGROUP_SYSCTL:
4225 case BPF_CGROUP_GETSOCKOPT:
4226 case BPF_CGROUP_SETSOCKOPT:
4227 case BPF_LSM_CGROUP:
4228 return cgroup_bpf_prog_query(attr, uattr);
4229 case BPF_LIRC_MODE2:
4230 return lirc_prog_query(attr, uattr);
4231 case BPF_FLOW_DISSECTOR:
4232 case BPF_SK_LOOKUP:
4233 return netns_bpf_prog_query(attr, uattr);
4234 case BPF_SK_SKB_STREAM_PARSER:
4235 case BPF_SK_SKB_STREAM_VERDICT:
4236 case BPF_SK_MSG_VERDICT:
4237 case BPF_SK_SKB_VERDICT:
4238 return sock_map_bpf_prog_query(attr, uattr);
4239 case BPF_TCX_INGRESS:
4240 case BPF_TCX_EGRESS:
4241 return tcx_prog_query(attr, uattr);
4242 case BPF_NETKIT_PRIMARY:
4243 case BPF_NETKIT_PEER:
4244 return netkit_prog_query(attr, uattr);
4245 default:
4246 return -EINVAL;
4247 }
4248}
4249
4250#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
4251
4252static int bpf_prog_test_run(const union bpf_attr *attr,
4253 union bpf_attr __user *uattr)
4254{
4255 struct bpf_prog *prog;
4256 int ret = -ENOTSUPP;
4257
4258 if (CHECK_ATTR(BPF_PROG_TEST_RUN))
4259 return -EINVAL;
4260
4261 if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
4262 (!attr->test.ctx_size_in && attr->test.ctx_in))
4263 return -EINVAL;
4264
4265 if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
4266 (!attr->test.ctx_size_out && attr->test.ctx_out))
4267 return -EINVAL;
4268
4269 prog = bpf_prog_get(attr->test.prog_fd);
4270 if (IS_ERR(prog))
4271 return PTR_ERR(prog);
4272
4273 if (prog->aux->ops->test_run)
4274 ret = prog->aux->ops->test_run(prog, attr, uattr);
4275
4276 bpf_prog_put(prog);
4277 return ret;
4278}
4279
4280#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
4281
4282static int bpf_obj_get_next_id(const union bpf_attr *attr,
4283 union bpf_attr __user *uattr,
4284 struct idr *idr,
4285 spinlock_t *lock)
4286{
4287 u32 next_id = attr->start_id;
4288 int err = 0;
4289
4290 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
4291 return -EINVAL;
4292
4293 if (!capable(CAP_SYS_ADMIN))
4294 return -EPERM;
4295
4296 next_id++;
4297 spin_lock_bh(lock);
4298 if (!idr_get_next(idr, &next_id))
4299 err = -ENOENT;
4300 spin_unlock_bh(lock);
4301
4302 if (!err)
4303 err = put_user(next_id, &uattr->next_id);
4304
4305 return err;
4306}
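
/*
 * Userspace typically walks all object IDs by feeding each returned
 * next_id back in as start_id until the call fails with -ENOENT.
 * A rough sketch (illustrative only, error handling elided; note the
 * command requires CAP_SYS_ADMIN):
 *
 *	union bpf_attr attr = {};
 *
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		// attr.next_id is a live prog ID at this point
 *		attr.start_id = attr.next_id;
 *	}
 */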
4307
4308struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
4309{
4310 struct bpf_map *map;
4311
4312 spin_lock_bh(&map_idr_lock);
4313again:
4314 map = idr_get_next(&map_idr, id);
4315 if (map) {
4316 map = __bpf_map_inc_not_zero(map, false);
4317 if (IS_ERR(map)) {
4318 (*id)++;
4319 goto again;
4320 }
4321 }
4322 spin_unlock_bh(&map_idr_lock);
4323
4324 return map;
4325}
4326
4327struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
4328{
4329 struct bpf_prog *prog;
4330
4331 spin_lock_bh(&prog_idr_lock);
4332again:
4333 prog = idr_get_next(&prog_idr, id);
4334 if (prog) {
4335 prog = bpf_prog_inc_not_zero(prog);
4336 if (IS_ERR(prog)) {
4337 (*id)++;
4338 goto again;
4339 }
4340 }
4341 spin_unlock_bh(&prog_idr_lock);
4342
4343 return prog;
4344}
4345
4346#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
4347
4348struct bpf_prog *bpf_prog_by_id(u32 id)
4349{
4350 struct bpf_prog *prog;
4351
4352 if (!id)
4353 return ERR_PTR(-ENOENT);
4354
4355 spin_lock_bh(&prog_idr_lock);
4356 prog = idr_find(&prog_idr, id);
4357 if (prog)
4358 prog = bpf_prog_inc_not_zero(prog);
4359 else
4360 prog = ERR_PTR(-ENOENT);
4361 spin_unlock_bh(&prog_idr_lock);
4362 return prog;
4363}
4364
4365static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
4366{
4367 struct bpf_prog *prog;
4368 u32 id = attr->prog_id;
4369 int fd;
4370
4371 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
4372 return -EINVAL;
4373
4374 if (!capable(CAP_SYS_ADMIN))
4375 return -EPERM;
4376
4377 prog = bpf_prog_by_id(id);
4378 if (IS_ERR(prog))
4379 return PTR_ERR(prog);
4380
4381 fd = bpf_prog_new_fd(prog);
4382 if (fd < 0)
4383 bpf_prog_put(prog);
4384
4385 return fd;
4386}
4387
4388#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
4389
4390static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
4391{
4392 struct bpf_map *map;
4393 u32 id = attr->map_id;
4394 int f_flags;
4395 int fd;
4396
4397 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
4398 attr->open_flags & ~BPF_OBJ_FLAG_MASK)
4399 return -EINVAL;
4400
4401 if (!capable(CAP_SYS_ADMIN))
4402 return -EPERM;
4403
4404 f_flags = bpf_get_file_flag(attr->open_flags);
4405 if (f_flags < 0)
4406 return f_flags;
4407
4408 spin_lock_bh(&map_idr_lock);
4409 map = idr_find(&map_idr, id);
4410 if (map)
4411 map = __bpf_map_inc_not_zero(map, true);
4412 else
4413 map = ERR_PTR(-ENOENT);
4414 spin_unlock_bh(&map_idr_lock);
4415
4416 if (IS_ERR(map))
4417 return PTR_ERR(map);
4418
4419 fd = bpf_map_new_fd(map, f_flags);
4420 if (fd < 0)
4421 bpf_map_put_with_uref(map);
4422
4423 return fd;
4424}
4425
4426static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4427 unsigned long addr, u32 *off,
4428 u32 *type)
4429{
4430 const struct bpf_map *map;
4431 int i;
4432
4433 mutex_lock(&prog->aux->used_maps_mutex);
4434 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
4435 map = prog->aux->used_maps[i];
4436 if (map == (void *)addr) {
4437 *type = BPF_PSEUDO_MAP_FD;
4438 goto out;
4439 }
4440 if (!map->ops->map_direct_value_meta)
4441 continue;
4442 if (!map->ops->map_direct_value_meta(map, addr, off)) {
4443 *type = BPF_PSEUDO_MAP_VALUE;
4444 goto out;
4445 }
4446 }
4447 map = NULL;
4448
4449out:
4450 mutex_unlock(&prog->aux->used_maps_mutex);
4451 return map;
4452}
4453
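/*
 * bpf_insn_prepare_dump() sanitizes the xlated instruction image that
 * is handed to userspace so it cannot leak kernel addresses: map
 * pointers loaded via BPF_LD_IMM64 are rewritten back into map IDs
 * (plus an optional value offset), probe-memory loads are shown as
 * plain loads, and call immediates are zeroed for credentials that
 * may not see raw addresses.
 */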
4454static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4455 const struct cred *f_cred)
4456{
4457 const struct bpf_map *map;
4458 struct bpf_insn *insns;
4459 u32 off, type;
4460 u64 imm;
4461 u8 code;
4462 int i;
4463
4464 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
4465 GFP_USER);
4466 if (!insns)
4467 return insns;
4468
4469 for (i = 0; i < prog->len; i++) {
4470 code = insns[i].code;
4471
4472 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
4473 insns[i].code = BPF_JMP | BPF_CALL;
4474 insns[i].imm = BPF_FUNC_tail_call;
4475 /* fall-through */
4476 }
4477 if (code == (BPF_JMP | BPF_CALL) ||
4478 code == (BPF_JMP | BPF_CALL_ARGS)) {
4479 if (code == (BPF_JMP | BPF_CALL_ARGS))
4480 insns[i].code = BPF_JMP | BPF_CALL;
4481 if (!bpf_dump_raw_ok(f_cred))
4482 insns[i].imm = 0;
4483 continue;
4484 }
4485 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
4486 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
4487 continue;
4488 }
4489
4490 if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||
4491 BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {
4492 insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM;
4493 continue;
4494 }
4495
4496 if (code != (BPF_LD | BPF_IMM | BPF_DW))
4497 continue;
4498
4499 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
4500 map = bpf_map_from_imm(prog, imm, &off, &type);
4501 if (map) {
4502 insns[i].src_reg = type;
4503 insns[i].imm = map->id;
4504 insns[i + 1].imm = off;
4505 continue;
4506 }
4507 }
4508
4509 return insns;
4510}
4511
4512static int set_info_rec_size(struct bpf_prog_info *info)
4513{
4514 /*
4515 * Ensure each info.*_rec_size matches the size the kernel expects,
4516 *
4517 * or
4518 *
4519 * only allow a zero *_rec_size when the matching _cnt is also
4520 * zero. In that case, the kernel will write the expected
4521 * _rec_size back into the info.
4522 */
4523
4524 if ((info->nr_func_info || info->func_info_rec_size) &&
4525 info->func_info_rec_size != sizeof(struct bpf_func_info))
4526 return -EINVAL;
4527
4528 if ((info->nr_line_info || info->line_info_rec_size) &&
4529 info->line_info_rec_size != sizeof(struct bpf_line_info))
4530 return -EINVAL;
4531
4532 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
4533 info->jited_line_info_rec_size != sizeof(__u64))
4534 return -EINVAL;
4535
4536 info->func_info_rec_size = sizeof(struct bpf_func_info);
4537 info->line_info_rec_size = sizeof(struct bpf_line_info);
4538 info->jited_line_info_rec_size = sizeof(__u64);
4539
4540 return 0;
4541}
4542
4543static int bpf_prog_get_info_by_fd(struct file *file,
4544 struct bpf_prog *prog,
4545 const union bpf_attr *attr,
4546 union bpf_attr __user *uattr)
4547{
4548 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4549 struct btf *attach_btf = bpf_prog_get_target_btf(prog);
4550 struct bpf_prog_info info;
4551 u32 info_len = attr->info.info_len;
4552 struct bpf_prog_kstats stats;
4553 char __user *uinsns;
4554 u32 ulen;
4555 int err;
4556
4557 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4558 if (err)
4559 return err;
4560 info_len = min_t(u32, sizeof(info), info_len);
4561
4562 memset(&info, 0, sizeof(info));
4563 if (copy_from_user(&info, uinfo, info_len))
4564 return -EFAULT;
4565
4566 info.type = prog->type;
4567 info.id = prog->aux->id;
4568 info.load_time = prog->aux->load_time;
4569 info.created_by_uid = from_kuid_munged(current_user_ns(),
4570 prog->aux->user->uid);
4571 info.gpl_compatible = prog->gpl_compatible;
4572
4573 memcpy(info.tag, prog->tag, sizeof(prog->tag));
4574 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
4575
4576 mutex_lock(&prog->aux->used_maps_mutex);
4577 ulen = info.nr_map_ids;
4578 info.nr_map_ids = prog->aux->used_map_cnt;
4579 ulen = min_t(u32, info.nr_map_ids, ulen);
4580 if (ulen) {
4581 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
4582 u32 i;
4583
4584 for (i = 0; i < ulen; i++)
4585 if (put_user(prog->aux->used_maps[i]->id,
4586 &user_map_ids[i])) {
4587 mutex_unlock(&prog->aux->used_maps_mutex);
4588 return -EFAULT;
4589 }
4590 }
4591 mutex_unlock(&prog->aux->used_maps_mutex);
4592
4593 err = set_info_rec_size(&info);
4594 if (err)
4595 return err;
4596
4597 bpf_prog_get_stats(prog, &stats);
4598 info.run_time_ns = stats.nsecs;
4599 info.run_cnt = stats.cnt;
4600 info.recursion_misses = stats.misses;
4601
4602 info.verified_insns = prog->aux->verified_insns;
4603
4604 if (!bpf_capable()) {
4605 info.jited_prog_len = 0;
4606 info.xlated_prog_len = 0;
4607 info.nr_jited_ksyms = 0;
4608 info.nr_jited_func_lens = 0;
4609 info.nr_func_info = 0;
4610 info.nr_line_info = 0;
4611 info.nr_jited_line_info = 0;
4612 goto done;
4613 }
4614
4615 ulen = info.xlated_prog_len;
4616 info.xlated_prog_len = bpf_prog_insn_size(prog);
4617 if (info.xlated_prog_len && ulen) {
4618 struct bpf_insn *insns_sanitized;
4619 bool fault;
4620
4621 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4622 info.xlated_prog_insns = 0;
4623 goto done;
4624 }
4625 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4626 if (!insns_sanitized)
4627 return -ENOMEM;
4628 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4629 ulen = min_t(u32, info.xlated_prog_len, ulen);
4630 fault = copy_to_user(uinsns, insns_sanitized, ulen);
4631 kfree(insns_sanitized);
4632 if (fault)
4633 return -EFAULT;
4634 }
4635
4636 if (bpf_prog_is_offloaded(prog->aux)) {
4637 err = bpf_prog_offload_info_fill(&info, prog);
4638 if (err)
4639 return err;
4640 goto done;
4641 }
4642
4643 /* NOTE: the following code is skipped for offloaded programs;
4644 * bpf_prog_offload_info_fill() is the place that fills the
4645 * equivalent fields for offload.
4646 */
4647 ulen = info.jited_prog_len;
4648 if (prog->aux->func_cnt) {
4649 u32 i;
4650
4651 info.jited_prog_len = 0;
4652 for (i = 0; i < prog->aux->func_cnt; i++)
4653 info.jited_prog_len += prog->aux->func[i]->jited_len;
4654 } else {
4655 info.jited_prog_len = prog->jited_len;
4656 }
4657
4658 if (info.jited_prog_len && ulen) {
4659 if (bpf_dump_raw_ok(file->f_cred)) {
4660 uinsns = u64_to_user_ptr(info.jited_prog_insns);
4661 ulen = min_t(u32, info.jited_prog_len, ulen);
4662
4663 /* for multi-function programs, copy the JITed
4664 * instructions for all the functions
4665 */
4666 if (prog->aux->func_cnt) {
4667 u32 len, free, i;
4668 u8 *img;
4669
4670 free = ulen;
4671 for (i = 0; i < prog->aux->func_cnt; i++) {
4672 len = prog->aux->func[i]->jited_len;
4673 len = min_t(u32, len, free);
4674 img = (u8 *) prog->aux->func[i]->bpf_func;
4675 if (copy_to_user(uinsns, img, len))
4676 return -EFAULT;
4677 uinsns += len;
4678 free -= len;
4679 if (!free)
4680 break;
4681 }
4682 } else {
4683 if (copy_to_user(uinsns, prog->bpf_func, ulen))
4684 return -EFAULT;
4685 }
4686 } else {
4687 info.jited_prog_insns = 0;
4688 }
4689 }
4690
4691 ulen = info.nr_jited_ksyms;
4692 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4693 if (ulen) {
4694 if (bpf_dump_raw_ok(file->f_cred)) {
4695 unsigned long ksym_addr;
4696 u64 __user *user_ksyms;
4697 u32 i;
4698
4699 /* copy the address of the kernel symbol
4700 * corresponding to each function
4701 */
4702 ulen = min_t(u32, info.nr_jited_ksyms, ulen);
4703 user_ksyms = u64_to_user_ptr(info.jited_ksyms);
4704 if (prog->aux->func_cnt) {
4705 for (i = 0; i < ulen; i++) {
4706 ksym_addr = (unsigned long)
4707 prog->aux->func[i]->bpf_func;
4708 if (put_user((u64) ksym_addr,
4709 &user_ksyms[i]))
4710 return -EFAULT;
4711 }
4712 } else {
4713 ksym_addr = (unsigned long) prog->bpf_func;
4714 if (put_user((u64) ksym_addr, &user_ksyms[0]))
4715 return -EFAULT;
4716 }
4717 } else {
4718 info.jited_ksyms = 0;
4719 }
4720 }
4721
4722 ulen = info.nr_jited_func_lens;
4723 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4724 if (ulen) {
4725 if (bpf_dump_raw_ok(file->f_cred)) {
4726 u32 __user *user_lens;
4727 u32 func_len, i;
4728
4729 /* copy the JITed image lengths for each function */
4730 ulen = min_t(u32, info.nr_jited_func_lens, ulen);
4731 user_lens = u64_to_user_ptr(info.jited_func_lens);
4732 if (prog->aux->func_cnt) {
4733 for (i = 0; i < ulen; i++) {
4734 func_len =
4735 prog->aux->func[i]->jited_len;
4736 if (put_user(func_len, &user_lens[i]))
4737 return -EFAULT;
4738 }
4739 } else {
4740 func_len = prog->jited_len;
4741 if (put_user(func_len, &user_lens[0]))
4742 return -EFAULT;
4743 }
4744 } else {
4745 info.jited_func_lens = 0;
4746 }
4747 }
4748
4749 if (prog->aux->btf)
4750 info.btf_id = btf_obj_id(prog->aux->btf);
4751 info.attach_btf_id = prog->aux->attach_btf_id;
4752 if (attach_btf)
4753 info.attach_btf_obj_id = btf_obj_id(attach_btf);
4754
4755 ulen = info.nr_func_info;
4756 info.nr_func_info = prog->aux->func_info_cnt;
4757 if (info.nr_func_info && ulen) {
4758 char __user *user_finfo;
4759
4760 user_finfo = u64_to_user_ptr(info.func_info);
4761 ulen = min_t(u32, info.nr_func_info, ulen);
4762 if (copy_to_user(user_finfo, prog->aux->func_info,
4763 info.func_info_rec_size * ulen))
4764 return -EFAULT;
4765 }
4766
4767 ulen = info.nr_line_info;
4768 info.nr_line_info = prog->aux->nr_linfo;
4769 if (info.nr_line_info && ulen) {
4770 __u8 __user *user_linfo;
4771
4772 user_linfo = u64_to_user_ptr(info.line_info);
4773 ulen = min_t(u32, info.nr_line_info, ulen);
4774 if (copy_to_user(user_linfo, prog->aux->linfo,
4775 info.line_info_rec_size * ulen))
4776 return -EFAULT;
4777 }
4778
4779 ulen = info.nr_jited_line_info;
4780 if (prog->aux->jited_linfo)
4781 info.nr_jited_line_info = prog->aux->nr_linfo;
4782 else
4783 info.nr_jited_line_info = 0;
4784 if (info.nr_jited_line_info && ulen) {
4785 if (bpf_dump_raw_ok(file->f_cred)) {
4786 unsigned long line_addr;
4787 __u64 __user *user_linfo;
4788 u32 i;
4789
4790 user_linfo = u64_to_user_ptr(info.jited_line_info);
4791 ulen = min_t(u32, info.nr_jited_line_info, ulen);
4792 for (i = 0; i < ulen; i++) {
4793 line_addr = (unsigned long)prog->aux->jited_linfo[i];
4794 if (put_user((__u64)line_addr, &user_linfo[i]))
4795 return -EFAULT;
4796 }
4797 } else {
4798 info.jited_line_info = 0;
4799 }
4800 }
4801
4802 ulen = info.nr_prog_tags;
4803 info.nr_prog_tags = prog->aux->func_cnt ? : 1;
4804 if (ulen) {
4805 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
4806 u32 i;
4807
4808 user_prog_tags = u64_to_user_ptr(info.prog_tags);
4809 ulen = min_t(u32, info.nr_prog_tags, ulen);
4810 if (prog->aux->func_cnt) {
4811 for (i = 0; i < ulen; i++) {
4812 if (copy_to_user(user_prog_tags[i],
4813 prog->aux->func[i]->tag,
4814 BPF_TAG_SIZE))
4815 return -EFAULT;
4816 }
4817 } else {
4818 if (copy_to_user(user_prog_tags[0],
4819 prog->tag, BPF_TAG_SIZE))
4820 return -EFAULT;
4821 }
4822 }
4823
4824done:
4825 if (copy_to_user(uinfo, &info, info_len) ||
4826 put_user(info_len, &uattr->info.info_len))
4827 return -EFAULT;
4828
4829 return 0;
4830}
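
/*
 * The variable-size arrays filled in above follow a two-call pattern:
 * userspace queries once with the nr_* counters zeroed to learn the
 * real counts the kernel writes back, then allocates buffers, sets the
 * corresponding pointer fields and queries again. (Illustrative
 * description only; real callers usually rely on libbpf's
 * bpf_obj_get_info_by_fd() wrapper.)
 */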
4831
4832static int bpf_map_get_info_by_fd(struct file *file,
4833 struct bpf_map *map,
4834 const union bpf_attr *attr,
4835 union bpf_attr __user *uattr)
4836{
4837 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4838 struct bpf_map_info info;
4839 u32 info_len = attr->info.info_len;
4840 int err;
4841
4842 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4843 if (err)
4844 return err;
4845 info_len = min_t(u32, sizeof(info), info_len);
4846
4847 memset(&info, 0, sizeof(info));
4848 info.type = map->map_type;
4849 info.id = map->id;
4850 info.key_size = map->key_size;
4851 info.value_size = map->value_size;
4852 info.max_entries = map->max_entries;
4853 info.map_flags = map->map_flags;
4854 info.map_extra = map->map_extra;
4855 memcpy(info.name, map->name, sizeof(map->name));
4856
4857 if (map->btf) {
4858 info.btf_id = btf_obj_id(map->btf);
4859 info.btf_key_type_id = map->btf_key_type_id;
4860 info.btf_value_type_id = map->btf_value_type_id;
4861 }
4862 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4863 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
4864 bpf_map_struct_ops_info_fill(&info, map);
4865
4866 if (bpf_map_is_offloaded(map)) {
4867 err = bpf_map_offload_info_fill(&info, map);
4868 if (err)
4869 return err;
4870 }
4871
4872 if (copy_to_user(uinfo, &info, info_len) ||
4873 put_user(info_len, &uattr->info.info_len))
4874 return -EFAULT;
4875
4876 return 0;
4877}
4878
4879static int bpf_btf_get_info_by_fd(struct file *file,
4880 struct btf *btf,
4881 const union bpf_attr *attr,
4882 union bpf_attr __user *uattr)
4883{
4884 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4885 u32 info_len = attr->info.info_len;
4886 int err;
4887
4888 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
4889 if (err)
4890 return err;
4891
4892 return btf_get_info_by_fd(btf, attr, uattr);
4893}
4894
4895static int bpf_link_get_info_by_fd(struct file *file,
4896 struct bpf_link *link,
4897 const union bpf_attr *attr,
4898 union bpf_attr __user *uattr)
4899{
4900 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4901 struct bpf_link_info info;
4902 u32 info_len = attr->info.info_len;
4903 int err;
4904
4905 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4906 if (err)
4907 return err;
4908 info_len = min_t(u32, sizeof(info), info_len);
4909
4910 memset(&info, 0, sizeof(info));
4911 if (copy_from_user(&info, uinfo, info_len))
4912 return -EFAULT;
4913
4914 info.type = link->type;
4915 info.id = link->id;
4916 if (link->prog)
4917 info.prog_id = link->prog->aux->id;
4918
4919 if (link->ops->fill_link_info) {
4920 err = link->ops->fill_link_info(link, &info);
4921 if (err)
4922 return err;
4923 }
4924
4925 if (copy_to_user(uinfo, &info, info_len) ||
4926 put_user(info_len, &uattr->info.info_len))
4927 return -EFAULT;
4928
4929 return 0;
4930}
4931
4932
4933#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
4934
4935static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
4936 union bpf_attr __user *uattr)
4937{
4938 int ufd = attr->info.bpf_fd;
4939 struct fd f;
4940 int err;
4941
4942 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
4943 return -EINVAL;
4944
4945 f = fdget(ufd);
4946 if (!f.file)
4947 return -EBADFD;
4948
4949 if (f.file->f_op == &bpf_prog_fops)
4950 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
4951 uattr);
4952 else if (f.file->f_op == &bpf_map_fops)
4953 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
4954 uattr);
4955 else if (f.file->f_op == &btf_fops)
4956 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
4957 else if (f.file->f_op == &bpf_link_fops)
4958 err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
4959 attr, uattr);
4960 else
4961 err = -EINVAL;
4962
4963 fdput(f);
4964 return err;
4965}
4966
4967#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
4968
4969static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
4970{
4971 struct bpf_token *token = NULL;
4972
4973 if (CHECK_ATTR(BPF_BTF_LOAD))
4974 return -EINVAL;
4975
4976 if (attr->btf_flags & ~BPF_F_TOKEN_FD)
4977 return -EINVAL;
4978
4979 if (attr->btf_flags & BPF_F_TOKEN_FD) {
4980 token = bpf_token_get_from_fd(attr->btf_token_fd);
4981 if (IS_ERR(token))
4982 return PTR_ERR(token);
4983 if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
4984 bpf_token_put(token);
4985 token = NULL;
4986 }
4987 }
4988
4989 if (!bpf_token_capable(token, CAP_BPF)) {
4990 bpf_token_put(token);
4991 return -EPERM;
4992 }
4993
4994 bpf_token_put(token);
4995
4996 return btf_new_fd(attr, uattr, uattr_size);
4997}
4998
4999#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
5000
5001static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
5002{
5003 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
5004 return -EINVAL;
5005
5006 if (!capable(CAP_SYS_ADMIN))
5007 return -EPERM;
5008
5009 return btf_get_fd_by_id(attr->btf_id);
5010}
5011
5012static int bpf_task_fd_query_copy(const union bpf_attr *attr,
5013 union bpf_attr __user *uattr,
5014 u32 prog_id, u32 fd_type,
5015 const char *buf, u64 probe_offset,
5016 u64 probe_addr)
5017{
5018 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
5019 u32 len = buf ? strlen(buf) : 0, input_len;
5020 int err = 0;
5021
5022 if (put_user(len, &uattr->task_fd_query.buf_len))
5023 return -EFAULT;
5024 input_len = attr->task_fd_query.buf_len;
5025 if (input_len && ubuf) {
5026 if (!len) {
5027 /* nothing to copy, just make ubuf NULL terminated */
5028 char zero = '\0';
5029
5030 if (put_user(zero, ubuf))
5031 return -EFAULT;
5032 } else if (input_len >= len + 1) {
5033 /* ubuf can hold the string with NULL terminator */
5034 if (copy_to_user(ubuf, buf, len + 1))
5035 return -EFAULT;
5036 } else {
5037 /* ubuf cannot hold the full string with its NULL terminator;
5038 * do a partial copy that is still NULL terminated.
5039 */
5040 char zero = '\0';
5041
5042 err = -ENOSPC;
5043 if (copy_to_user(ubuf, buf, input_len - 1))
5044 return -EFAULT;
5045 if (put_user(zero, ubuf + input_len - 1))
5046 return -EFAULT;
5047 }
5048 }
5049
5050 if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
5051 put_user(fd_type, &uattr->task_fd_query.fd_type) ||
5052 put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
5053 put_user(probe_addr, &uattr->task_fd_query.probe_addr))
5054 return -EFAULT;
5055
5056 return err;
5057}
5058
5059#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
5060
5061static int bpf_task_fd_query(const union bpf_attr *attr,
5062 union bpf_attr __user *uattr)
5063{
5064 pid_t pid = attr->task_fd_query.pid;
5065 u32 fd = attr->task_fd_query.fd;
5066 const struct perf_event *event;
5067 struct task_struct *task;
5068 struct file *file;
5069 int err;
5070
5071 if (CHECK_ATTR(BPF_TASK_FD_QUERY))
5072 return -EINVAL;
5073
5074 if (!capable(CAP_SYS_ADMIN))
5075 return -EPERM;
5076
5077 if (attr->task_fd_query.flags != 0)
5078 return -EINVAL;
5079
5080 rcu_read_lock();
5081 task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
5082 rcu_read_unlock();
5083 if (!task)
5084 return -ENOENT;
5085
5086 err = 0;
5087 file = fget_task(task, fd);
5088 put_task_struct(task);
5089 if (!file)
5090 return -EBADF;
5091
5092 if (file->f_op == &bpf_link_fops) {
5093 struct bpf_link *link = file->private_data;
5094
5095 if (link->ops == &bpf_raw_tp_link_lops) {
5096 struct bpf_raw_tp_link *raw_tp =
5097 container_of(link, struct bpf_raw_tp_link, link);
5098 struct bpf_raw_event_map *btp = raw_tp->btp;
5099
5100 err = bpf_task_fd_query_copy(attr, uattr,
5101 raw_tp->link.prog->aux->id,
5102 BPF_FD_TYPE_RAW_TRACEPOINT,
5103 btp->tp->name, 0, 0);
5104 goto put_file;
5105 }
5106 goto out_not_supp;
5107 }
5108
5109 event = perf_get_event(file);
5110 if (!IS_ERR(event)) {
5111 u64 probe_offset, probe_addr;
5112 u32 prog_id, fd_type;
5113 const char *buf;
5114
5115 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
5116 &buf, &probe_offset,
5117 &probe_addr, NULL);
5118 if (!err)
5119 err = bpf_task_fd_query_copy(attr, uattr, prog_id,
5120 fd_type, buf,
5121 probe_offset,
5122 probe_addr);
5123 goto put_file;
5124 }
5125
5126out_not_supp:
5127 err = -ENOTSUPP;
5128put_file:
5129 fput(file);
5130 return err;
5131}
5132
5133#define BPF_MAP_BATCH_LAST_FIELD batch.flags
5134
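/*
 * BPF_DO_BATCH() is wrapped in do/while (0) so that it expands to a
 * single statement in the if/else chain below while still being able
 * to goto the common error path when an op is not implemented.
 */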
5135#define BPF_DO_BATCH(fn, ...) \
5136 do { \
5137 if (!fn) { \
5138 err = -ENOTSUPP; \
5139 goto err_put; \
5140 } \
5141 err = fn(__VA_ARGS__); \
5142 } while (0)
5143
5144static int bpf_map_do_batch(const union bpf_attr *attr,
5145 union bpf_attr __user *uattr,
5146 int cmd)
5147{
5148 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
5149 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
5150 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
5151 struct bpf_map *map;
5152 int err, ufd;
5153 struct fd f;
5154
5155 if (CHECK_ATTR(BPF_MAP_BATCH))
5156 return -EINVAL;
5157
5158 ufd = attr->batch.map_fd;
5159 f = fdget(ufd);
5160 map = __bpf_map_get(f);
5161 if (IS_ERR(map))
5162 return PTR_ERR(map);
5163 if (has_write)
5164 bpf_map_write_active_inc(map);
5165 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
5166 err = -EPERM;
5167 goto err_put;
5168 }
5169 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
5170 err = -EPERM;
5171 goto err_put;
5172 }
5173
5174 if (cmd == BPF_MAP_LOOKUP_BATCH)
5175 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
5176 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
5177 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
5178 else if (cmd == BPF_MAP_UPDATE_BATCH)
5179 BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
5180 else
5181 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
5182err_put:
5183 if (has_write) {
5184 maybe_wait_bpf_programs(map);
5185 bpf_map_write_active_dec(map);
5186 }
5187 fdput(f);
5188 return err;
5189}
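
/*
 * A rough userspace sketch of a batched lookup (illustrative only;
 * map_fd plus the keys/values buffers are assumed to exist, and the
 * out_batch cookie from one call seeds in_batch for the next):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.batch.map_fd    = map_fd;
 *	attr.batch.keys      = (__u64)(unsigned long)keys;
 *	attr.batch.values    = (__u64)(unsigned long)values;
 *	attr.batch.out_batch = (__u64)(unsigned long)&cookie;
 *	attr.batch.count     = n;
 *	syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 */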
5190
5191#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
5192static int link_create(union bpf_attr *attr, bpfptr_t uattr)
5193{
5194 struct bpf_prog *prog;
5195 int ret;
5196
5197 if (CHECK_ATTR(BPF_LINK_CREATE))
5198 return -EINVAL;
5199
5200 if (attr->link_create.attach_type == BPF_STRUCT_OPS)
5201 return bpf_struct_ops_link_create(attr);
5202
5203 prog = bpf_prog_get(attr->link_create.prog_fd);
5204 if (IS_ERR(prog))
5205 return PTR_ERR(prog);
5206
5207 ret = bpf_prog_attach_check_attach_type(prog,
5208 attr->link_create.attach_type);
5209 if (ret)
5210 goto out;
5211
5212 switch (prog->type) {
5213 case BPF_PROG_TYPE_CGROUP_SKB:
5214 case BPF_PROG_TYPE_CGROUP_SOCK:
5215 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
5216 case BPF_PROG_TYPE_SOCK_OPS:
5217 case BPF_PROG_TYPE_CGROUP_DEVICE:
5218 case BPF_PROG_TYPE_CGROUP_SYSCTL:
5219 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5220 ret = cgroup_bpf_link_attach(attr, prog);
5221 break;
5222 case BPF_PROG_TYPE_EXT:
5223 ret = bpf_tracing_prog_attach(prog,
5224 attr->link_create.target_fd,
5225 attr->link_create.target_btf_id,
5226 attr->link_create.tracing.cookie);
5227 break;
5228 case BPF_PROG_TYPE_LSM:
5229 case BPF_PROG_TYPE_TRACING:
5230 if (attr->link_create.attach_type != prog->expected_attach_type) {
5231 ret = -EINVAL;
5232 goto out;
5233 }
5234 if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
5235 ret = bpf_raw_tp_link_attach(prog, NULL);
5236 else if (prog->expected_attach_type == BPF_TRACE_ITER)
5237 ret = bpf_iter_link_attach(attr, uattr, prog);
5238 else if (prog->expected_attach_type == BPF_LSM_CGROUP)
5239 ret = cgroup_bpf_link_attach(attr, prog);
5240 else
5241 ret = bpf_tracing_prog_attach(prog,
5242 attr->link_create.target_fd,
5243 attr->link_create.target_btf_id,
5244 attr->link_create.tracing.cookie);
5245 break;
5246 case BPF_PROG_TYPE_FLOW_DISSECTOR:
5247 case BPF_PROG_TYPE_SK_LOOKUP:
5248 ret = netns_bpf_link_create(attr, prog);
5249 break;
5250#ifdef CONFIG_NET
5251 case BPF_PROG_TYPE_XDP:
5252 ret = bpf_xdp_link_attach(attr, prog);
5253 break;
5254 case BPF_PROG_TYPE_SCHED_CLS:
5255 if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
5256 attr->link_create.attach_type == BPF_TCX_EGRESS)
5257 ret = tcx_link_attach(attr, prog);
5258 else
5259 ret = netkit_link_attach(attr, prog);
5260 break;
5261 case BPF_PROG_TYPE_NETFILTER:
5262 ret = bpf_nf_link_attach(attr, prog);
5263 break;
5264#endif
5265 case BPF_PROG_TYPE_PERF_EVENT:
5266 case BPF_PROG_TYPE_TRACEPOINT:
5267 ret = bpf_perf_link_attach(attr, prog);
5268 break;
5269 case BPF_PROG_TYPE_KPROBE:
5270 if (attr->link_create.attach_type == BPF_PERF_EVENT)
5271 ret = bpf_perf_link_attach(attr, prog);
5272 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI)
5273 ret = bpf_kprobe_multi_link_attach(attr, prog);
5274 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
5275 ret = bpf_uprobe_multi_link_attach(attr, prog);
5276 break;
5277 default:
5278 ret = -EINVAL;
5279 }
5280
5281out:
5282 if (ret < 0)
5283 bpf_prog_put(prog);
5284 return ret;
5285}
5286
5287static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
5288{
5289 struct bpf_map *new_map, *old_map = NULL;
5290 int ret;
5291
5292 new_map = bpf_map_get(attr->link_update.new_map_fd);
5293 if (IS_ERR(new_map))
5294 return PTR_ERR(new_map);
5295
5296 if (attr->link_update.flags & BPF_F_REPLACE) {
5297 old_map = bpf_map_get(attr->link_update.old_map_fd);
5298 if (IS_ERR(old_map)) {
5299 ret = PTR_ERR(old_map);
5300 goto out_put;
5301 }
5302 } else if (attr->link_update.old_map_fd) {
5303 ret = -EINVAL;
5304 goto out_put;
5305 }
5306
5307 ret = link->ops->update_map(link, new_map, old_map);
5308
5309 if (old_map)
5310 bpf_map_put(old_map);
5311out_put:
5312 bpf_map_put(new_map);
5313 return ret;
5314}
5315
5316#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
5317
5318static int link_update(union bpf_attr *attr)
5319{
5320 struct bpf_prog *old_prog = NULL, *new_prog;
5321 struct bpf_link *link;
5322 u32 flags;
5323 int ret;
5324
5325 if (CHECK_ATTR(BPF_LINK_UPDATE))
5326 return -EINVAL;
5327
5328 flags = attr->link_update.flags;
5329 if (flags & ~BPF_F_REPLACE)
5330 return -EINVAL;
5331
5332 link = bpf_link_get_from_fd(attr->link_update.link_fd);
5333 if (IS_ERR(link))
5334 return PTR_ERR(link);
5335
5336 if (link->ops->update_map) {
5337 ret = link_update_map(link, attr);
5338 goto out_put_link;
5339 }
5340
5341 new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
5342 if (IS_ERR(new_prog)) {
5343 ret = PTR_ERR(new_prog);
5344 goto out_put_link;
5345 }
5346
5347 if (flags & BPF_F_REPLACE) {
5348 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
5349 if (IS_ERR(old_prog)) {
5350 ret = PTR_ERR(old_prog);
5351 old_prog = NULL;
5352 goto out_put_progs;
5353 }
5354 } else if (attr->link_update.old_prog_fd) {
5355 ret = -EINVAL;
5356 goto out_put_progs;
5357 }
5358
5359 if (link->ops->update_prog)
5360 ret = link->ops->update_prog(link, new_prog, old_prog);
5361 else
5362 ret = -EINVAL;
5363
5364out_put_progs:
5365 if (old_prog)
5366 bpf_prog_put(old_prog);
5367 if (ret)
5368 bpf_prog_put(new_prog);
5369out_put_link:
5370 bpf_link_put_direct(link);
5371 return ret;
5372}
5373
5374#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5375
5376static int link_detach(union bpf_attr *attr)
5377{
5378 struct bpf_link *link;
5379 int ret;
5380
5381 if (CHECK_ATTR(BPF_LINK_DETACH))
5382 return -EINVAL;
5383
5384 link = bpf_link_get_from_fd(attr->link_detach.link_fd);
5385 if (IS_ERR(link))
5386 return PTR_ERR(link);
5387
5388 if (link->ops->detach)
5389 ret = link->ops->detach(link);
5390 else
5391 ret = -EOPNOTSUPP;
5392
5393 bpf_link_put_direct(link);
5394 return ret;
5395}
5396
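/*
 * atomic64_fetch_add_unless() only takes the reference if the count is
 * still non-zero, so a link whose last reference is already gone can
 * never be resurrected through the IDR.
 */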
5397static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5398{
5399 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5400}
5401
5402struct bpf_link *bpf_link_by_id(u32 id)
5403{
5404 struct bpf_link *link;
5405
5406 if (!id)
5407 return ERR_PTR(-ENOENT);
5408
5409 spin_lock_bh(&link_idr_lock);
5410 /* before link is "settled", ID is 0, pretend it doesn't exist yet */
5411 link = idr_find(&link_idr, id);
5412 if (link) {
5413 if (link->id)
5414 link = bpf_link_inc_not_zero(link);
5415 else
5416 link = ERR_PTR(-EAGAIN);
5417 } else {
5418 link = ERR_PTR(-ENOENT);
5419 }
5420 spin_unlock_bh(&link_idr_lock);
5421 return link;
5422}
5423
5424struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
5425{
5426 struct bpf_link *link;
5427
5428 spin_lock_bh(&link_idr_lock);
5429again:
5430 link = idr_get_next(&link_idr, id);
5431 if (link) {
5432 link = bpf_link_inc_not_zero(link);
5433 if (IS_ERR(link)) {
5434 (*id)++;
5435 goto again;
5436 }
5437 }
5438 spin_unlock_bh(&link_idr_lock);
5439
5440 return link;
5441}
5442
5443#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
5444
5445static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
5446{
5447 struct bpf_link *link;
5448 u32 id = attr->link_id;
5449 int fd;
5450
5451 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
5452 return -EINVAL;
5453
5454 if (!capable(CAP_SYS_ADMIN))
5455 return -EPERM;
5456
5457 link = bpf_link_by_id(id);
5458 if (IS_ERR(link))
5459 return PTR_ERR(link);
5460
5461 fd = bpf_link_new_fd(link);
5462 if (fd < 0)
5463 bpf_link_put_direct(link);
5464
5465 return fd;
5466}
5467
5468DEFINE_MUTEX(bpf_stats_enabled_mutex);
5469
5470static int bpf_stats_release(struct inode *inode, struct file *file)
5471{
5472 mutex_lock(&bpf_stats_enabled_mutex);
5473 static_key_slow_dec(&bpf_stats_enabled_key.key);
5474 mutex_unlock(&bpf_stats_enabled_mutex);
5475 return 0;
5476}
5477
5478static const struct file_operations bpf_stats_fops = {
5479 .release = bpf_stats_release,
5480};
5481
5482static int bpf_enable_runtime_stats(void)
5483{
5484 int fd;
5485
5486 mutex_lock(&bpf_stats_enabled_mutex);
5487
5488 /* Cap the count well below INT_MAX so the static key cannot overflow */
5489 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
5490 mutex_unlock(&bpf_stats_enabled_mutex);
5491 return -EBUSY;
5492 }
5493
5494 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
5495 if (fd >= 0)
5496 static_key_slow_inc(&bpf_stats_enabled_key.key);
5497
5498 mutex_unlock(&bpf_stats_enabled_mutex);
5499 return fd;
5500}
5501
5502#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
5503
5504static int bpf_enable_stats(union bpf_attr *attr)
5505{
5506
5507 if (CHECK_ATTR(BPF_ENABLE_STATS))
5508 return -EINVAL;
5509
5510 if (!capable(CAP_SYS_ADMIN))
5511 return -EPERM;
5512
5513 switch (attr->enable_stats.type) {
5514 case BPF_STATS_RUN_TIME:
5515 return bpf_enable_runtime_stats();
5516 default:
5517 break;
5518 }
5519 return -EINVAL;
5520}
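
/*
 * A minimal userspace sketch of enabling runtime stats (illustrative
 * only; stats stay enabled until the returned fd is closed):
 *
 *	union bpf_attr attr = {};
 *	int fd;
 *
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 */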
5521
5522#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
5523
5524static int bpf_iter_create(union bpf_attr *attr)
5525{
5526 struct bpf_link *link;
5527 int err;
5528
5529 if (CHECK_ATTR(BPF_ITER_CREATE))
5530 return -EINVAL;
5531
5532 if (attr->iter_create.flags)
5533 return -EINVAL;
5534
5535 link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5536 if (IS_ERR(link))
5537 return PTR_ERR(link);
5538
5539 err = bpf_iter_new_fd(link);
5540 bpf_link_put_direct(link);
5541
5542 return err;
5543}
5544
5545#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5546
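/*
 * BPF_PROG_BIND_MAP ties a map's lifetime to a program even when no
 * instruction in the program references the map, e.g. for maps that
 * only carry metadata about the program; the map then stays alive at
 * least as long as the program does.
 */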
5547static int bpf_prog_bind_map(union bpf_attr *attr)
5548{
5549 struct bpf_prog *prog;
5550 struct bpf_map *map;
5551 struct bpf_map **used_maps_old, **used_maps_new;
5552 int i, ret = 0;
5553
5554 if (CHECK_ATTR(BPF_PROG_BIND_MAP))
5555 return -EINVAL;
5556
5557 if (attr->prog_bind_map.flags)
5558 return -EINVAL;
5559
5560 prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
5561 if (IS_ERR(prog))
5562 return PTR_ERR(prog);
5563
5564 map = bpf_map_get(attr->prog_bind_map.map_fd);
5565 if (IS_ERR(map)) {
5566 ret = PTR_ERR(map);
5567 goto out_prog_put;
5568 }
5569
5570 mutex_lock(&prog->aux->used_maps_mutex);
5571
5572 used_maps_old = prog->aux->used_maps;
5573
5574 for (i = 0; i < prog->aux->used_map_cnt; i++)
5575 if (used_maps_old[i] == map) {
5576 bpf_map_put(map);
5577 goto out_unlock;
5578 }
5579
5580 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
5581 sizeof(used_maps_new[0]),
5582 GFP_KERNEL);
5583 if (!used_maps_new) {
5584 ret = -ENOMEM;
5585 goto out_unlock;
5586 }
5587
5588 /* The bpf program will not access the bpf map, but for the sake of
5589 * simplicity, increase sleepable_refcnt for sleepable programs as well.
5590 */
5591 if (prog->sleepable)
5592 atomic64_inc(&map->sleepable_refcnt);
5593 memcpy(used_maps_new, used_maps_old,
5594 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
5595 used_maps_new[prog->aux->used_map_cnt] = map;
5596
5597 prog->aux->used_map_cnt++;
5598 prog->aux->used_maps = used_maps_new;
5599
5600 kfree(used_maps_old);
5601
5602out_unlock:
5603 mutex_unlock(&prog->aux->used_maps_mutex);
5604
5605 if (ret)
5606 bpf_map_put(map);
5607out_prog_put:
5608 bpf_prog_put(prog);
5609 return ret;
5610}
5611
5612#define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd
5613
5614static int token_create(union bpf_attr *attr)
5615{
5616 if (CHECK_ATTR(BPF_TOKEN_CREATE))
5617 return -EINVAL;
5618
5619 /* no flags are supported yet */
5620 if (attr->token_create.flags)
5621 return -EINVAL;
5622
5623 return bpf_token_create(attr);
5624}
5625
5626static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
5627{
5628 union bpf_attr attr;
5629 int err;
5630
5631 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5632 if (err)
5633 return err;
5634 size = min_t(u32, size, sizeof(attr));
5635
5636 /* copy attributes from user space, may be less than sizeof(bpf_attr) */
5637 memset(&attr, 0, sizeof(attr));
5638 if (copy_from_bpfptr(&attr, uattr, size) != 0)
5639 return -EFAULT;
5640
5641 err = security_bpf(cmd, &attr, size);
5642 if (err < 0)
5643 return err;
5644
5645 switch (cmd) {
5646 case BPF_MAP_CREATE:
5647 err = map_create(&attr);
5648 break;
5649 case BPF_MAP_LOOKUP_ELEM:
5650 err = map_lookup_elem(&attr);
5651 break;
5652 case BPF_MAP_UPDATE_ELEM:
5653 err = map_update_elem(&attr, uattr);
5654 break;
5655 case BPF_MAP_DELETE_ELEM:
5656 err = map_delete_elem(&attr, uattr);
5657 break;
5658 case BPF_MAP_GET_NEXT_KEY:
5659 err = map_get_next_key(&attr);
5660 break;
5661 case BPF_MAP_FREEZE:
5662 err = map_freeze(&attr);
5663 break;
5664 case BPF_PROG_LOAD:
5665 err = bpf_prog_load(&attr, uattr, size);
5666 break;
5667 case BPF_OBJ_PIN:
5668 err = bpf_obj_pin(&attr);
5669 break;
5670 case BPF_OBJ_GET:
5671 err = bpf_obj_get(&attr);
5672 break;
5673 case BPF_PROG_ATTACH:
5674 err = bpf_prog_attach(&attr);
5675 break;
5676 case BPF_PROG_DETACH:
5677 err = bpf_prog_detach(&attr);
5678 break;
5679 case BPF_PROG_QUERY:
5680 err = bpf_prog_query(&attr, uattr.user);
5681 break;
5682 case BPF_PROG_TEST_RUN:
5683 err = bpf_prog_test_run(&attr, uattr.user);
5684 break;
5685 case BPF_PROG_GET_NEXT_ID:
5686 err = bpf_obj_get_next_id(&attr, uattr.user,
5687 &prog_idr, &prog_idr_lock);
5688 break;
5689 case BPF_MAP_GET_NEXT_ID:
5690 err = bpf_obj_get_next_id(&attr, uattr.user,
5691 &map_idr, &map_idr_lock);
5692 break;
5693 case BPF_BTF_GET_NEXT_ID:
5694 err = bpf_obj_get_next_id(&attr, uattr.user,
5695 &btf_idr, &btf_idr_lock);
5696 break;
5697 case BPF_PROG_GET_FD_BY_ID:
5698 err = bpf_prog_get_fd_by_id(&attr);
5699 break;
5700 case BPF_MAP_GET_FD_BY_ID:
5701 err = bpf_map_get_fd_by_id(&attr);
5702 break;
5703 case BPF_OBJ_GET_INFO_BY_FD:
5704 err = bpf_obj_get_info_by_fd(&attr, uattr.user);
5705 break;
5706 case BPF_RAW_TRACEPOINT_OPEN:
5707 err = bpf_raw_tracepoint_open(&attr);
5708 break;
5709 case BPF_BTF_LOAD:
5710 err = bpf_btf_load(&attr, uattr, size);
5711 break;
5712 case BPF_BTF_GET_FD_BY_ID:
5713 err = bpf_btf_get_fd_by_id(&attr);
5714 break;
5715 case BPF_TASK_FD_QUERY:
5716 err = bpf_task_fd_query(&attr, uattr.user);
5717 break;
5718 case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
5719 err = map_lookup_and_delete_elem(&attr);
5720 break;
5721 case BPF_MAP_LOOKUP_BATCH:
5722 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
5723 break;
5724 case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
5725 err = bpf_map_do_batch(&attr, uattr.user,
5726 BPF_MAP_LOOKUP_AND_DELETE_BATCH);
5727 break;
5728 case BPF_MAP_UPDATE_BATCH:
5729 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
5730 break;
5731 case BPF_MAP_DELETE_BATCH:
5732 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
5733 break;
5734 case BPF_LINK_CREATE:
5735 err = link_create(&attr, uattr);
5736 break;
5737 case BPF_LINK_UPDATE:
5738 err = link_update(&attr);
5739 break;
5740 case BPF_LINK_GET_FD_BY_ID:
5741 err = bpf_link_get_fd_by_id(&attr);
5742 break;
5743 case BPF_LINK_GET_NEXT_ID:
5744 err = bpf_obj_get_next_id(&attr, uattr.user,
5745 &link_idr, &link_idr_lock);
5746 break;
5747 case BPF_ENABLE_STATS:
5748 err = bpf_enable_stats(&attr);
5749 break;
5750 case BPF_ITER_CREATE:
5751 err = bpf_iter_create(&attr);
5752 break;
5753 case BPF_LINK_DETACH:
5754 err = link_detach(&attr);
5755 break;
5756 case BPF_PROG_BIND_MAP:
5757 err = bpf_prog_bind_map(&attr);
5758 break;
5759 case BPF_TOKEN_CREATE:
5760 err = token_create(&attr);
5761 break;
5762 default:
5763 err = -EINVAL;
5764 break;
5765 }
5766
5767 return err;
5768}
5769
5770SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
5771{
5772 return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
5773}
5774
5775static bool syscall_prog_is_valid_access(int off, int size,
5776 enum bpf_access_type type,
5777 const struct bpf_prog *prog,
5778 struct bpf_insn_access_aux *info)
5779{
5780 if (off < 0 || off >= U16_MAX)
5781 return false;
5782 if (off % size != 0)
5783 return false;
5784 return true;
5785}
5786
5787BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
5788{
5789 switch (cmd) {
5790 case BPF_MAP_CREATE:
5791 case BPF_MAP_DELETE_ELEM:
5792 case BPF_MAP_UPDATE_ELEM:
5793 case BPF_MAP_FREEZE:
5794 case BPF_MAP_GET_FD_BY_ID:
5795 case BPF_PROG_LOAD:
5796 case BPF_BTF_LOAD:
5797 case BPF_LINK_CREATE:
5798 case BPF_RAW_TRACEPOINT_OPEN:
5799 break;
5800 default:
5801 return -EINVAL;
5802 }
5803 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
5804}
5805
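/*
 * A minimal sketch of the BPF-side caller (illustrative only; this is
 * roughly how a BPF_PROG_TYPE_SYSCALL "loader" program could create a
 * map from within the kernel via the whitelisted commands above):
 *
 *	static union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 4,
 *		.max_entries = 1,
 *	};
 *
 *	SEC("syscall")
 *	int loader(void *ctx)
 *	{
 *		return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */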
5806
5807/* To shut up -Wmissing-prototypes.
5808 * This function is used by the kernel light skeleton
5809 * to load bpf programs when modules are loaded or during kernel boot.
5810 * See tools/lib/bpf/skel_internal.h
5811 */
5812int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
5813
5814int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
5815{
5816 struct bpf_prog * __maybe_unused prog;
5817 struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5818
5819 switch (cmd) {
5820#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5821 case BPF_PROG_TEST_RUN:
5822 if (attr->test.data_in || attr->test.data_out ||
5823 attr->test.ctx_out || attr->test.duration ||
5824 attr->test.repeat || attr->test.flags)
5825 return -EINVAL;
5826
5827 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5828 if (IS_ERR(prog))
5829 return PTR_ERR(prog);
5830
5831 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5832 attr->test.ctx_size_in > U16_MAX) {
5833 bpf_prog_put(prog);
5834 return -EINVAL;
5835 }
5836
5837 run_ctx.bpf_cookie = 0;
5838 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
5839 /* recursion detected */
5840 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
5841 bpf_prog_put(prog);
5842 return -EBUSY;
5843 }
5844 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5845 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
5846 &run_ctx);
5847 bpf_prog_put(prog);
5848 return 0;
5849#endif
5850 default:
5851 return ____bpf_sys_bpf(cmd, attr, size);
5852 }
5853}
5854EXPORT_SYMBOL(kern_sys_bpf);
5855
5856static const struct bpf_func_proto bpf_sys_bpf_proto = {
5857 .func = bpf_sys_bpf,
5858 .gpl_only = false,
5859 .ret_type = RET_INTEGER,
5860 .arg1_type = ARG_ANYTHING,
5861 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
5862 .arg3_type = ARG_CONST_SIZE,
5863};
5864
5865const struct bpf_func_proto * __weak
5866tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5867{
5868 return bpf_base_func_proto(func_id, prog);
5869}
5870
5871BPF_CALL_1(bpf_sys_close, u32, fd)
5872{
5873 /* When a bpf program calls this helper there should not be
5874 * an fdget() without a matching, completed fdput().
5875 * This helper is allowed in the following callchain only:
5876 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
5877 */
5878 return close_fd(fd);
5879}
5880
5881static const struct bpf_func_proto bpf_sys_close_proto = {
5882 .func = bpf_sys_close,
5883 .gpl_only = false,
5884 .ret_type = RET_INTEGER,
5885 .arg1_type = ARG_ANYTHING,
5886};
5887
5888BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
5889{
5890 if (flags)
5891 return -EINVAL;
5892
5893 if (name_sz <= 1 || name[name_sz - 1])
5894 return -EINVAL;
5895
5896 if (!bpf_dump_raw_ok(current_cred()))
5897 return -EPERM;
5898
5899 *res = kallsyms_lookup_name(name);
5900 return *res ? 0 : -ENOENT;
5901}
5902
5903static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
5904 .func = bpf_kallsyms_lookup_name,
5905 .gpl_only = false,
5906 .ret_type = RET_INTEGER,
5907 .arg1_type = ARG_PTR_TO_MEM,
5908 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
5909 .arg3_type = ARG_ANYTHING,
5910 .arg4_type = ARG_PTR_TO_LONG,
5911};
5912
5913static const struct bpf_func_proto *
5914syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5915{
5916 switch (func_id) {
5917 case BPF_FUNC_sys_bpf:
5918 return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
5919 ? NULL : &bpf_sys_bpf_proto;
5920 case BPF_FUNC_btf_find_by_name_kind:
5921 return &bpf_btf_find_by_name_kind_proto;
5922 case BPF_FUNC_sys_close:
5923 return &bpf_sys_close_proto;
5924 case BPF_FUNC_kallsyms_lookup_name:
5925 return &bpf_kallsyms_lookup_name_proto;
5926 default:
5927 return tracing_prog_func_proto(func_id, prog);
5928 }
5929}
5930
5931const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
5932 .get_func_proto = syscall_prog_func_proto,
5933 .is_valid_access = syscall_prog_is_valid_access,
5934};
5935
5936const struct bpf_prog_ops bpf_syscall_prog_ops = {
5937 .test_run = bpf_prog_test_run_syscall,
5938};
5939
5940#ifdef CONFIG_SYSCTL
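/*
 * saved_val mirrors the last value written through this handler, so
 * writing the same value again neither increments nor decrements the
 * static key and its refcount stays balanced.
 */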
5941static int bpf_stats_handler(struct ctl_table *table, int write,
5942 void *buffer, size_t *lenp, loff_t *ppos)
5943{
5944 struct static_key *key = (struct static_key *)table->data;
5945 static int saved_val;
5946 int val, ret;
5947 struct ctl_table tmp = {
5948 .data = &val,
5949 .maxlen = sizeof(val),
5950 .mode = table->mode,
5951 .extra1 = SYSCTL_ZERO,
5952 .extra2 = SYSCTL_ONE,
5953 };
5954
5955 if (write && !capable(CAP_SYS_ADMIN))
5956 return -EPERM;
5957
5958 mutex_lock(&bpf_stats_enabled_mutex);
5959 val = saved_val;
5960 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5961 if (write && !ret && val != saved_val) {
5962 if (val)
5963 static_key_slow_inc(key);
5964 else
5965 static_key_slow_dec(key);
5966 saved_val = val;
5967 }
5968 mutex_unlock(&bpf_stats_enabled_mutex);
5969 return ret;
5970}
5971
5972void __weak unpriv_ebpf_notify(int new_state)
5973{
5974}
5975
5976static int bpf_unpriv_handler(struct ctl_table *table, int write,
5977 void *buffer, size_t *lenp, loff_t *ppos)
5978{
5979 int ret, unpriv_enable = *(int *)table->data;
5980 bool locked_state = unpriv_enable == 1;
5981 struct ctl_table tmp = *table;
5982
5983 if (write && !capable(CAP_SYS_ADMIN))
5984 return -EPERM;
5985
5986 tmp.data = &unpriv_enable;
5987 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5988 if (write && !ret) {
5989 if (locked_state && unpriv_enable != 1)
5990 return -EPERM;
5991 *(int *)table->data = unpriv_enable;
5992 }
5993
5994 if (write)
5995 unpriv_ebpf_notify(unpriv_enable);
5996
5997 return ret;
5998}
5999
6000static struct ctl_table bpf_syscall_table[] = {
6001 {
6002 .procname = "unprivileged_bpf_disabled",
6003 .data = &sysctl_unprivileged_bpf_disabled,
6004 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
6005 .mode = 0644,
6006 .proc_handler = bpf_unpriv_handler,
6007 .extra1 = SYSCTL_ZERO,
6008 .extra2 = SYSCTL_TWO,
6009 },
6010 {
6011 .procname = "bpf_stats_enabled",
6012 .data = &bpf_stats_enabled_key.key,
6013 .mode = 0644,
6014 .proc_handler = bpf_stats_handler,
6015 },
6016 { }
6017};
6018
6019static int __init bpf_syscall_sysctl_init(void)
6020{
6021 register_sysctl_init("kernel", bpf_syscall_table);
6022 return 0;
6023}
6024late_initcall(bpf_syscall_sysctl_init);
6025#endif /* CONFIG_SYSCTL */
static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
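
/*
 * Illustrative example (editorial sketch): for a BPF_MAP_TYPE_PERCPU_ARRAY
 * with value_size == 6 on a system where num_possible_cpus() == 8, the
 * syscall-side buffer is round_up(6, 8) * 8 == 64 bytes, i.e. one
 * 8-byte-aligned slot per possible CPU; fd-based maps exchange a 4-byte
 * fd/id instead, and all other maps use value_size as-is.
 */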

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running non-sleepable BPF programs to complete so that
	 * userspace, when we return to it, knows that all non-sleepable
	 * programs that could be running use the new map value. For sleepable
	 * BPF programs, synchronize_rcu_tasks_trace() would be needed to wait
	 * for their completion, but that wait can be very long and userspace
	 * may mistake it for a hang, so sleepable BPF programs are not
	 * handled here for now.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();

	return err;
}

/* Please, do not use this function outside of the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}
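
/*
 * Illustrative example (editorial sketch): with 4 KiB pages and
 * PAGE_ALLOC_COSTLY_ORDER == 3, a non-mmapable request of up to 32 KiB
 * is first attempted with kmalloc_node(__GFP_NORETRY) and only falls
 * back to vmalloc on failure, while a 1 MiB request - or any mmapable
 * one - goes straight to __vmalloc_node_range().
 */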

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * they have zero meaning for the map itself, let's clear them
	 * here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for NULL each time it is
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}
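
/*
 * Illustrative usage (editorial sketch): a caller that wants to know
 * whether the value layout has a kptr at a given offset could do
 *
 *	const struct btf_field *f = btf_record_find(map->record, off, BPF_KPTR);
 *	if (f)
 *		// offset 'off' holds a kptr of BTF type f->kptr.btf_id
 *
 * The bsearch() above relies on rec->fields being kept sorted by offset
 * by the code that builds the record (btf_parse_fields()).
 */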

void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				migrate_disable();
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
							pointee_struct_meta->record : NULL,
							fields[i].type == BPF_KPTR_PERCPU);
				migrate_enable();
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct btf_record *rec = map->record;
	struct btf *btf = map->btf;

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	/* Delay freeing of the btf_record for maps, as the map_free
	 * callback usually needs access to it. It is better to do it here
	 * than to require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback in the map-in-map case,
	 * which eventually calls bpf_map_free_meta, since inner_map_meta is
	 * only a template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
	/* Delay freeing of btf for maps, as map_free callback may need
	 * struct_meta info which will be freed with btf_put().
	 */
	btf_put(btf);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

static void bpf_map_free_in_work(struct bpf_map *map)
{
	INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex like slab_mutex.
	 */
	queue_work(system_unbound_wq, &map->work);
}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}

static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_map_free_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_map_free_rcu_gp);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);

		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
		if (READ_ONCE(map->free_after_mult_rcu_gp))
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		else if (READ_ONCE(map->free_after_rcu_gp))
			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
		else
			bpf_map_free_in_work(map);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory regions (including the initial one) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it is possible to leak a writable page
		 * reference, which would allow user-space to still modify it
		 * after freezing, while the verifier assumes the contents do
		 * not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
	.mmap = bpf_map_mmap,
	.poll = bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
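
/*
 * Illustrative expansion (editorial sketch): with BPF_MAP_FREEZE_LAST_FIELD
 * defined as map_fd below, CHECK_ATTR(BPF_MAP_FREEZE) becomes
 *
 *	memchr_inv((void *)&attr->map_fd + sizeof(attr->map_fd), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_fd) -
 *		   sizeof(attr->map_fd)) != NULL
 *
 * i.e. it evaluates to true - and the command is rejected - iff any byte
 * past the last field this command knows about is non-zero.
 */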

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
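
/*
 * Illustrative examples (editorial sketch): with size == BPF_OBJ_NAME_LEN,
 * copying "my_map" returns 6, copying "my-map" returns -EINVAL ('-' is
 * neither alphanumeric nor '_'/'.'), and a source that fills all 'size'
 * bytes without a terminating '\0' also returns -EINVAL.
 */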

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_capable()) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_KPTR_PERCPU:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD map_extra
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;

	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
		return -EPERM;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
		if (!bpf_capable())
			return -EPERM;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		return -EPERM;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel
	     * counterpart. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}
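
/*
 * Illustrative user-space counterpart (editorial sketch, not kernel
 * code): creating a hash map via the raw syscall looks like
 *
 *	union bpf_attr attr = {
 *		.map_type = BPF_MAP_TYPE_HASH,
 *		.key_size = 4,
 *		.value_size = 8,
 *		.max_entries = 1024,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On failure the errors above surface through errno (EINVAL for a
 * malformed attribute, EPERM for missing capabilities, ...); on success
 * the new map fd is returned with O_CLOEXEC set.
 */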

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}


#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, f.file, key, value, attr->flags);
	if (!err)
		maybe_wait_bpf_programs(map);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	if (!err)
		maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}
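
/*
 * Illustrative user-space iteration (editorial sketch, not kernel code):
 * passing a NULL 'key' yields the first key, so a full walk over a map
 * with 4-byte keys could look like
 *
 *	__u32 cur, next;
 *	union bpf_attr attr = { .map_fd = fd };
 *
 *	attr.key = 0;				// NULL key: fetch first key
 *	attr.next_key = (__u64)(unsigned long)&next;
 *	while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
 *		// ... use 'next' ...
 *		cur = next;
 *		attr.key = (__u64)(unsigned long)&cur;
 *	}
 *
 * where 'fd' is a map fd obtained earlier; the walk ends when the
 * syscall fails with errno == ENOENT.
 */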

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_offloaded(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	return err;
}

int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	void *key, *value;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, map_file, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);

	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
	     (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}
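
/*
 * Illustrative note (editorial sketch): if the element found by
 * map_get_next_key() is deleted before bpf_map_copy_value() runs, the
 * lookup above sees -ENOENT and the same position is retried up to
 * MAP_LOOKUP_RETRIES (3) times before the batch gives up with -EINTR,
 * so a concurrent deleter can make user space observe a batch shorter
 * than 'max_count'.
 */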

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (attr->flags &&
	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
	     map->map_type == BPF_MAP_TYPE_STACK)) {
		err = -EINVAL;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -ENOTSUPP;
	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		if (!bpf_map_is_offloaded(map)) {
			bpf_disable_instrumentation();
			rcu_read_lock();
			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
			rcu_read_unlock();
			bpf_enable_instrumentation();
		}
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
		fdput(f);
		return -ENOTSUPP;
	}

	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		fdput(f);
		return -EPERM;
	}

	mutex_lock(&map->freeze_mutex);
	if (bpf_map_write_active(map)) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_offloaded(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD] = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (!in_irq() && !irqs_disabled())
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog)
{
	unsigned long flags;

	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	spin_lock_irqsave(&prog_idr_lock, flags);
	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;
	spin_unlock_irqrestore(&prog_idr_lock, flags);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	kvfree(aux->func_info);
	kfree(aux->func_info_aux);
	free_uid(aux->user);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{
	bpf_prog_kallsyms_del_all(prog);
	btf_put(prog->aux->btf);
	module_put(prog->aux->mod);
	kvfree(prog->aux->jited_linfo);
	kvfree(prog->aux->linfo);
	kfree(prog->aux->kfunc_tab);
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);

	if (deferred) {
		if (prog->aux->sleepable)
			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
		else
			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	} else {
		__bpf_prog_put_rcu(&prog->aux->rcu);
	}
}

static void bpf_prog_put_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	struct bpf_prog *prog;

	aux = container_of(work, struct bpf_prog_aux, work);
	prog = aux->prog;
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
	bpf_prog_free_id(prog);
	__bpf_prog_put_noref(prog, true);
}

static void __bpf_prog_put(struct bpf_prog *prog)
{
	struct bpf_prog_aux *aux = prog->aux;

	if (atomic64_dec_and_test(&aux->refcnt)) {
		if (in_irq() || irqs_disabled()) {
			INIT_WORK(&aux->work, bpf_prog_put_deferred);
			schedule_work(&aux->work);
		} else {
			bpf_prog_put_deferred(&aux->work);
		}
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

struct bpf_prog_kstats {
	u64 nsecs;
	u64 cnt;
	u64 misses;
};

void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	unsigned int flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_kstats *stats)
{
	u64 nsecs = 0, cnt = 0, misses = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st;
		unsigned int start;
		u64 tnsecs, tcnt, tmisses;

		st = per_cpu_ptr(prog->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&st->syncp);
			tnsecs = u64_stats_read(&st->nsecs);
			tcnt = u64_stats_read(&st->cnt);
			tmisses = u64_stats_read(&st->misses);
		} while (u64_stats_fetch_retry(&st->syncp, start));
		nsecs += tnsecs;
		cnt += tcnt;
		misses += tmisses;
	}
	stats->nsecs = nsecs;
	stats->cnt = cnt;
	stats->misses = misses;
2255}
2256
2257#ifdef CONFIG_PROC_FS
2258static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2259{
2260 const struct bpf_prog *prog = filp->private_data;
2261 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2262 struct bpf_prog_kstats stats;
2263
2264 bpf_prog_get_stats(prog, &stats);
2265 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2266 seq_printf(m,
2267 "prog_type:\t%u\n"
2268 "prog_jited:\t%u\n"
2269 "prog_tag:\t%s\n"
2270 "memlock:\t%llu\n"
2271 "prog_id:\t%u\n"
2272 "run_time_ns:\t%llu\n"
2273 "run_cnt:\t%llu\n"
2274 "recursion_misses:\t%llu\n"
2275 "verified_insns:\t%u\n",
2276 prog->type,
2277 prog->jited,
2278 prog_tag,
2279 prog->pages * 1ULL << PAGE_SHIFT,
2280 prog->aux->id,
2281 stats.nsecs,
2282 stats.cnt,
2283 stats.misses,
2284 prog->aux->verified_insns);
2285}
2286#endif
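
/* With CONFIG_PROC_FS, reading /proc/<pid>/fdinfo/<fd> for a BPF prog fd
 * therefore produces output along these lines (values illustrative):
 *
 *	prog_type:	1
 *	prog_jited:	1
 *	prog_tag:	9a7e2b05bcc8cd46
 *	memlock:	4096
 *	prog_id:	42
 *	run_time_ns:	123456
 *	run_cnt:	7
 *	recursion_misses:	0
 *	verified_insns:	2
 */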
2287
2288const struct file_operations bpf_prog_fops = {
2289#ifdef CONFIG_PROC_FS
2290 .show_fdinfo = bpf_prog_show_fdinfo,
2291#endif
2292 .release = bpf_prog_release,
2293 .read = bpf_dummy_read,
2294 .write = bpf_dummy_write,
2295};
2296
2297int bpf_prog_new_fd(struct bpf_prog *prog)
2298{
2299 int ret;
2300
2301 ret = security_bpf_prog(prog);
2302 if (ret < 0)
2303 return ret;
2304
2305 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2306 O_RDWR | O_CLOEXEC);
2307}
2308
2309static struct bpf_prog *____bpf_prog_get(struct fd f)
2310{
2311 if (!f.file)
2312 return ERR_PTR(-EBADF);
2313 if (f.file->f_op != &bpf_prog_fops) {
2314 fdput(f);
2315 return ERR_PTR(-EINVAL);
2316 }
2317
2318 return f.file->private_data;
2319}
2320
2321void bpf_prog_add(struct bpf_prog *prog, int i)
2322{
2323 atomic64_add(i, &prog->aux->refcnt);
2324}
2325EXPORT_SYMBOL_GPL(bpf_prog_add);
2326
2327void bpf_prog_sub(struct bpf_prog *prog, int i)
2328{
2329 /* Only to be used for undoing previous bpf_prog_add() in some
2330 * error path. We still know that another entity in our call
2331 * path holds a reference to the program, thus atomic_sub() can
2332 * be safely used in such cases!
2333 */
2334 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2335}
2336EXPORT_SYMBOL_GPL(bpf_prog_sub);
2337
2338void bpf_prog_inc(struct bpf_prog *prog)
2339{
2340 atomic64_inc(&prog->aux->refcnt);
2341}
2342EXPORT_SYMBOL_GPL(bpf_prog_inc);
2343
2344/* prog_idr_lock should have been held */
2345struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2346{
2347 int refold;
2348
2349 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2350
2351 if (!refold)
2352 return ERR_PTR(-ENOENT);
2353
2354 return prog;
2355}
2356EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2357
2358bool bpf_prog_get_ok(struct bpf_prog *prog,
2359 enum bpf_prog_type *attach_type, bool attach_drv)
2360{
2361 /* not an attachment, just a refcount inc, always allow */
2362 if (!attach_type)
2363 return true;
2364
2365 if (prog->type != *attach_type)
2366 return false;
2367 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2368 return false;
2369
2370 return true;
2371}
2372
2373static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2374 bool attach_drv)
2375{
2376 struct fd f = fdget(ufd);
2377 struct bpf_prog *prog;
2378
2379 prog = ____bpf_prog_get(f);
2380 if (IS_ERR(prog))
2381 return prog;
2382 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
2383 prog = ERR_PTR(-EINVAL);
2384 goto out;
2385 }
2386
2387 bpf_prog_inc(prog);
2388out:
2389 fdput(f);
2390 return prog;
2391}
2392
2393struct bpf_prog *bpf_prog_get(u32 ufd)
2394{
2395 return __bpf_prog_get(ufd, NULL, false);
2396}
2397
2398struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2399 bool attach_drv)
2400{
2401 return __bpf_prog_get(ufd, &type, attach_drv);
2402}
2403EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2404
/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later, for some of them, specifying
 * expected_attach_type at load time became required so that the program
 * could be validated properly. Program types that may be loaded both
 * w/ and w/o expected_attach_type (for backward compatibility) should
 * have a default attach type assigned to expected_attach_type in the
 * latter case, so that it can still be validated at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr
 * if the prog type requires it but has some attach types that have to
 * remain backward compatible.
 */
2417static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2418{
2419 switch (attr->prog_type) {
2420 case BPF_PROG_TYPE_CGROUP_SOCK:
2421 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2422 * exist so checking for non-zero is the way to go here.
2423 */
2424 if (!attr->expected_attach_type)
2425 attr->expected_attach_type =
2426 BPF_CGROUP_INET_SOCK_CREATE;
2427 break;
2428 case BPF_PROG_TYPE_SK_REUSEPORT:
2429 if (!attr->expected_attach_type)
2430 attr->expected_attach_type =
2431 BPF_SK_REUSEPORT_SELECT;
2432 break;
2433 }
2434}
2435
2436static int
2437bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2438 enum bpf_attach_type expected_attach_type,
2439 struct btf *attach_btf, u32 btf_id,
2440 struct bpf_prog *dst_prog)
2441{
2442 if (btf_id) {
2443 if (btf_id > BTF_MAX_TYPE)
2444 return -EINVAL;
2445
2446 if (!attach_btf && !dst_prog)
2447 return -EINVAL;
2448
2449 switch (prog_type) {
2450 case BPF_PROG_TYPE_TRACING:
2451 case BPF_PROG_TYPE_LSM:
2452 case BPF_PROG_TYPE_STRUCT_OPS:
2453 case BPF_PROG_TYPE_EXT:
2454 break;
2455 default:
2456 return -EINVAL;
2457 }
2458 }
2459
2460 if (attach_btf && (!btf_id || dst_prog))
2461 return -EINVAL;
2462
2463 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2464 prog_type != BPF_PROG_TYPE_EXT)
2465 return -EINVAL;
2466
2467 switch (prog_type) {
2468 case BPF_PROG_TYPE_CGROUP_SOCK:
2469 switch (expected_attach_type) {
2470 case BPF_CGROUP_INET_SOCK_CREATE:
2471 case BPF_CGROUP_INET_SOCK_RELEASE:
2472 case BPF_CGROUP_INET4_POST_BIND:
2473 case BPF_CGROUP_INET6_POST_BIND:
2474 return 0;
2475 default:
2476 return -EINVAL;
2477 }
2478 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2479 switch (expected_attach_type) {
2480 case BPF_CGROUP_INET4_BIND:
2481 case BPF_CGROUP_INET6_BIND:
2482 case BPF_CGROUP_INET4_CONNECT:
2483 case BPF_CGROUP_INET6_CONNECT:
2484 case BPF_CGROUP_UNIX_CONNECT:
2485 case BPF_CGROUP_INET4_GETPEERNAME:
2486 case BPF_CGROUP_INET6_GETPEERNAME:
2487 case BPF_CGROUP_UNIX_GETPEERNAME:
2488 case BPF_CGROUP_INET4_GETSOCKNAME:
2489 case BPF_CGROUP_INET6_GETSOCKNAME:
2490 case BPF_CGROUP_UNIX_GETSOCKNAME:
2491 case BPF_CGROUP_UDP4_SENDMSG:
2492 case BPF_CGROUP_UDP6_SENDMSG:
2493 case BPF_CGROUP_UNIX_SENDMSG:
2494 case BPF_CGROUP_UDP4_RECVMSG:
2495 case BPF_CGROUP_UDP6_RECVMSG:
2496 case BPF_CGROUP_UNIX_RECVMSG:
2497 return 0;
2498 default:
2499 return -EINVAL;
2500 }
2501 case BPF_PROG_TYPE_CGROUP_SKB:
2502 switch (expected_attach_type) {
2503 case BPF_CGROUP_INET_INGRESS:
2504 case BPF_CGROUP_INET_EGRESS:
2505 return 0;
2506 default:
2507 return -EINVAL;
2508 }
2509 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2510 switch (expected_attach_type) {
2511 case BPF_CGROUP_SETSOCKOPT:
2512 case BPF_CGROUP_GETSOCKOPT:
2513 return 0;
2514 default:
2515 return -EINVAL;
2516 }
2517 case BPF_PROG_TYPE_SK_LOOKUP:
2518 if (expected_attach_type == BPF_SK_LOOKUP)
2519 return 0;
2520 return -EINVAL;
2521 case BPF_PROG_TYPE_SK_REUSEPORT:
2522 switch (expected_attach_type) {
2523 case BPF_SK_REUSEPORT_SELECT:
2524 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2525 return 0;
2526 default:
2527 return -EINVAL;
2528 }
2529 case BPF_PROG_TYPE_NETFILTER:
2530 if (expected_attach_type == BPF_NETFILTER)
2531 return 0;
2532 return -EINVAL;
2533 case BPF_PROG_TYPE_SYSCALL:
2534 case BPF_PROG_TYPE_EXT:
2535 if (expected_attach_type)
2536 return -EINVAL;
2537 fallthrough;
2538 default:
2539 return 0;
2540 }
2541}
2542
2543static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2544{
2545 switch (prog_type) {
2546 case BPF_PROG_TYPE_SCHED_CLS:
2547 case BPF_PROG_TYPE_SCHED_ACT:
2548 case BPF_PROG_TYPE_XDP:
2549 case BPF_PROG_TYPE_LWT_IN:
2550 case BPF_PROG_TYPE_LWT_OUT:
2551 case BPF_PROG_TYPE_LWT_XMIT:
2552 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2553 case BPF_PROG_TYPE_SK_SKB:
2554 case BPF_PROG_TYPE_SK_MSG:
2555 case BPF_PROG_TYPE_FLOW_DISSECTOR:
2556 case BPF_PROG_TYPE_CGROUP_DEVICE:
2557 case BPF_PROG_TYPE_CGROUP_SOCK:
2558 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2559 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2560 case BPF_PROG_TYPE_CGROUP_SYSCTL:
2561 case BPF_PROG_TYPE_SOCK_OPS:
2562 case BPF_PROG_TYPE_EXT: /* extends any prog */
2563 case BPF_PROG_TYPE_NETFILTER:
2564 return true;
2565 case BPF_PROG_TYPE_CGROUP_SKB:
2566 /* always unpriv */
2567 case BPF_PROG_TYPE_SK_REUSEPORT:
2568 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2569 default:
2570 return false;
2571 }
2572}
2573
2574static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2575{
2576 switch (prog_type) {
2577 case BPF_PROG_TYPE_KPROBE:
2578 case BPF_PROG_TYPE_TRACEPOINT:
2579 case BPF_PROG_TYPE_PERF_EVENT:
2580 case BPF_PROG_TYPE_RAW_TRACEPOINT:
2581 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2582 case BPF_PROG_TYPE_TRACING:
2583 case BPF_PROG_TYPE_LSM:
2584 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2585 case BPF_PROG_TYPE_EXT: /* extends any prog */
2586 return true;
2587 default:
2588 return false;
2589 }
2590}
2591
2592/* last field in 'union bpf_attr' used by this command */
2593#define BPF_PROG_LOAD_LAST_FIELD log_true_size
2594
2595static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2596{
2597 enum bpf_prog_type type = attr->prog_type;
2598 struct bpf_prog *prog, *dst_prog = NULL;
2599 struct btf *attach_btf = NULL;
2600 int err;
2601 char license[128];
2602
2603 if (CHECK_ATTR(BPF_PROG_LOAD))
2604 return -EINVAL;
2605
2606 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2607 BPF_F_ANY_ALIGNMENT |
2608 BPF_F_TEST_STATE_FREQ |
2609 BPF_F_SLEEPABLE |
2610 BPF_F_TEST_RND_HI32 |
2611 BPF_F_XDP_HAS_FRAGS |
2612 BPF_F_XDP_DEV_BOUND_ONLY |
2613 BPF_F_TEST_REG_INVARIANTS))
2614 return -EINVAL;
2615
2616 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2617 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2618 !bpf_capable())
2619 return -EPERM;
2620
	/* The intent here is for unprivileged_bpf_disabled to block BPF
	 * program creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, and so hinge on object
	 * creation having succeeded. Even with unprivileged BPF disabled,
	 * capability checks are still carried out for these and other
	 * operations.
	 */
2628 if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
2629 return -EPERM;
2630
2631 if (attr->insn_cnt == 0 ||
2632 attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2633 return -E2BIG;
2634 if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2635 type != BPF_PROG_TYPE_CGROUP_SKB &&
2636 !bpf_capable())
2637 return -EPERM;
2638
2639 if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2640 return -EPERM;
2641 if (is_perfmon_prog_type(type) && !perfmon_capable())
2642 return -EPERM;
2643
	/* attach_prog_fd/attach_btf_obj_fd can specify the fd of either a
	 * bpf_prog or a btf object, so we need to check which one it is.
	 */
2647 if (attr->attach_prog_fd) {
2648 dst_prog = bpf_prog_get(attr->attach_prog_fd);
2649 if (IS_ERR(dst_prog)) {
2650 dst_prog = NULL;
2651 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2652 if (IS_ERR(attach_btf))
2653 return -EINVAL;
2654 if (!btf_is_kernel(attach_btf)) {
2655 /* attaching through specifying bpf_prog's BTF
2656 * objects directly might be supported eventually
2657 */
2658 btf_put(attach_btf);
2659 return -ENOTSUPP;
2660 }
2661 }
2662 } else if (attr->attach_btf_id) {
2663 /* fall back to vmlinux BTF, if BTF type ID is specified */
2664 attach_btf = bpf_get_btf_vmlinux();
2665 if (IS_ERR(attach_btf))
2666 return PTR_ERR(attach_btf);
2667 if (!attach_btf)
2668 return -EINVAL;
2669 btf_get(attach_btf);
2670 }
2671
2672 bpf_prog_load_fixup_attach_type(attr);
2673 if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2674 attach_btf, attr->attach_btf_id,
2675 dst_prog)) {
2676 if (dst_prog)
2677 bpf_prog_put(dst_prog);
2678 if (attach_btf)
2679 btf_put(attach_btf);
2680 return -EINVAL;
2681 }
2682
2683 /* plain bpf_prog allocation */
2684 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2685 if (!prog) {
2686 if (dst_prog)
2687 bpf_prog_put(dst_prog);
2688 if (attach_btf)
2689 btf_put(attach_btf);
2690 return -ENOMEM;
2691 }
2692
2693 prog->expected_attach_type = attr->expected_attach_type;
2694 prog->aux->attach_btf = attach_btf;
2695 prog->aux->attach_btf_id = attr->attach_btf_id;
2696 prog->aux->dst_prog = dst_prog;
2697 prog->aux->dev_bound = !!attr->prog_ifindex;
2698 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2699 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2700
2701 err = security_bpf_prog_alloc(prog->aux);
2702 if (err)
2703 goto free_prog;
2704
2705 prog->aux->user = get_current_user();
2706 prog->len = attr->insn_cnt;
2707
2708 err = -EFAULT;
2709 if (copy_from_bpfptr(prog->insns,
2710 make_bpfptr(attr->insns, uattr.is_kernel),
2711 bpf_prog_insn_size(prog)) != 0)
2712 goto free_prog_sec;
2713 /* copy eBPF program license from user space */
2714 if (strncpy_from_bpfptr(license,
2715 make_bpfptr(attr->license, uattr.is_kernel),
2716 sizeof(license) - 1) < 0)
2717 goto free_prog_sec;
2718 license[sizeof(license) - 1] = 0;
2719
2720 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2721 prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2722
2723 prog->orig_prog = NULL;
2724 prog->jited = 0;
2725
2726 atomic64_set(&prog->aux->refcnt, 1);
2727
2728 if (bpf_prog_is_dev_bound(prog->aux)) {
2729 err = bpf_prog_dev_bound_init(prog, attr);
2730 if (err)
2731 goto free_prog_sec;
2732 }
2733
2734 if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2735 bpf_prog_is_dev_bound(dst_prog->aux)) {
2736 err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2737 if (err)
2738 goto free_prog_sec;
2739 }
2740
	/*
	 * Bookkeeping for managing the program attachment chain.
	 *
	 * It might be tempting to set the attach_tracing_prog flag at
	 * attachment time instead, but that would not prevent loading a
	 * bunch of tracing programs first and only then attaching them
	 * to one another.
	 *
	 * The attach_tracing_prog flag is set for the program's whole
	 * lifecycle and doesn't have to be cleared in
	 * bpf_tracing_link_release, since tracing programs cannot change
	 * their attachment target.
	 */
2752 if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
2753 dst_prog->type == BPF_PROG_TYPE_TRACING) {
2754 prog->aux->attach_tracing_prog = true;
2755 }
2756
2757 /* find program type: socket_filter vs tracing_filter */
2758 err = find_prog_type(type, prog);
2759 if (err < 0)
2760 goto free_prog_sec;
2761
2762 prog->aux->load_time = ktime_get_boottime_ns();
2763 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2764 sizeof(attr->prog_name));
2765 if (err < 0)
2766 goto free_prog_sec;
2767
2768 /* run eBPF verifier */
2769 err = bpf_check(&prog, attr, uattr, uattr_size);
2770 if (err < 0)
2771 goto free_used_maps;
2772
2773 prog = bpf_prog_select_runtime(prog, &err);
2774 if (err < 0)
2775 goto free_used_maps;
2776
2777 err = bpf_prog_alloc_id(prog);
2778 if (err)
2779 goto free_used_maps;
2780
	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
	 * effectively publicly exposed. However, retrieving via
	 * bpf_prog_get_fd_by_id() will take another reference,
	 * so it cannot go away underneath us.
	 *
	 * Only for the time /after/ a successful bpf_prog_new_fd()
	 * and before returning to userspace might we hold just one
	 * reference, and any parallel close on that fd could rip
	 * everything out. Hence, the notifications below must happen
	 * before bpf_prog_new_fd().
	 *
	 * Also, any failure handling from this point onwards must
	 * use bpf_prog_put() given the program is exposed.
	 */
2795 bpf_prog_kallsyms_add(prog);
2796 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2797 bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2798
2799 err = bpf_prog_new_fd(prog);
2800 if (err < 0)
2801 bpf_prog_put(prog);
2802 return err;
2803
2804free_used_maps:
2805 /* In case we have subprogs, we need to wait for a grace
2806 * period before we can tear down JIT memory since symbols
2807 * are already exposed under kallsyms.
2808 */
2809 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
2810 return err;
2811free_prog_sec:
2812 free_uid(prog->aux->user);
2813 security_bpf_prog_free(prog->aux);
2814free_prog:
2815 if (prog->aux->attach_btf)
2816 btf_put(prog->aux->attach_btf);
2817 bpf_prog_free(prog);
2818 return err;
2819}
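
/* A minimal userspace sketch of the BPF_PROG_LOAD command serviced above:
 * a two-instruction socket filter ("r0 = 0; exit") with a GPL license
 * string, using only uapi definitions (no libbpf). Illustrative only; the
 * returned fd must be close()'d by the caller.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int load_trivial_filter(void)
 *	{
 *		struct bpf_insn insns[] = {
 *			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *			  .dst_reg = BPF_REG_0, .imm = 0 },
 *			{ .code = BPF_JMP | BPF_EXIT },
 *		};
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns     = (unsigned long)insns;
 *		attr.insn_cnt  = 2;
 *		attr.license   = (unsigned long)"GPL";
 *
 *		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	}
 */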
2820
2821#define BPF_OBJ_LAST_FIELD path_fd
2822
2823static int bpf_obj_pin(const union bpf_attr *attr)
2824{
2825 int path_fd;
2826
2827 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
2828 return -EINVAL;
2829
2830 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2831 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2832 return -EINVAL;
2833
2834 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2835 return bpf_obj_pin_user(attr->bpf_fd, path_fd,
2836 u64_to_user_ptr(attr->pathname));
2837}
2838
2839static int bpf_obj_get(const union bpf_attr *attr)
2840{
2841 int path_fd;
2842
2843 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2844 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
2845 return -EINVAL;
2846
2847 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2848 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2849 return -EINVAL;
2850
2851 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2852 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
2853 attr->file_flags);
2854}
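
/* Pin/get usage sketch, assuming bpffs is mounted at /sys/fs/bpf: the
 * same attr layout serves both commands, with bpf_fd only consumed by
 * BPF_OBJ_PIN (illustrative; error handling omitted).
 *
 *	union bpf_attr attr = {
 *		.pathname = (unsigned long)"/sys/fs/bpf/my_prog",
 *		.bpf_fd   = prog_fd,
 *	};
 *
 *	err = syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *	...
 *	attr.bpf_fd = 0;
 *	new_fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */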
2855
2856void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2857 const struct bpf_link_ops *ops, struct bpf_prog *prog)
2858{
2859 atomic64_set(&link->refcnt, 1);
2860 link->type = type;
2861 link->id = 0;
2862 link->ops = ops;
2863 link->prog = prog;
2864}
2865
2866static void bpf_link_free_id(int id)
2867{
2868 if (!id)
2869 return;
2870
2871 spin_lock_bh(&link_idr_lock);
2872 idr_remove(&link_idr, id);
2873 spin_unlock_bh(&link_idr_lock);
2874}
2875
/* Clean up bpf_link and its corresponding anon_inode file and FD. After
 * the anon_inode is created, the bpf_link can't simply be kfree()'d due
 * to the anon_inode's deferred release() call. This helper marks the
 * bpf_link as defunct, releases the anon_inode file and puts the
 * reserved FD. The bpf_prog's refcnt is not decremented; that is the
 * responsibility of the calling code that failed to complete bpf_link
 * initialization.
 * This helper eventually calls the link's dealloc callback, but does
 * not call its release callback.
 */
2885void bpf_link_cleanup(struct bpf_link_primer *primer)
2886{
2887 primer->link->prog = NULL;
2888 bpf_link_free_id(primer->id);
2889 fput(primer->file);
2890 put_unused_fd(primer->fd);
2891}
2892
2893void bpf_link_inc(struct bpf_link *link)
2894{
2895 atomic64_inc(&link->refcnt);
2896}
2897
2898/* bpf_link_free is guaranteed to be called from process context */
2899static void bpf_link_free(struct bpf_link *link)
2900{
2901 bpf_link_free_id(link->id);
2902 if (link->prog) {
2903 /* detach BPF program, clean up used resources */
2904 link->ops->release(link);
2905 bpf_prog_put(link->prog);
2906 }
2907 /* free bpf_link and its containing memory */
2908 link->ops->dealloc(link);
2909}
2910
2911static void bpf_link_put_deferred(struct work_struct *work)
2912{
2913 struct bpf_link *link = container_of(work, struct bpf_link, work);
2914
2915 bpf_link_free(link);
2916}
2917
2918/* bpf_link_put might be called from atomic context. It needs to be called
2919 * from sleepable context in order to acquire sleeping locks during the process.
2920 */
2921void bpf_link_put(struct bpf_link *link)
2922{
2923 if (!atomic64_dec_and_test(&link->refcnt))
2924 return;
2925
2926 INIT_WORK(&link->work, bpf_link_put_deferred);
2927 schedule_work(&link->work);
2928}
2929EXPORT_SYMBOL(bpf_link_put);
2930
2931static void bpf_link_put_direct(struct bpf_link *link)
2932{
2933 if (!atomic64_dec_and_test(&link->refcnt))
2934 return;
2935 bpf_link_free(link);
2936}
2937
2938static int bpf_link_release(struct inode *inode, struct file *filp)
2939{
2940 struct bpf_link *link = filp->private_data;
2941
2942 bpf_link_put_direct(link);
2943 return 0;
2944}
2945
2946#ifdef CONFIG_PROC_FS
2947#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2948#define BPF_MAP_TYPE(_id, _ops)
2949#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2950static const char *bpf_link_type_strs[] = {
2951 [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2952#include <linux/bpf_types.h>
2953};
2954#undef BPF_PROG_TYPE
2955#undef BPF_MAP_TYPE
2956#undef BPF_LINK_TYPE
2957
2958static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2959{
2960 const struct bpf_link *link = filp->private_data;
2961 const struct bpf_prog *prog = link->prog;
2962 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2963
2964 seq_printf(m,
2965 "link_type:\t%s\n"
2966 "link_id:\t%u\n",
2967 bpf_link_type_strs[link->type],
2968 link->id);
2969 if (prog) {
2970 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2971 seq_printf(m,
2972 "prog_tag:\t%s\n"
2973 "prog_id:\t%u\n",
2974 prog_tag,
2975 prog->aux->id);
2976 }
2977 if (link->ops->show_fdinfo)
2978 link->ops->show_fdinfo(link, m);
2979}
2980#endif
2981
2982static const struct file_operations bpf_link_fops = {
2983#ifdef CONFIG_PROC_FS
2984 .show_fdinfo = bpf_link_show_fdinfo,
2985#endif
2986 .release = bpf_link_release,
2987 .read = bpf_dummy_read,
2988 .write = bpf_dummy_write,
2989};
2990
2991static int bpf_link_alloc_id(struct bpf_link *link)
2992{
2993 int id;
2994
2995 idr_preload(GFP_KERNEL);
2996 spin_lock_bh(&link_idr_lock);
2997 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2998 spin_unlock_bh(&link_idr_lock);
2999 idr_preload_end();
3000
3001 return id;
3002}
3003
/* Prepare bpf_link to be exposed to user-space by allocating an
 * anon_inode file, reserving an unused FD and allocating an ID from
 * link_idr. This is to be paired with bpf_link_settle() to install the
 * FD and ID and expose the bpf_link to user-space, if the bpf_link is
 * successfully attached. If not, the bpf_link and pre-allocated
 * resources are to be freed with a bpf_link_cleanup() call. All the
 * transient state is passed around in struct bpf_link_primer.
 * This is the preferred way to create and initialize a bpf_link,
 * especially when there are complicated and expensive operations
 * between creating the bpf_link itself and attaching it to the BPF
 * hook. By using bpf_link_prime() and bpf_link_settle(), kernel code
 * using bpf_link doesn't have to perform expensive (and potentially
 * failing) roll back operations in the rare case that the file, FD, or
 * ID can't be allocated.
 */
3017int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
3018{
3019 struct file *file;
3020 int fd, id;
3021
3022 fd = get_unused_fd_flags(O_CLOEXEC);
3023 if (fd < 0)
3024 return fd;
3025
3027 id = bpf_link_alloc_id(link);
3028 if (id < 0) {
3029 put_unused_fd(fd);
3030 return id;
3031 }
3032
3033 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
3034 if (IS_ERR(file)) {
3035 bpf_link_free_id(id);
3036 put_unused_fd(fd);
3037 return PTR_ERR(file);
3038 }
3039
3040 primer->link = link;
3041 primer->file = file;
3042 primer->fd = fd;
3043 primer->id = id;
3044 return 0;
3045}
3046
3047int bpf_link_settle(struct bpf_link_primer *primer)
3048{
3049 /* make bpf_link fetchable by ID */
3050 spin_lock_bh(&link_idr_lock);
3051 primer->link->id = primer->id;
3052 spin_unlock_bh(&link_idr_lock);
3053 /* make bpf_link fetchable by FD */
3054 fd_install(primer->fd, primer->file);
3055 /* pass through installed FD */
3056 return primer->fd;
3057}
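
/* The canonical prime/settle pattern, as the attach paths below
 * demonstrate (attach_to_hook() stands in for the subsystem-specific
 * step; BPF_LINK_TYPE_FOO and foo_link_lops are placeholders):
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_FOO, &foo_link_lops, prog);
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);			// not yet primed, plain kfree
 *		return err;
 *	}
 *	err = attach_to_hook(link);		// expensive, may fail
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);	// never kfree once primed
 *		return err;
 *	}
 *	return bpf_link_settle(&link_primer);	// install FD + ID
 */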
3058
3059int bpf_link_new_fd(struct bpf_link *link)
3060{
3061 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
3062}
3063
3064struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3065{
3066 struct fd f = fdget(ufd);
3067 struct bpf_link *link;
3068
3069 if (!f.file)
3070 return ERR_PTR(-EBADF);
3071 if (f.file->f_op != &bpf_link_fops) {
3072 fdput(f);
3073 return ERR_PTR(-EINVAL);
3074 }
3075
3076 link = f.file->private_data;
3077 bpf_link_inc(link);
3078 fdput(f);
3079
3080 return link;
3081}
3082EXPORT_SYMBOL(bpf_link_get_from_fd);
3083
3084static void bpf_tracing_link_release(struct bpf_link *link)
3085{
3086 struct bpf_tracing_link *tr_link =
3087 container_of(link, struct bpf_tracing_link, link.link);
3088
3089 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3090 tr_link->trampoline));
3091
3092 bpf_trampoline_put(tr_link->trampoline);
3093
3094 /* tgt_prog is NULL if target is a kernel function */
3095 if (tr_link->tgt_prog)
3096 bpf_prog_put(tr_link->tgt_prog);
3097}
3098
3099static void bpf_tracing_link_dealloc(struct bpf_link *link)
3100{
3101 struct bpf_tracing_link *tr_link =
3102 container_of(link, struct bpf_tracing_link, link.link);
3103
3104 kfree(tr_link);
3105}
3106
3107static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3108 struct seq_file *seq)
3109{
3110 struct bpf_tracing_link *tr_link =
3111 container_of(link, struct bpf_tracing_link, link.link);
3112 u32 target_btf_id, target_obj_id;
3113
3114 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3115 &target_obj_id, &target_btf_id);
3116 seq_printf(seq,
3117 "attach_type:\t%d\n"
3118 "target_obj_id:\t%u\n"
3119 "target_btf_id:\t%u\n",
3120 tr_link->attach_type,
3121 target_obj_id,
3122 target_btf_id);
3123}
3124
3125static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3126 struct bpf_link_info *info)
3127{
3128 struct bpf_tracing_link *tr_link =
3129 container_of(link, struct bpf_tracing_link, link.link);
3130
3131 info->tracing.attach_type = tr_link->attach_type;
3132 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3133 &info->tracing.target_obj_id,
3134 &info->tracing.target_btf_id);
3135
3136 return 0;
3137}
3138
3139static const struct bpf_link_ops bpf_tracing_link_lops = {
3140 .release = bpf_tracing_link_release,
3141 .dealloc = bpf_tracing_link_dealloc,
3142 .show_fdinfo = bpf_tracing_link_show_fdinfo,
3143 .fill_link_info = bpf_tracing_link_fill_link_info,
3144};
3145
3146static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3147 int tgt_prog_fd,
3148 u32 btf_id,
3149 u64 bpf_cookie)
3150{
3151 struct bpf_link_primer link_primer;
3152 struct bpf_prog *tgt_prog = NULL;
3153 struct bpf_trampoline *tr = NULL;
3154 struct bpf_tracing_link *link;
3155 u64 key = 0;
3156 int err;
3157
3158 switch (prog->type) {
3159 case BPF_PROG_TYPE_TRACING:
3160 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3161 prog->expected_attach_type != BPF_TRACE_FEXIT &&
3162 prog->expected_attach_type != BPF_MODIFY_RETURN) {
3163 err = -EINVAL;
3164 goto out_put_prog;
3165 }
3166 break;
3167 case BPF_PROG_TYPE_EXT:
3168 if (prog->expected_attach_type != 0) {
3169 err = -EINVAL;
3170 goto out_put_prog;
3171 }
3172 break;
3173 case BPF_PROG_TYPE_LSM:
3174 if (prog->expected_attach_type != BPF_LSM_MAC) {
3175 err = -EINVAL;
3176 goto out_put_prog;
3177 }
3178 break;
3179 default:
3180 err = -EINVAL;
3181 goto out_put_prog;
3182 }
3183
3184 if (!!tgt_prog_fd != !!btf_id) {
3185 err = -EINVAL;
3186 goto out_put_prog;
3187 }
3188
3189 if (tgt_prog_fd) {
		/*
		 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If
		 * this is ever changed to implement the same for
		 * BPF_PROG_TYPE_TRACING, don't forget to update how the
		 * attach_tracing_prog flag is set.
		 */
3196 if (prog->type != BPF_PROG_TYPE_EXT) {
3197 err = -EINVAL;
3198 goto out_put_prog;
3199 }
3200
3201 tgt_prog = bpf_prog_get(tgt_prog_fd);
3202 if (IS_ERR(tgt_prog)) {
3203 err = PTR_ERR(tgt_prog);
3204 tgt_prog = NULL;
3205 goto out_put_prog;
3206 }
3207
3208 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3209 }
3210
3211 link = kzalloc(sizeof(*link), GFP_USER);
3212 if (!link) {
3213 err = -ENOMEM;
3214 goto out_put_prog;
3215 }
3216 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3217 &bpf_tracing_link_lops, prog);
3218 link->attach_type = prog->expected_attach_type;
3219 link->link.cookie = bpf_cookie;
3220
3221 mutex_lock(&prog->aux->dst_mutex);
3222
	/* There are a few possible cases here:
	 *
	 * - if prog->aux->dst_trampoline is set, the program was just loaded
	 *   and not yet attached to anything, so we can use the values stored
	 *   in prog->aux
	 *
	 * - if prog->aux->dst_trampoline is NULL, the program has already been
	 *   attached to a target and its initial target was cleared (below)
	 *
	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
	 *   target_btf_id using the link_create API
	 *
	 * - if tgt_prog == NULL, this function was called using the old
	 *   raw_tracepoint_open API, and we need to take the target from
	 *   prog->aux
	 *
	 * - if prog->aux->dst_trampoline and tgt_prog are both NULL, the
	 *   program was detached and is going to be re-attached
	 *
	 * - if prog->aux->dst_trampoline is NULL and tgt_prog and
	 *   prog->aux->attach_btf are NULL, the program was already attached
	 *   and the user did not provide tgt_prog_fd, so we have no way to
	 *   find or create a trampoline
	 */
3245 if (!prog->aux->dst_trampoline && !tgt_prog) {
		/*
		 * Allow re-attach for TRACING and LSM programs. If the
		 * program is currently linked, bpf_trampoline_link_prog
		 * will fail. EXT programs need to specify tgt_prog_fd, so
		 * they re-attach via a separate code path.
		 */
3252 if (prog->type != BPF_PROG_TYPE_TRACING &&
3253 prog->type != BPF_PROG_TYPE_LSM) {
3254 err = -EINVAL;
3255 goto out_unlock;
3256 }
3257 /* We can allow re-attach only if we have valid attach_btf. */
3258 if (!prog->aux->attach_btf) {
3259 err = -EINVAL;
3260 goto out_unlock;
3261 }
3262 btf_id = prog->aux->attach_btf_id;
3263 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3264 }
3265
3266 if (!prog->aux->dst_trampoline ||
3267 (key && key != prog->aux->dst_trampoline->key)) {
3268 /* If there is no saved target, or the specified target is
3269 * different from the destination specified at load time, we
3270 * need a new trampoline and a check for compatibility
3271 */
3272 struct bpf_attach_target_info tgt_info = {};
3273
3274 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3275 &tgt_info);
3276 if (err)
3277 goto out_unlock;
3278
3279 if (tgt_info.tgt_mod) {
3280 module_put(prog->aux->mod);
3281 prog->aux->mod = tgt_info.tgt_mod;
3282 }
3283
3284 tr = bpf_trampoline_get(key, &tgt_info);
3285 if (!tr) {
3286 err = -ENOMEM;
3287 goto out_unlock;
3288 }
3289 } else {
3290 /* The caller didn't specify a target, or the target was the
3291 * same as the destination supplied during program load. This
3292 * means we can reuse the trampoline and reference from program
3293 * load time, and there is no need to allocate a new one. This
3294 * can only happen once for any program, as the saved values in
3295 * prog->aux are cleared below.
3296 */
3297 tr = prog->aux->dst_trampoline;
3298 tgt_prog = prog->aux->dst_prog;
3299 }
3300
3301 err = bpf_link_prime(&link->link.link, &link_primer);
3302 if (err)
3303 goto out_unlock;
3304
3305 err = bpf_trampoline_link_prog(&link->link, tr);
3306 if (err) {
3307 bpf_link_cleanup(&link_primer);
3308 link = NULL;
3309 goto out_unlock;
3310 }
3311
3312 link->tgt_prog = tgt_prog;
3313 link->trampoline = tr;
3314
3315 /* Always clear the trampoline and target prog from prog->aux to make
3316 * sure the original attach destination is not kept alive after a
3317 * program is (re-)attached to another target.
3318 */
3319 if (prog->aux->dst_prog &&
3320 (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3321 /* got extra prog ref from syscall, or attaching to different prog */
3322 bpf_prog_put(prog->aux->dst_prog);
3323 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3324 /* we allocated a new trampoline, so free the old one */
3325 bpf_trampoline_put(prog->aux->dst_trampoline);
3326
3327 prog->aux->dst_prog = NULL;
3328 prog->aux->dst_trampoline = NULL;
3329 mutex_unlock(&prog->aux->dst_mutex);
3330
3331 return bpf_link_settle(&link_primer);
3332out_unlock:
3333 if (tr && tr != prog->aux->dst_trampoline)
3334 bpf_trampoline_put(tr);
3335 mutex_unlock(&prog->aux->dst_mutex);
3336 kfree(link);
3337out_put_prog:
3338 if (tgt_prog_fd && tgt_prog)
3339 bpf_prog_put(tgt_prog);
3340 return err;
3341}
3342
3343struct bpf_raw_tp_link {
3344 struct bpf_link link;
3345 struct bpf_raw_event_map *btp;
3346};
3347
3348static void bpf_raw_tp_link_release(struct bpf_link *link)
3349{
3350 struct bpf_raw_tp_link *raw_tp =
3351 container_of(link, struct bpf_raw_tp_link, link);
3352
3353 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
3354 bpf_put_raw_tracepoint(raw_tp->btp);
3355}
3356
3357static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3358{
3359 struct bpf_raw_tp_link *raw_tp =
3360 container_of(link, struct bpf_raw_tp_link, link);
3361
3362 kfree(raw_tp);
3363}
3364
3365static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3366 struct seq_file *seq)
3367{
3368 struct bpf_raw_tp_link *raw_tp_link =
3369 container_of(link, struct bpf_raw_tp_link, link);
3370
3371 seq_printf(seq,
3372 "tp_name:\t%s\n",
3373 raw_tp_link->btp->tp->name);
3374}
3375
3376static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3377 u32 len)
3378{
3379 if (ulen >= len + 1) {
3380 if (copy_to_user(ubuf, buf, len + 1))
3381 return -EFAULT;
3382 } else {
3383 char zero = '\0';
3384
3385 if (copy_to_user(ubuf, buf, ulen - 1))
3386 return -EFAULT;
3387 if (put_user(zero, ubuf + ulen - 1))
3388 return -EFAULT;
3389 return -ENOSPC;
3390 }
3391
3392 return 0;
3393}
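
/* Example of the truncation semantics: for buf = "sched_switch"
 * (len == 12), ulen == 6 copies "sched\0" to the user buffer and
 * returns -ENOSPC, while any ulen >= 13 copies the full NUL-terminated
 * string and returns 0.
 */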
3394
3395static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3396 struct bpf_link_info *info)
3397{
3398 struct bpf_raw_tp_link *raw_tp_link =
3399 container_of(link, struct bpf_raw_tp_link, link);
3400 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3401 const char *tp_name = raw_tp_link->btp->tp->name;
3402 u32 ulen = info->raw_tracepoint.tp_name_len;
3403 size_t tp_len = strlen(tp_name);
3404
3405 if (!ulen ^ !ubuf)
3406 return -EINVAL;
3407
3408 info->raw_tracepoint.tp_name_len = tp_len + 1;
3409
3410 if (!ubuf)
3411 return 0;
3412
3413 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3414}
3415
3416static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3417 .release = bpf_raw_tp_link_release,
3418 .dealloc = bpf_raw_tp_link_dealloc,
3419 .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3420 .fill_link_info = bpf_raw_tp_link_fill_link_info,
3421};
3422
3423#ifdef CONFIG_PERF_EVENTS
3424struct bpf_perf_link {
3425 struct bpf_link link;
3426 struct file *perf_file;
3427};
3428
3429static void bpf_perf_link_release(struct bpf_link *link)
3430{
3431 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3432 struct perf_event *event = perf_link->perf_file->private_data;
3433
3434 perf_event_free_bpf_prog(event);
3435 fput(perf_link->perf_file);
3436}
3437
3438static void bpf_perf_link_dealloc(struct bpf_link *link)
3439{
3440 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3441
3442 kfree(perf_link);
3443}
3444
3445static int bpf_perf_link_fill_common(const struct perf_event *event,
3446 char __user *uname, u32 ulen,
3447 u64 *probe_offset, u64 *probe_addr,
3448 u32 *fd_type, unsigned long *missed)
3449{
3450 const char *buf;
3451 u32 prog_id;
3452 size_t len;
3453 int err;
3454
3455 if (!ulen ^ !uname)
3456 return -EINVAL;
3457
3458 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3459 probe_offset, probe_addr, missed);
3460 if (err)
3461 return err;
3462 if (!uname)
3463 return 0;
3464 if (buf) {
3465 len = strlen(buf);
3466 err = bpf_copy_to_user(uname, buf, ulen, len);
3467 if (err)
3468 return err;
3469 } else {
3470 char zero = '\0';
3471
3472 if (put_user(zero, uname))
3473 return -EFAULT;
3474 }
3475 return 0;
3476}
3477
3478#ifdef CONFIG_KPROBE_EVENTS
3479static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3480 struct bpf_link_info *info)
3481{
3482 unsigned long missed;
3483 char __user *uname;
3484 u64 addr, offset;
3485 u32 ulen, type;
3486 int err;
3487
3488 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3489 ulen = info->perf_event.kprobe.name_len;
3490 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3491 &type, &missed);
3492 if (err)
3493 return err;
3494 if (type == BPF_FD_TYPE_KRETPROBE)
3495 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3496 else
3497 info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3498
3499 info->perf_event.kprobe.offset = offset;
3500 info->perf_event.kprobe.missed = missed;
3501 if (!kallsyms_show_value(current_cred()))
3502 addr = 0;
3503 info->perf_event.kprobe.addr = addr;
3504 return 0;
3505}
3506#endif
3507
3508#ifdef CONFIG_UPROBE_EVENTS
3509static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3510 struct bpf_link_info *info)
3511{
3512 char __user *uname;
3513 u64 addr, offset;
3514 u32 ulen, type;
3515 int err;
3516
3517 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3518 ulen = info->perf_event.uprobe.name_len;
3519 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3520 &type, NULL);
3521 if (err)
3522 return err;
3523
3524 if (type == BPF_FD_TYPE_URETPROBE)
3525 info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3526 else
3527 info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3528 info->perf_event.uprobe.offset = offset;
3529 return 0;
3530}
3531#endif
3532
3533static int bpf_perf_link_fill_probe(const struct perf_event *event,
3534 struct bpf_link_info *info)
3535{
3536#ifdef CONFIG_KPROBE_EVENTS
3537 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3538 return bpf_perf_link_fill_kprobe(event, info);
3539#endif
3540#ifdef CONFIG_UPROBE_EVENTS
3541 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3542 return bpf_perf_link_fill_uprobe(event, info);
3543#endif
3544 return -EOPNOTSUPP;
3545}
3546
3547static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3548 struct bpf_link_info *info)
3549{
3550 char __user *uname;
3551 u32 ulen;
3552
3553 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3554 ulen = info->perf_event.tracepoint.name_len;
3555 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3556 return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
3557}
3558
3559static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3560 struct bpf_link_info *info)
3561{
3562 info->perf_event.event.type = event->attr.type;
3563 info->perf_event.event.config = event->attr.config;
3564 info->perf_event.type = BPF_PERF_EVENT_EVENT;
3565 return 0;
3566}
3567
3568static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3569 struct bpf_link_info *info)
3570{
3571 struct bpf_perf_link *perf_link;
3572 const struct perf_event *event;
3573
3574 perf_link = container_of(link, struct bpf_perf_link, link);
3575 event = perf_get_event(perf_link->perf_file);
3576 if (IS_ERR(event))
3577 return PTR_ERR(event);
3578
3579 switch (event->prog->type) {
3580 case BPF_PROG_TYPE_PERF_EVENT:
3581 return bpf_perf_link_fill_perf_event(event, info);
3582 case BPF_PROG_TYPE_TRACEPOINT:
3583 return bpf_perf_link_fill_tracepoint(event, info);
3584 case BPF_PROG_TYPE_KPROBE:
3585 return bpf_perf_link_fill_probe(event, info);
3586 default:
3587 return -EOPNOTSUPP;
3588 }
3589}
3590
3591static const struct bpf_link_ops bpf_perf_link_lops = {
3592 .release = bpf_perf_link_release,
3593 .dealloc = bpf_perf_link_dealloc,
3594 .fill_link_info = bpf_perf_link_fill_link_info,
3595};
3596
3597static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3598{
3599 struct bpf_link_primer link_primer;
3600 struct bpf_perf_link *link;
3601 struct perf_event *event;
3602 struct file *perf_file;
3603 int err;
3604
3605 if (attr->link_create.flags)
3606 return -EINVAL;
3607
3608 perf_file = perf_event_get(attr->link_create.target_fd);
3609 if (IS_ERR(perf_file))
3610 return PTR_ERR(perf_file);
3611
3612 link = kzalloc(sizeof(*link), GFP_USER);
3613 if (!link) {
3614 err = -ENOMEM;
3615 goto out_put_file;
3616 }
3617 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3618 link->perf_file = perf_file;
3619
3620 err = bpf_link_prime(&link->link, &link_primer);
3621 if (err) {
3622 kfree(link);
3623 goto out_put_file;
3624 }
3625
3626 event = perf_file->private_data;
3627 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3628 if (err) {
3629 bpf_link_cleanup(&link_primer);
3630 goto out_put_file;
3631 }
3632 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3633 bpf_prog_inc(prog);
3634
3635 return bpf_link_settle(&link_primer);
3636
3637out_put_file:
3638 fput(perf_file);
3639 return err;
3640}
3641#else
3642static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3643{
3644 return -EOPNOTSUPP;
3645}
3646#endif /* CONFIG_PERF_EVENTS */
3647
3648static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3649 const char __user *user_tp_name)
3650{
3651 struct bpf_link_primer link_primer;
3652 struct bpf_raw_tp_link *link;
3653 struct bpf_raw_event_map *btp;
3654 const char *tp_name;
3655 char buf[128];
3656 int err;
3657
3658 switch (prog->type) {
3659 case BPF_PROG_TYPE_TRACING:
3660 case BPF_PROG_TYPE_EXT:
3661 case BPF_PROG_TYPE_LSM:
3662 if (user_tp_name)
3663 /* The attach point for this category of programs
3664 * should be specified via btf_id during program load.
3665 */
3666 return -EINVAL;
3667 if (prog->type == BPF_PROG_TYPE_TRACING &&
3668 prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3669 tp_name = prog->aux->attach_func_name;
3670 break;
3671 }
3672 return bpf_tracing_prog_attach(prog, 0, 0, 0);
3673 case BPF_PROG_TYPE_RAW_TRACEPOINT:
3674 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3675 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3676 return -EFAULT;
3677 buf[sizeof(buf) - 1] = 0;
3678 tp_name = buf;
3679 break;
3680 default:
3681 return -EINVAL;
3682 }
3683
3684 btp = bpf_get_raw_tracepoint(tp_name);
3685 if (!btp)
3686 return -ENOENT;
3687
3688 link = kzalloc(sizeof(*link), GFP_USER);
3689 if (!link) {
3690 err = -ENOMEM;
3691 goto out_put_btp;
3692 }
3693 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3694 &bpf_raw_tp_link_lops, prog);
3695 link->btp = btp;
3696
3697 err = bpf_link_prime(&link->link, &link_primer);
3698 if (err) {
3699 kfree(link);
3700 goto out_put_btp;
3701 }
3702
3703 err = bpf_probe_register(link->btp, prog);
3704 if (err) {
3705 bpf_link_cleanup(&link_primer);
3706 goto out_put_btp;
3707 }
3708
3709 return bpf_link_settle(&link_primer);
3710
3711out_put_btp:
3712 bpf_put_raw_tracepoint(btp);
3713 return err;
3714}
3715
3716#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
3717
3718static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3719{
3720 struct bpf_prog *prog;
3721 int fd;
3722
3723 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
3724 return -EINVAL;
3725
3726 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
3727 if (IS_ERR(prog))
3728 return PTR_ERR(prog);
3729
3730 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
3731 if (fd < 0)
3732 bpf_prog_put(prog);
3733 return fd;
3734}
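
/* Userspace sketch of the legacy command, attaching a loaded
 * BPF_PROG_TYPE_RAW_TRACEPOINT program to e.g. the sched_switch
 * tracepoint (illustrative; prog_fd comes from BPF_PROG_LOAD):
 *
 *	union bpf_attr attr = {
 *		.raw_tracepoint = {
 *			.name    = (unsigned long)"sched_switch",
 *			.prog_fd = prog_fd,
 *		},
 *	};
 *
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */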
3735
3736static enum bpf_prog_type
3737attach_type_to_prog_type(enum bpf_attach_type attach_type)
3738{
3739 switch (attach_type) {
3740 case BPF_CGROUP_INET_INGRESS:
3741 case BPF_CGROUP_INET_EGRESS:
3742 return BPF_PROG_TYPE_CGROUP_SKB;
3743 case BPF_CGROUP_INET_SOCK_CREATE:
3744 case BPF_CGROUP_INET_SOCK_RELEASE:
3745 case BPF_CGROUP_INET4_POST_BIND:
3746 case BPF_CGROUP_INET6_POST_BIND:
3747 return BPF_PROG_TYPE_CGROUP_SOCK;
3748 case BPF_CGROUP_INET4_BIND:
3749 case BPF_CGROUP_INET6_BIND:
3750 case BPF_CGROUP_INET4_CONNECT:
3751 case BPF_CGROUP_INET6_CONNECT:
3752 case BPF_CGROUP_UNIX_CONNECT:
3753 case BPF_CGROUP_INET4_GETPEERNAME:
3754 case BPF_CGROUP_INET6_GETPEERNAME:
3755 case BPF_CGROUP_UNIX_GETPEERNAME:
3756 case BPF_CGROUP_INET4_GETSOCKNAME:
3757 case BPF_CGROUP_INET6_GETSOCKNAME:
3758 case BPF_CGROUP_UNIX_GETSOCKNAME:
3759 case BPF_CGROUP_UDP4_SENDMSG:
3760 case BPF_CGROUP_UDP6_SENDMSG:
3761 case BPF_CGROUP_UNIX_SENDMSG:
3762 case BPF_CGROUP_UDP4_RECVMSG:
3763 case BPF_CGROUP_UDP6_RECVMSG:
3764 case BPF_CGROUP_UNIX_RECVMSG:
3765 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3766 case BPF_CGROUP_SOCK_OPS:
3767 return BPF_PROG_TYPE_SOCK_OPS;
3768 case BPF_CGROUP_DEVICE:
3769 return BPF_PROG_TYPE_CGROUP_DEVICE;
3770 case BPF_SK_MSG_VERDICT:
3771 return BPF_PROG_TYPE_SK_MSG;
3772 case BPF_SK_SKB_STREAM_PARSER:
3773 case BPF_SK_SKB_STREAM_VERDICT:
3774 case BPF_SK_SKB_VERDICT:
3775 return BPF_PROG_TYPE_SK_SKB;
3776 case BPF_LIRC_MODE2:
3777 return BPF_PROG_TYPE_LIRC_MODE2;
3778 case BPF_FLOW_DISSECTOR:
3779 return BPF_PROG_TYPE_FLOW_DISSECTOR;
3780 case BPF_CGROUP_SYSCTL:
3781 return BPF_PROG_TYPE_CGROUP_SYSCTL;
3782 case BPF_CGROUP_GETSOCKOPT:
3783 case BPF_CGROUP_SETSOCKOPT:
3784 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3785 case BPF_TRACE_ITER:
3786 case BPF_TRACE_RAW_TP:
3787 case BPF_TRACE_FENTRY:
3788 case BPF_TRACE_FEXIT:
3789 case BPF_MODIFY_RETURN:
3790 return BPF_PROG_TYPE_TRACING;
3791 case BPF_LSM_MAC:
3792 return BPF_PROG_TYPE_LSM;
3793 case BPF_SK_LOOKUP:
3794 return BPF_PROG_TYPE_SK_LOOKUP;
3795 case BPF_XDP:
3796 return BPF_PROG_TYPE_XDP;
3797 case BPF_LSM_CGROUP:
3798 return BPF_PROG_TYPE_LSM;
3799 case BPF_TCX_INGRESS:
3800 case BPF_TCX_EGRESS:
3801 case BPF_NETKIT_PRIMARY:
3802 case BPF_NETKIT_PEER:
3803 return BPF_PROG_TYPE_SCHED_CLS;
3804 default:
3805 return BPF_PROG_TYPE_UNSPEC;
3806 }
3807}
3808
3809static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3810 enum bpf_attach_type attach_type)
3811{
3812 enum bpf_prog_type ptype;
3813
3814 switch (prog->type) {
3815 case BPF_PROG_TYPE_CGROUP_SOCK:
3816 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3817 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3818 case BPF_PROG_TYPE_SK_LOOKUP:
3819 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3820 case BPF_PROG_TYPE_CGROUP_SKB:
3821 if (!capable(CAP_NET_ADMIN))
3822 /* cg-skb progs can be loaded by unpriv user.
3823 * check permissions at attach time.
3824 */
3825 return -EPERM;
3826 return prog->enforce_expected_attach_type &&
3827 prog->expected_attach_type != attach_type ?
3828 -EINVAL : 0;
3829 case BPF_PROG_TYPE_EXT:
3830 return 0;
3831 case BPF_PROG_TYPE_NETFILTER:
3832 if (attach_type != BPF_NETFILTER)
3833 return -EINVAL;
3834 return 0;
3835 case BPF_PROG_TYPE_PERF_EVENT:
3836 case BPF_PROG_TYPE_TRACEPOINT:
3837 if (attach_type != BPF_PERF_EVENT)
3838 return -EINVAL;
3839 return 0;
3840 case BPF_PROG_TYPE_KPROBE:
3841 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
3842 attach_type != BPF_TRACE_KPROBE_MULTI)
3843 return -EINVAL;
3844 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
3845 attach_type != BPF_TRACE_UPROBE_MULTI)
3846 return -EINVAL;
3847 if (attach_type != BPF_PERF_EVENT &&
3848 attach_type != BPF_TRACE_KPROBE_MULTI &&
3849 attach_type != BPF_TRACE_UPROBE_MULTI)
3850 return -EINVAL;
3851 return 0;
3852 case BPF_PROG_TYPE_SCHED_CLS:
3853 if (attach_type != BPF_TCX_INGRESS &&
3854 attach_type != BPF_TCX_EGRESS &&
3855 attach_type != BPF_NETKIT_PRIMARY &&
3856 attach_type != BPF_NETKIT_PEER)
3857 return -EINVAL;
3858 return 0;
3859 default:
3860 ptype = attach_type_to_prog_type(attach_type);
3861 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
3862 return -EINVAL;
3863 return 0;
3864 }
3865}
3866
3867#define BPF_PROG_ATTACH_LAST_FIELD expected_revision
3868
3869#define BPF_F_ATTACH_MASK_BASE \
3870 (BPF_F_ALLOW_OVERRIDE | \
3871 BPF_F_ALLOW_MULTI | \
3872 BPF_F_REPLACE)
3873
3874#define BPF_F_ATTACH_MASK_MPROG \
3875 (BPF_F_REPLACE | \
3876 BPF_F_BEFORE | \
3877 BPF_F_AFTER | \
3878 BPF_F_ID | \
3879 BPF_F_LINK)
3880
3881static int bpf_prog_attach(const union bpf_attr *attr)
3882{
3883 enum bpf_prog_type ptype;
3884 struct bpf_prog *prog;
3885 int ret;
3886
3887 if (CHECK_ATTR(BPF_PROG_ATTACH))
3888 return -EINVAL;
3889
3890 ptype = attach_type_to_prog_type(attr->attach_type);
3891 if (ptype == BPF_PROG_TYPE_UNSPEC)
3892 return -EINVAL;
3893 if (bpf_mprog_supported(ptype)) {
3894 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
3895 return -EINVAL;
3896 } else {
3897 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
3898 return -EINVAL;
3899 if (attr->relative_fd ||
3900 attr->expected_revision)
3901 return -EINVAL;
3902 }
3903
3904 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3905 if (IS_ERR(prog))
3906 return PTR_ERR(prog);
3907
3908 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3909 bpf_prog_put(prog);
3910 return -EINVAL;
3911 }
3912
3913 switch (ptype) {
3914 case BPF_PROG_TYPE_SK_SKB:
3915 case BPF_PROG_TYPE_SK_MSG:
3916 ret = sock_map_get_from_fd(attr, prog);
3917 break;
3918 case BPF_PROG_TYPE_LIRC_MODE2:
3919 ret = lirc_prog_attach(attr, prog);
3920 break;
3921 case BPF_PROG_TYPE_FLOW_DISSECTOR:
3922 ret = netns_bpf_prog_attach(attr, prog);
3923 break;
3924 case BPF_PROG_TYPE_CGROUP_DEVICE:
3925 case BPF_PROG_TYPE_CGROUP_SKB:
3926 case BPF_PROG_TYPE_CGROUP_SOCK:
3927 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3928 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3929 case BPF_PROG_TYPE_CGROUP_SYSCTL:
3930 case BPF_PROG_TYPE_SOCK_OPS:
3931 case BPF_PROG_TYPE_LSM:
3932 if (ptype == BPF_PROG_TYPE_LSM &&
3933 prog->expected_attach_type != BPF_LSM_CGROUP)
3934 ret = -EINVAL;
3935 else
3936 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3937 break;
3938 case BPF_PROG_TYPE_SCHED_CLS:
3939 if (attr->attach_type == BPF_TCX_INGRESS ||
3940 attr->attach_type == BPF_TCX_EGRESS)
3941 ret = tcx_prog_attach(attr, prog);
3942 else
3943 ret = netkit_prog_attach(attr, prog);
3944 break;
3945 default:
3946 ret = -EINVAL;
3947 }
3948
3949 if (ret)
3950 bpf_prog_put(prog);
3951 return ret;
3952}
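
/* Userspace sketch of a cgroup attachment via this command, assuming
 * cgroup_fd is an fd on a cgroup v2 directory (illustrative only):
 *
 *	union bpf_attr attr = {
 *		.target_fd     = cgroup_fd,
 *		.attach_bpf_fd = prog_fd,
 *		.attach_type   = BPF_CGROUP_INET_INGRESS,
 *		.attach_flags  = BPF_F_ALLOW_MULTI,
 *	};
 *
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */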
3953
3954#define BPF_PROG_DETACH_LAST_FIELD expected_revision
3955
3956static int bpf_prog_detach(const union bpf_attr *attr)
3957{
3958 struct bpf_prog *prog = NULL;
3959 enum bpf_prog_type ptype;
3960 int ret;
3961
3962 if (CHECK_ATTR(BPF_PROG_DETACH))
3963 return -EINVAL;
3964
3965 ptype = attach_type_to_prog_type(attr->attach_type);
3966 if (bpf_mprog_supported(ptype)) {
3967 if (ptype == BPF_PROG_TYPE_UNSPEC)
3968 return -EINVAL;
3969 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
3970 return -EINVAL;
3971 if (attr->attach_bpf_fd) {
3972 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3973 if (IS_ERR(prog))
3974 return PTR_ERR(prog);
3975 }
3976 } else if (attr->attach_flags ||
3977 attr->relative_fd ||
3978 attr->expected_revision) {
3979 return -EINVAL;
3980 }
3981
3982 switch (ptype) {
3983 case BPF_PROG_TYPE_SK_MSG:
3984 case BPF_PROG_TYPE_SK_SKB:
3985 ret = sock_map_prog_detach(attr, ptype);
3986 break;
3987 case BPF_PROG_TYPE_LIRC_MODE2:
3988 ret = lirc_prog_detach(attr);
3989 break;
3990 case BPF_PROG_TYPE_FLOW_DISSECTOR:
3991 ret = netns_bpf_prog_detach(attr, ptype);
3992 break;
3993 case BPF_PROG_TYPE_CGROUP_DEVICE:
3994 case BPF_PROG_TYPE_CGROUP_SKB:
3995 case BPF_PROG_TYPE_CGROUP_SOCK:
3996 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3997 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3998 case BPF_PROG_TYPE_CGROUP_SYSCTL:
3999 case BPF_PROG_TYPE_SOCK_OPS:
4000 case BPF_PROG_TYPE_LSM:
4001 ret = cgroup_bpf_prog_detach(attr, ptype);
4002 break;
4003 case BPF_PROG_TYPE_SCHED_CLS:
4004 if (attr->attach_type == BPF_TCX_INGRESS ||
4005 attr->attach_type == BPF_TCX_EGRESS)
4006 ret = tcx_prog_detach(attr, prog);
4007 else
4008 ret = netkit_prog_detach(attr, prog);
4009 break;
4010 default:
4011 ret = -EINVAL;
4012 }
4013
4014 if (prog)
4015 bpf_prog_put(prog);
4016 return ret;
4017}

#define BPF_PROG_QUERY_LAST_FIELD query.revision

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET_SOCK_RELEASE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UNIX_CONNECT:
	case BPF_CGROUP_INET4_GETPEERNAME:
	case BPF_CGROUP_INET6_GETPEERNAME:
	case BPF_CGROUP_UNIX_GETPEERNAME:
	case BPF_CGROUP_INET4_GETSOCKNAME:
	case BPF_CGROUP_INET6_GETSOCKNAME:
	case BPF_CGROUP_UNIX_GETSOCKNAME:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UNIX_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
	case BPF_CGROUP_UNIX_RECVMSG:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
	case BPF_CGROUP_SYSCTL:
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
	case BPF_LSM_CGROUP:
		return cgroup_bpf_prog_query(attr, uattr);
	case BPF_LIRC_MODE2:
		return lirc_prog_query(attr, uattr);
	case BPF_FLOW_DISSECTOR:
	case BPF_SK_LOOKUP:
		return netns_bpf_prog_query(attr, uattr);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
	case BPF_SK_MSG_VERDICT:
	case BPF_SK_SKB_VERDICT:
		return sock_map_bpf_prog_query(attr, uattr);
	case BPF_TCX_INGRESS:
	case BPF_TCX_EGRESS:
		return tcx_prog_query(attr, uattr);
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		return netkit_prog_query(attr, uattr);
	default:
		return -EINVAL;
	}
}
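
/*
 * Example (illustrative sketch, error handling elided): querying the
 * effective set of cgroup ingress programs. On success the kernel writes
 * the number of programs to attr.query.prog_cnt and their IDs to
 * prog_ids[]; cg_fd is assumed to be an open cgroup directory fd.
 *
 *	__u32 prog_ids[64] = {};
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd   = cg_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_cnt    = 64;
 *	attr.query.prog_ids    = (__u64)(unsigned long)prog_ids;
 *	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */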

#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
	    (!attr->test.ctx_size_in && attr->test.ctx_in))
		return -EINVAL;

	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
	    (!attr->test.ctx_size_out && attr->test.ctx_out))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}
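
/*
 * Example (illustrative sketch, error handling elided): a single test run
 * of a program against an input buffer. After the call, attr.test.retval
 * holds the program's return code; prog_fd and pkt[] are assumed to be
 * supplied by the caller.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd      = prog_fd;
 *	attr.test.data_in      = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = sizeof(pkt);
 *	attr.test.repeat       = 1;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 */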

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}
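
/*
 * Example (illustrative sketch): walking every loaded program by ID.
 * BPF_PROG_GET_NEXT_ID returns the smallest ID greater than start_id and
 * fails with ENOENT once the IDR is exhausted; handle_prog_id() is a
 * caller-provided placeholder.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.start_id = 0;
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			sizeof(attr))) {
 *		handle_prog_id(attr.next_id);
 *		attr.start_id = attr.next_id;
 *	}
 */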

struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
{
	struct bpf_map *map;

	spin_lock_bh(&map_idr_lock);
again:
	map = idr_get_next(&map_idr, id);
	if (map) {
		map = __bpf_map_inc_not_zero(map, false);
		if (IS_ERR(map)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&map_idr_lock);

	return map;
}

struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
{
	struct bpf_prog *prog;

	spin_lock_bh(&prog_idr_lock);
again:
	prog = idr_get_next(&prog_idr, id);
	if (prog) {
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&prog_idr_lock);

	return prog;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

struct bpf_prog *bpf_prog_by_id(u32 id)
{
	struct bpf_prog *prog;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);
	return prog;
}

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	prog = bpf_prog_by_id(id);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = __bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put_with_uref(map);

	return fd;
}
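
/*
 * Example (illustrative sketch, error handling elided): turning a map ID
 * into a read-only fd. open_flags may be zero or a subset of
 * BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY); CAP_SYS_ADMIN is
 * required, as checked above.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_id     = map_id;
 *	attr.open_flags = BPF_F_RDONLY;
 *	map_fd = syscall(__NR_bpf, BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
 */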

static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr, u32 *off,
					      u32 *type)
{
	const struct bpf_map *map;
	int i;

	mutex_lock(&prog->aux->used_maps_mutex);
	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (map == (void *)addr) {
			*type = BPF_PSEUDO_MAP_FD;
			goto out;
		}
		if (!map->ops->map_direct_value_meta)
			continue;
		if (!map->ops->map_direct_value_meta(map, addr, off)) {
			*type = BPF_PSEUDO_MAP_VALUE;
			goto out;
		}
	}
	map = NULL;

out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return map;
}

static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
					      const struct cred *f_cred)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u32 off, type;
	u64 imm;
	u8 code;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		code = insns[i].code;

		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (code == (BPF_JMP | BPF_CALL) ||
		    code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok(f_cred))
				insns[i].imm = 0;
			continue;
		}
		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
			continue;
		}

		if (code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm, &off, &type);
		if (map) {
			insns[i].src_reg = type;
			insns[i].imm = map->id;
			insns[i + 1].imm = off;
			continue;
		}
	}

	return insns;
}
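
/*
 * Sketch of the sanitization above for a map pointer load: a program that
 * was loaded with
 *
 *	BPF_LD_IMM64(BPF_REG_1, map_ptr)
 *
 * (a two-instruction BPF_LD | BPF_IMM | BPF_DW pair) is dumped back to
 * user space as
 *
 *	insn[0].src_reg = BPF_PSEUDO_MAP_FD or BPF_PSEUDO_MAP_VALUE
 *	insn[0].imm     = map->id
 *	insn[1].imm     = off
 *
 * so the xlated dump exposes stable map IDs instead of kernel pointers.
 */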

static int set_info_rec_size(struct bpf_prog_info *info)
{
	/*
	 * Ensure info.*_rec_size is the same as kernel expected size
	 *
	 * or
	 *
	 * Only allow zero *_rec_size if both _rec_size and _cnt are
	 * zero. In this case, the kernel will set the expected
	 * _rec_size back to the info.
	 */

	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);

	return 0;
}

static int bpf_prog_get_info_by_fd(struct file *file,
				   struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
	struct bpf_prog_info info;
	u32 info_len = attr->info.info_len;
	struct bpf_prog_kstats stats;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	mutex_lock(&prog->aux->used_maps_mutex);
	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i])) {
				mutex_unlock(&prog->aux->used_maps_mutex);
				return -EFAULT;
			}
	}
	mutex_unlock(&prog->aux->used_maps_mutex);

	err = set_info_rec_size(&info);
	if (err)
		return err;

	bpf_prog_get_stats(prog, &stats);
	info.run_time_ns = stats.nsecs;
	info.run_cnt = stats.cnt;
	info.recursion_misses = stats.misses;

	info.verified_insns = prog->aux->verified_insns;

	if (!bpf_capable()) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		info.nr_jited_ksyms = 0;
		info.nr_jited_func_lens = 0;
		info.nr_func_info = 0;
		info.nr_line_info = 0;
		info.nr_jited_line_info = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if (bpf_prog_is_offloaded(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	if (prog->aux->func_cnt) {
		u32 i;

		info.jited_prog_len = 0;
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}

	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);

			/* for multi-function programs, copy the JITed
			 * instructions for all the functions
			 */
			if (prog->aux->func_cnt) {
				u32 len, free, i;
				u8 *img;

				free = ulen;
				for (i = 0; i < prog->aux->func_cnt; i++) {
					len = prog->aux->func[i]->jited_len;
					len = min_t(u32, len, free);
					img = (u8 *) prog->aux->func[i]->bpf_func;
					if (copy_to_user(uinsns, img, len))
						return -EFAULT;
					uinsns += len;
					free -= len;
					if (!free)
						break;
				}
			} else {
				if (copy_to_user(uinsns, prog->bpf_func, ulen))
					return -EFAULT;
			}
		} else {
			info.jited_prog_insns = 0;
		}
	}

	ulen = info.nr_jited_ksyms;
	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long ksym_addr;
			u64 __user *user_ksyms;
			u32 i;

			/* copy the address of the kernel symbol
			 * corresponding to each function
			 */
			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					ksym_addr = (unsigned long)
						prog->aux->func[i]->bpf_func;
					if (put_user((u64) ksym_addr,
						     &user_ksyms[i]))
						return -EFAULT;
				}
			} else {
				ksym_addr = (unsigned long) prog->bpf_func;
				if (put_user((u64) ksym_addr, &user_ksyms[0]))
					return -EFAULT;
			}
		} else {
			info.jited_ksyms = 0;
		}
	}

	ulen = info.nr_jited_func_lens;
	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			u32 __user *user_lens;
			u32 func_len, i;

			/* copy the JITed image lengths for each function */
			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
			user_lens = u64_to_user_ptr(info.jited_func_lens);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					func_len =
						prog->aux->func[i]->jited_len;
					if (put_user(func_len, &user_lens[i]))
						return -EFAULT;
				}
			} else {
				func_len = prog->jited_len;
				if (put_user(func_len, &user_lens[0]))
					return -EFAULT;
			}
		} else {
			info.jited_func_lens = 0;
		}
	}

	if (prog->aux->btf)
		info.btf_id = btf_obj_id(prog->aux->btf);
	info.attach_btf_id = prog->aux->attach_btf_id;
	if (attach_btf)
		info.attach_btf_obj_id = btf_obj_id(attach_btf);

	ulen = info.nr_func_info;
	info.nr_func_info = prog->aux->func_info_cnt;
	if (info.nr_func_info && ulen) {
		char __user *user_finfo;

		user_finfo = u64_to_user_ptr(info.func_info);
		ulen = min_t(u32, info.nr_func_info, ulen);
		if (copy_to_user(user_finfo, prog->aux->func_info,
				 info.func_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_line_info;
	info.nr_line_info = prog->aux->nr_linfo;
	if (info.nr_line_info && ulen) {
		__u8 __user *user_linfo;

		user_linfo = u64_to_user_ptr(info.line_info);
		ulen = min_t(u32, info.nr_line_info, ulen);
		if (copy_to_user(user_linfo, prog->aux->linfo,
				 info.line_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_jited_line_info;
	if (prog->aux->jited_linfo)
		info.nr_jited_line_info = prog->aux->nr_linfo;
	else
		info.nr_jited_line_info = 0;
	if (info.nr_jited_line_info && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long line_addr;
			__u64 __user *user_linfo;
			u32 i;

			user_linfo = u64_to_user_ptr(info.jited_line_info);
			ulen = min_t(u32, info.nr_jited_line_info, ulen);
			for (i = 0; i < ulen; i++) {
				line_addr = (unsigned long)prog->aux->jited_linfo[i];
				if (put_user((__u64)line_addr, &user_linfo[i]))
					return -EFAULT;
			}
		} else {
			info.jited_line_info = 0;
		}
	}

	ulen = info.nr_prog_tags;
	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
	if (ulen) {
		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
		u32 i;

		user_prog_tags = u64_to_user_ptr(info.prog_tags);
		ulen = min_t(u32, info.nr_prog_tags, ulen);
		if (prog->aux->func_cnt) {
			for (i = 0; i < ulen; i++) {
				if (copy_to_user(user_prog_tags[i],
						 prog->aux->func[i]->tag,
						 BPF_TAG_SIZE))
					return -EFAULT;
			}
		} else {
			if (copy_to_user(user_prog_tags[0],
					 prog->tag, BPF_TAG_SIZE))
				return -EFAULT;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}
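
/*
 * Example (illustrative sketch, error handling elided): the usual two-call
 * pattern for BPF_OBJ_GET_INFO_BY_FD. The first call, with all array
 * counts zeroed, reports how much space is needed (here nr_map_ids); the
 * caller then allocates a buffer and repeats the call to have it filled.
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *	__u32 n, *map_ids;
 *
 *	attr.info.bpf_fd   = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info     = (__u64)(unsigned long)&info;
 *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *	n = info.nr_map_ids;
 *
 *	map_ids = calloc(n, sizeof(*map_ids));
 *	memset(&info, 0, sizeof(info));
 *	info.nr_map_ids = n;
 *	info.map_ids    = (__u64)(unsigned long)map_ids;
 *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 */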

static int bpf_map_get_info_by_fd(struct file *file,
				  struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	info.map_extra = map->map_extra;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_obj_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}
	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_btf_get_info_by_fd(struct file *file,
				  struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
	if (err)
		return err;

	return btf_get_info_by_fd(btf, attr, uattr);
}

static int bpf_link_get_info_by_fd(struct file *file,
				   struct bpf_link *link,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_link_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = link->type;
	info.id = link->id;
	if (link->prog)
		info.prog_id = link->prog->aux->id;

	if (link->ops->fill_link_info) {
		err = link->ops->fill_link_info(link, &info);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}


#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
	else if (f.file->f_op == &bpf_link_fops)
		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
					      attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

#define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size

static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	return btf_new_fd(attr, uattr, uattr_size);
}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}

static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	rcu_read_lock();
	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return -ENOENT;

	err = 0;
	file = fget_task(task, fd);
	put_task_struct(task);
	if (!file)
		return -EBADF;

	if (file->f_op == &bpf_link_fops) {
		struct bpf_link *link = file->private_data;

		if (link->ops == &bpf_raw_tp_link_lops) {
			struct bpf_raw_tp_link *raw_tp =
				container_of(link, struct bpf_raw_tp_link, link);
			struct bpf_raw_event_map *btp = raw_tp->btp;

			err = bpf_task_fd_query_copy(attr, uattr,
						     raw_tp->link.prog->aux->id,
						     BPF_FD_TYPE_RAW_TRACEPOINT,
						     btp->tp->name, 0, 0);
			goto put_file;
		}
		goto out_not_supp;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr, NULL);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

out_not_supp:
	err = -ENOTSUPP;
put_file:
	fput(file);
	return err;
}
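
/*
 * Example (illustrative sketch, error handling elided): asking which BPF
 * program sits behind fd target_fd of task pid and where it is attached.
 * On success attr.task_fd_query.prog_id, .fd_type, .probe_offset and
 * .probe_addr are filled in, and buf holds e.g. the tracepoint name.
 *
 *	char buf[256];
 *	union bpf_attr attr = {};
 *
 *	attr.task_fd_query.pid     = pid;
 *	attr.task_fd_query.fd      = target_fd;
 *	attr.task_fd_query.buf     = (__u64)(unsigned long)buf;
 *	attr.task_fd_query.buf_len = sizeof(buf);
 *	err = syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 */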

#define BPF_MAP_BATCH_LAST_FIELD batch.flags

#define BPF_DO_BATCH(fn, ...)			\
	do {					\
		if (!fn) {			\
			err = -ENOTSUPP;	\
			goto err_put;		\
		}				\
		err = fn(__VA_ARGS__);		\
	} while (0)

static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
{
	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
	struct bpf_map *map;
	int err, ufd;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_BATCH))
		return -EINVAL;

	ufd = attr->batch.map_fd;
	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (has_write)
		bpf_map_write_active_inc(map);
	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}
	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (cmd == BPF_MAP_LOOKUP_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
	else if (cmd == BPF_MAP_UPDATE_BATCH)
		BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
	else
		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
err_put:
	if (has_write) {
		maybe_wait_bpf_programs(map);
		bpf_map_write_active_dec(map);
	}
	fdput(f);
	return err;
}
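
/*
 * Example (illustrative sketch, error handling elided): draining a hash
 * map with BPF_MAP_LOOKUP_AND_DELETE_BATCH. in_batch/out_batch point to
 * an opaque cursor carried between calls (a NULL in_batch starts from the
 * beginning); attr.batch.count is updated to the number of elements
 * actually processed, and the final call fails with ENOENT. map_fd,
 * keys[], vals[] and N are assumed to be supplied by the caller.
 *
 *	union bpf_attr attr = {};
 *	__u64 in, out;
 *	bool first = true;
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.keys   = (__u64)(unsigned long)keys;
 *	attr.batch.values = (__u64)(unsigned long)vals;
 *	do {
 *		attr.batch.in_batch  = first ? 0 : (__u64)(unsigned long)&in;
 *		attr.batch.out_batch = (__u64)(unsigned long)&out;
 *		attr.batch.count     = N;
 *		err = syscall(__NR_bpf, BPF_MAP_LOOKUP_AND_DELETE_BATCH,
 *			      &attr, sizeof(attr));
 *		in = out;
 *		first = false;
 *	} while (!err);
 */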

#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_LINK_CREATE))
		return -EINVAL;

	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
		return bpf_struct_ops_link_create(attr);

	prog = bpf_prog_get(attr->link_create.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	ret = bpf_prog_attach_check_attach_type(prog,
						attr->link_create.attach_type);
	if (ret)
		goto out;

	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		ret = cgroup_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_EXT:
		ret = bpf_tracing_prog_attach(prog,
					      attr->link_create.target_fd,
					      attr->link_create.target_btf_id,
					      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_TRACING:
		if (attr->link_create.attach_type != prog->expected_attach_type) {
			ret = -EINVAL;
			goto out;
		}
		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
			ret = bpf_raw_tp_link_attach(prog, NULL);
		else if (prog->expected_attach_type == BPF_TRACE_ITER)
			ret = bpf_iter_link_attach(attr, uattr, prog);
		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
			ret = cgroup_bpf_link_attach(attr, prog);
		else
			ret = bpf_tracing_prog_attach(prog,
						      attr->link_create.target_fd,
						      attr->link_create.target_btf_id,
						      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_SK_LOOKUP:
		ret = netns_bpf_link_create(attr, prog);
		break;
#ifdef CONFIG_NET
	case BPF_PROG_TYPE_XDP:
		ret = bpf_xdp_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_SCHED_CLS:
		if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
		    attr->link_create.attach_type == BPF_TCX_EGRESS)
			ret = tcx_link_attach(attr, prog);
		else
			ret = netkit_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_NETFILTER:
		ret = bpf_nf_link_attach(attr, prog);
		break;
#endif
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
		ret = bpf_perf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_KPROBE:
		if (attr->link_create.attach_type == BPF_PERF_EVENT)
			ret = bpf_perf_link_attach(attr, prog);
		else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI)
			ret = bpf_kprobe_multi_link_attach(attr, prog);
		else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
			ret = bpf_uprobe_multi_link_attach(attr, prog);
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret < 0)
		bpf_prog_put(prog);
	return ret;
}
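
/*
 * Example (illustrative sketch, error handling elided): creating a cgroup
 * link, the ownership-based alternative to BPF_PROG_ATTACH. The returned
 * link_fd pins the attachment: it cannot be replaced behind the owner's
 * back and is auto-detached when the last fd reference goes away. cg_fd
 * and prog_fd are assumed to be supplied by the caller.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.target_fd   = cg_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_EGRESS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */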

static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
{
	struct bpf_map *new_map, *old_map = NULL;
	int ret;

	new_map = bpf_map_get(attr->link_update.new_map_fd);
	if (IS_ERR(new_map))
		return PTR_ERR(new_map);

	if (attr->link_update.flags & BPF_F_REPLACE) {
		old_map = bpf_map_get(attr->link_update.old_map_fd);
		if (IS_ERR(old_map)) {
			ret = PTR_ERR(old_map);
			goto out_put;
		}
	} else if (attr->link_update.old_map_fd) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = link->ops->update_map(link, new_map, old_map);

	if (old_map)
		bpf_map_put(old_map);
out_put:
	bpf_map_put(new_map);
	return ret;
}

#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd

static int link_update(union bpf_attr *attr)
{
	struct bpf_prog *old_prog = NULL, *new_prog;
	struct bpf_link *link;
	u32 flags;
	int ret;

	if (CHECK_ATTR(BPF_LINK_UPDATE))
		return -EINVAL;

	flags = attr->link_update.flags;
	if (flags & ~BPF_F_REPLACE)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_update.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->update_map) {
		ret = link_update_map(link, attr);
		goto out_put_link;
	}

	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
	if (IS_ERR(new_prog)) {
		ret = PTR_ERR(new_prog);
		goto out_put_link;
	}

	if (flags & BPF_F_REPLACE) {
		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
		if (IS_ERR(old_prog)) {
			ret = PTR_ERR(old_prog);
			old_prog = NULL;
			goto out_put_progs;
		}
	} else if (attr->link_update.old_prog_fd) {
		ret = -EINVAL;
		goto out_put_progs;
	}

	if (link->ops->update_prog)
		ret = link->ops->update_prog(link, new_prog, old_prog);
	else
		ret = -EINVAL;

out_put_progs:
	if (old_prog)
		bpf_prog_put(old_prog);
	if (ret)
		bpf_prog_put(new_prog);
out_put_link:
	bpf_link_put_direct(link);
	return ret;
}
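
/*
 * Example (illustrative sketch, error handling elided): atomically
 * swapping the program behind a link, with compare-and-swap semantics.
 * With BPF_F_REPLACE the update fails unless the currently attached
 * program is the one referred to by old_prog_fd.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_update.link_fd     = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	attr.link_update.flags       = BPF_F_REPLACE;
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */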

#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd

static int link_detach(union bpf_attr *attr)
{
	struct bpf_link *link;
	int ret;

	if (CHECK_ATTR(BPF_LINK_DETACH))
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->detach)
		ret = link->ops->detach(link);
	else
		ret = -EOPNOTSUPP;

	bpf_link_put_direct(link);
	return ret;
}

static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}

struct bpf_link *bpf_link_by_id(u32 id)
{
	struct bpf_link *link;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&link_idr_lock);
	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
	link = idr_find(&link_idr, id);
	if (link) {
		if (link->id)
			link = bpf_link_inc_not_zero(link);
		else
			link = ERR_PTR(-EAGAIN);
	} else {
		link = ERR_PTR(-ENOENT);
	}
	spin_unlock_bh(&link_idr_lock);
	return link;
}

struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
{
	struct bpf_link *link;

	spin_lock_bh(&link_idr_lock);
again:
	link = idr_get_next(&link_idr, id);
	if (link) {
		link = bpf_link_inc_not_zero(link);
		if (IS_ERR(link)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&link_idr_lock);

	return link;
}

#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_link *link;
	u32 id = attr->link_id;
	int fd;

	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	link = bpf_link_by_id(id);
	if (IS_ERR(link))
		return PTR_ERR(link);

	fd = bpf_link_new_fd(link);
	if (fd < 0)
		bpf_link_put_direct(link);

	return fd;
}

DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
	mutex_lock(&bpf_stats_enabled_mutex);
	static_key_slow_dec(&bpf_stats_enabled_key.key);
	mutex_unlock(&bpf_stats_enabled_mutex);
	return 0;
}

static const struct file_operations bpf_stats_fops = {
	.release = bpf_stats_release,
};

static int bpf_enable_runtime_stats(void)
{
	int fd;

	mutex_lock(&bpf_stats_enabled_mutex);

	/* Set a very high limit to avoid overflow */
	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
		mutex_unlock(&bpf_stats_enabled_mutex);
		return -EBUSY;
	}

	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
	if (fd >= 0)
		static_key_slow_inc(&bpf_stats_enabled_key.key);

	mutex_unlock(&bpf_stats_enabled_mutex);
	return fd;
}

#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{

	if (CHECK_ATTR(BPF_ENABLE_STATS))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (attr->enable_stats.type) {
	case BPF_STATS_RUN_TIME:
		return bpf_enable_runtime_stats();
	default:
		break;
	}
	return -EINVAL;
}
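
/*
 * Example (illustrative sketch, error handling elided): turning on
 * run-time statistics. Collection stays enabled for as long as the
 * returned fd is held; closing it drops the static key again via
 * bpf_stats_release() above. While enabled, run_time_ns/run_cnt show up
 * in bpf_prog_info (see bpf_prog_get_info_by_fd()).
 *
 *	union bpf_attr attr = {};
 *
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *	...
 *	close(stats_fd);
 */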

#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put_direct(link);

	return err;
}

#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags

static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			bpf_map_put(map);
			goto out_unlock;
		}

	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* The bpf program will not access the bpf map, but for the sake of
	 * simplicity, increase sleepable_refcnt for sleepable program as well.
	 */
	if (prog->aux->sleepable)
		atomic64_inc(&map->sleepable_refcnt);
	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}

static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{
	union bpf_attr attr;
	int err;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_bpfptr(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr, uattr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr, uattr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr, size);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr.user);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr.user);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr, uattr, size);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr.user);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr, uattr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	case BPF_LINK_DETACH:
		err = link_detach(&attr);
		break;
	case BPF_PROG_BIND_MAP:
		err = bpf_prog_bind_map(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
}

static bool syscall_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= U16_MAX)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
	switch (cmd) {
	case BPF_MAP_CREATE:
	case BPF_MAP_DELETE_ELEM:
	case BPF_MAP_UPDATE_ELEM:
	case BPF_MAP_FREEZE:
	case BPF_MAP_GET_FD_BY_ID:
	case BPF_PROG_LOAD:
	case BPF_BTF_LOAD:
	case BPF_LINK_CREATE:
	case BPF_RAW_TRACEPOINT_OPEN:
		break;
	default:
		return -EINVAL;
	}
	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}
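
/*
 * Example (illustrative sketch): what a BPF_PROG_TYPE_SYSCALL program
 * using the bpf_sys_bpf() helper above might look like. This is BPF C as
 * built by libbpf's light skeleton, compiled separately, not kernel code;
 * the SEC("syscall") convention and helper declaration come from libbpf.
 *
 *	SEC("syscall")
 *	int create_array(void *ctx)
 *	{
 *		union bpf_attr attr = {};
 *
 *		attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *		attr.key_size    = 4;
 *		attr.value_size  = 4;
 *		attr.max_entries = 1;
 *		return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */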


/* To shut up -Wmissing-prototypes.
 * This function is used by the kernel light skeleton
 * to load bpf programs when modules are loaded or during kernel boot.
 * See tools/lib/bpf/skel_internal.h
 */
int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);

int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_prog * __maybe_unused prog;
	struct bpf_tramp_run_ctx __maybe_unused run_ctx;

	switch (cmd) {
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
	case BPF_PROG_TEST_RUN:
		if (attr->test.data_in || attr->test.data_out ||
		    attr->test.ctx_out || attr->test.duration ||
		    attr->test.repeat || attr->test.flags)
			return -EINVAL;

		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
		    attr->test.ctx_size_in > U16_MAX) {
			bpf_prog_put(prog);
			return -EINVAL;
		}

		run_ctx.bpf_cookie = 0;
		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
			/* recursion detected */
			__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
			bpf_prog_put(prog);
			return -EBUSY;
		}
		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
						&run_ctx);
		bpf_prog_put(prog);
		return 0;
#endif
	default:
		return ____bpf_sys_bpf(cmd, attr, size);
	}
}
EXPORT_SYMBOL(kern_sys_bpf);

static const struct bpf_func_proto bpf_sys_bpf_proto = {
	.func		= bpf_sys_bpf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id);
}

BPF_CALL_1(bpf_sys_close, u32, fd)
{
	/* When bpf program calls this helper there should not be
	 * an fdget() without matching completed fdput().
	 * This helper is allowed in the following callchain only:
	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
	 */
	return close_fd(fd);
}

static const struct bpf_func_proto bpf_sys_close_proto = {
	.func		= bpf_sys_close,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ? 0 : -ENOENT;
}

static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
	.func		= bpf_kallsyms_lookup_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sys_bpf:
		return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
	case BPF_FUNC_btf_find_by_name_kind:
		return &bpf_btf_find_by_name_kind_proto;
	case BPF_FUNC_sys_close:
		return &bpf_sys_close_proto;
	case BPF_FUNC_kallsyms_lookup_name:
		return &bpf_kallsyms_lookup_name_proto;
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
}

const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
	.get_func_proto		= syscall_prog_func_proto,
	.is_valid_access	= syscall_prog_is_valid_access,
};

const struct bpf_prog_ops bpf_syscall_prog_ops = {
	.test_run = bpf_prog_test_run_syscall,
};

#ifdef CONFIG_SYSCTL
static int bpf_stats_handler(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct static_key *key = (struct static_key *)table->data;
	static int saved_val;
	int val, ret;
	struct ctl_table tmp = {
		.data	= &val,
		.maxlen	= sizeof(val),
		.mode	= table->mode,
		.extra1	= SYSCTL_ZERO,
		.extra2	= SYSCTL_ONE,
	};

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&bpf_stats_enabled_mutex);
	val = saved_val;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret && val != saved_val) {
		if (val)
			static_key_slow_inc(key);
		else
			static_key_slow_dec(key);
		saved_val = val;
	}
	mutex_unlock(&bpf_stats_enabled_mutex);
	return ret;
}

void __weak unpriv_ebpf_notify(int new_state)
{
}

static int bpf_unpriv_handler(struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, unpriv_enable = *(int *)table->data;
	bool locked_state = unpriv_enable == 1;
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	tmp.data = &unpriv_enable;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret) {
		if (locked_state && unpriv_enable != 1)
			return -EPERM;
		*(int *)table->data = unpriv_enable;
	}

	if (write)
		unpriv_ebpf_notify(unpriv_enable);

	return ret;
}

static struct ctl_table bpf_syscall_table[] = {
	{
		.procname	= "unprivileged_bpf_disabled",
		.data		= &sysctl_unprivileged_bpf_disabled,
		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
		.mode		= 0644,
		.proc_handler	= bpf_unpriv_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "bpf_stats_enabled",
		.data		= &bpf_stats_enabled_key.key,
		.mode		= 0644,
		.proc_handler	= bpf_stats_handler,
	},
	{ }
};

static int __init bpf_syscall_sysctl_init(void)
{
	register_sysctl_init("kernel", bpf_syscall_table);
	return 0;
}
late_initcall(bpf_syscall_sysctl_init);
#endif /* CONFIG_SYSCTL */