Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 */
4#include <linux/bpf.h>
5#include <linux/bpf-cgroup.h>
6#include <linux/bpf_trace.h>
7#include <linux/bpf_lirc.h>
8#include <linux/bpf_verifier.h>
9#include <linux/bsearch.h>
10#include <linux/btf.h>
11#include <linux/syscalls.h>
12#include <linux/slab.h>
13#include <linux/sched/signal.h>
14#include <linux/vmalloc.h>
15#include <linux/mmzone.h>
16#include <linux/anon_inodes.h>
17#include <linux/fdtable.h>
18#include <linux/file.h>
19#include <linux/fs.h>
20#include <linux/license.h>
21#include <linux/filter.h>
22#include <linux/kernel.h>
23#include <linux/idr.h>
24#include <linux/cred.h>
25#include <linux/timekeeping.h>
26#include <linux/ctype.h>
27#include <linux/nospec.h>
28#include <linux/audit.h>
29#include <uapi/linux/btf.h>
30#include <linux/pgtable.h>
31#include <linux/bpf_lsm.h>
32#include <linux/poll.h>
33#include <linux/sort.h>
34#include <linux/bpf-netns.h>
35#include <linux/rcupdate_trace.h>
36#include <linux/memcontrol.h>
37#include <linux/trace_events.h>
38
39#include <net/netfilter/nf_bpf_link.h>
40#include <net/netkit.h>
41#include <net/tcx.h>
42
43#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
44 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
45 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
46#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
47#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
48#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
49 IS_FD_HASH(map))
50
51#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
52
53DEFINE_PER_CPU(int, bpf_prog_active);
54static DEFINE_IDR(prog_idr);
55static DEFINE_SPINLOCK(prog_idr_lock);
56static DEFINE_IDR(map_idr);
57static DEFINE_SPINLOCK(map_idr_lock);
58static DEFINE_IDR(link_idr);
59static DEFINE_SPINLOCK(link_idr_lock);
60
61int sysctl_unprivileged_bpf_disabled __read_mostly =
62 IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
63
64static const struct bpf_map_ops * const bpf_map_types[] = {
65#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
66#define BPF_MAP_TYPE(_id, _ops) \
67 [_id] = &_ops,
68#define BPF_LINK_TYPE(_id, _name)
69#include <linux/bpf_types.h>
70#undef BPF_PROG_TYPE
71#undef BPF_MAP_TYPE
72#undef BPF_LINK_TYPE
73};
74
75/*
76 * If we're handed a bigger struct than we know of, ensure all the unknown bits
77 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
78 * we don't know about yet.
79 *
80 * There is a ToCToU between this function call and the following
81 * copy_from_user() call. However, this is not a concern since this function is
82 * meant to be a future-proofing of bits.
83 */
84int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
85 size_t expected_size,
86 size_t actual_size)
87{
88 int res;
89
90 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */
91 return -E2BIG;
92
93 if (actual_size <= expected_size)
94 return 0;
95
96 if (uaddr.is_kernel)
97 res = memchr_inv(uaddr.kernel + expected_size, 0,
98 actual_size - expected_size) == NULL;
99 else
100 res = check_zeroed_user(uaddr.user + expected_size,
101 actual_size - expected_size);
102 if (res < 0)
103 return res;
104 return res ? 0 : -E2BIG;
105}
106
107const struct bpf_map_ops bpf_map_offload_ops = {
108 .map_meta_equal = bpf_map_meta_equal,
109 .map_alloc = bpf_map_offload_map_alloc,
110 .map_free = bpf_map_offload_map_free,
111 .map_check_btf = map_check_no_btf,
112 .map_mem_usage = bpf_map_offload_map_mem_usage,
113};
114
115static void bpf_map_write_active_inc(struct bpf_map *map)
116{
117 atomic64_inc(&map->writecnt);
118}
119
120static void bpf_map_write_active_dec(struct bpf_map *map)
121{
122 atomic64_dec(&map->writecnt);
123}
124
125bool bpf_map_write_active(const struct bpf_map *map)
126{
127 return atomic64_read(&map->writecnt) != 0;
128}
129
130static u32 bpf_map_value_size(const struct bpf_map *map)
131{
132 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
133 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
134 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
135 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
136 return round_up(map->value_size, 8) * num_possible_cpus();
137 else if (IS_FD_MAP(map))
138 return sizeof(u32);
139 else
140 return map->value_size;
141}
142
143static void maybe_wait_bpf_programs(struct bpf_map *map)
144{
145 /* Wait for any running non-sleepable BPF programs to complete so that
146 * userspace, when we return to it, knows that all non-sleepable
147 * programs that could be running use the new map value. For sleepable
148 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
149 * for the completions of these programs, but considering the waiting
150 * time can be very long and userspace may think it will hang forever,
151 * so don't handle sleepable BPF programs now.
152 */
153 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
154 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
155 synchronize_rcu();
156}
157
158static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
159 void *key, void *value, __u64 flags)
160{
161 int err;
162
163 /* Need to create a kthread, thus must support schedule */
164 if (bpf_map_is_offloaded(map)) {
165 return bpf_map_offload_update_elem(map, key, value, flags);
166 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
167 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
168 return map->ops->map_update_elem(map, key, value, flags);
169 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
170 map->map_type == BPF_MAP_TYPE_SOCKMAP) {
171 return sock_map_update_elem_sys(map, key, value, flags);
172 } else if (IS_FD_PROG_ARRAY(map)) {
173 return bpf_fd_array_map_update_elem(map, map_file, key, value,
174 flags);
175 }
176
177 bpf_disable_instrumentation();
178 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
179 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
180 err = bpf_percpu_hash_update(map, key, value, flags);
181 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
182 err = bpf_percpu_array_update(map, key, value, flags);
183 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
184 err = bpf_percpu_cgroup_storage_update(map, key, value,
185 flags);
186 } else if (IS_FD_ARRAY(map)) {
187 err = bpf_fd_array_map_update_elem(map, map_file, key, value,
188 flags);
189 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
190 err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
191 flags);
192 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
193 /* rcu_read_lock() is not needed */
194 err = bpf_fd_reuseport_array_update_elem(map, key, value,
195 flags);
196 } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
197 map->map_type == BPF_MAP_TYPE_STACK ||
198 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
199 err = map->ops->map_push_elem(map, value, flags);
200 } else {
201 rcu_read_lock();
202 err = map->ops->map_update_elem(map, key, value, flags);
203 rcu_read_unlock();
204 }
205 bpf_enable_instrumentation();
206
207 return err;
208}
209
210static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
211 __u64 flags)
212{
213 void *ptr;
214 int err;
215
216 if (bpf_map_is_offloaded(map))
217 return bpf_map_offload_lookup_elem(map, key, value);
218
219 bpf_disable_instrumentation();
220 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
221 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
222 err = bpf_percpu_hash_copy(map, key, value);
223 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
224 err = bpf_percpu_array_copy(map, key, value);
225 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
226 err = bpf_percpu_cgroup_storage_copy(map, key, value);
227 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
228 err = bpf_stackmap_copy(map, key, value);
229 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
230 err = bpf_fd_array_map_lookup_elem(map, key, value);
231 } else if (IS_FD_HASH(map)) {
232 err = bpf_fd_htab_map_lookup_elem(map, key, value);
233 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
234 err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
235 } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
236 map->map_type == BPF_MAP_TYPE_STACK ||
237 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
238 err = map->ops->map_peek_elem(map, value);
239 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
240 /* struct_ops map requires directly updating "value" */
241 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
242 } else {
243 rcu_read_lock();
244 if (map->ops->map_lookup_elem_sys_only)
245 ptr = map->ops->map_lookup_elem_sys_only(map, key);
246 else
247 ptr = map->ops->map_lookup_elem(map, key);
248 if (IS_ERR(ptr)) {
249 err = PTR_ERR(ptr);
250 } else if (!ptr) {
251 err = -ENOENT;
252 } else {
253 err = 0;
254 if (flags & BPF_F_LOCK)
255 /* lock 'ptr' and copy everything but lock */
256 copy_map_value_locked(map, value, ptr, true);
257 else
258 copy_map_value(map, value, ptr);
259 /* mask lock and timer, since value wasn't zero inited */
260 check_and_init_map_value(map, value);
261 }
262 rcu_read_unlock();
263 }
264
265 bpf_enable_instrumentation();
266
267 return err;
268}
269
270/* Please, do not use this function outside from the map creation path
271 * (e.g. in map update path) without taking care of setting the active
272 * memory cgroup (see at bpf_map_kmalloc_node() for example).
273 */
274static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
275{
276 /* We really just want to fail instead of triggering OOM killer
277 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
278 * which is used for lower order allocation requests.
279 *
280 * It has been observed that higher order allocation requests done by
281 * vmalloc with __GFP_NORETRY being set might fail due to not trying
282 * to reclaim memory from the page cache, thus we set
283 * __GFP_RETRY_MAYFAIL to avoid such situations.
284 */
285
286 gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
287 unsigned int flags = 0;
288 unsigned long align = 1;
289 void *area;
290
291 if (size >= SIZE_MAX)
292 return NULL;
293
294 /* kmalloc()'ed memory can't be mmap()'ed */
295 if (mmapable) {
296 BUG_ON(!PAGE_ALIGNED(size));
297 align = SHMLBA;
298 flags = VM_USERMAP;
299 } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
300 area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
301 numa_node);
302 if (area != NULL)
303 return area;
304 }
305
306 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
307 gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
308 flags, numa_node, __builtin_return_address(0));
309}
310
311void *bpf_map_area_alloc(u64 size, int numa_node)
312{
313 return __bpf_map_area_alloc(size, numa_node, false);
314}
315
316void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
317{
318 return __bpf_map_area_alloc(size, numa_node, true);
319}
320
321void bpf_map_area_free(void *area)
322{
323 kvfree(area);
324}
325
326static u32 bpf_map_flags_retain_permanent(u32 flags)
327{
328 /* Some map creation flags are not tied to the map object but
329 * rather to the map fd instead, so they have no meaning upon
330 * map object inspection since multiple file descriptors with
331 * different (access) properties can exist here. Thus, given
332 * this has zero meaning for the map itself, lets clear these
333 * from here.
334 */
335 return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
336}
337
338void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
339{
340 map->map_type = attr->map_type;
341 map->key_size = attr->key_size;
342 map->value_size = attr->value_size;
343 map->max_entries = attr->max_entries;
344 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
345 map->numa_node = bpf_map_attr_numa_node(attr);
346 map->map_extra = attr->map_extra;
347}
348
349static int bpf_map_alloc_id(struct bpf_map *map)
350{
351 int id;
352
353 idr_preload(GFP_KERNEL);
354 spin_lock_bh(&map_idr_lock);
355 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
356 if (id > 0)
357 map->id = id;
358 spin_unlock_bh(&map_idr_lock);
359 idr_preload_end();
360
361 if (WARN_ON_ONCE(!id))
362 return -ENOSPC;
363
364 return id > 0 ? 0 : id;
365}
366
367void bpf_map_free_id(struct bpf_map *map)
368{
369 unsigned long flags;
370
371 /* Offloaded maps are removed from the IDR store when their device
372 * disappears - even if someone holds an fd to them they are unusable,
373 * the memory is gone, all ops will fail; they are simply waiting for
374 * refcnt to drop to be freed.
375 */
376 if (!map->id)
377 return;
378
379 spin_lock_irqsave(&map_idr_lock, flags);
380
381 idr_remove(&map_idr, map->id);
382 map->id = 0;
383
384 spin_unlock_irqrestore(&map_idr_lock, flags);
385}
386
387#ifdef CONFIG_MEMCG_KMEM
388static void bpf_map_save_memcg(struct bpf_map *map)
389{
390 /* Currently if a map is created by a process belonging to the root
391 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
392 * So we have to check map->objcg for being NULL each time it's
393 * being used.
394 */
395 if (memcg_bpf_enabled())
396 map->objcg = get_obj_cgroup_from_current();
397}
398
399static void bpf_map_release_memcg(struct bpf_map *map)
400{
401 if (map->objcg)
402 obj_cgroup_put(map->objcg);
403}
404
405static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
406{
407 if (map->objcg)
408 return get_mem_cgroup_from_objcg(map->objcg);
409
410 return root_mem_cgroup;
411}
412
413void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
414 int node)
415{
416 struct mem_cgroup *memcg, *old_memcg;
417 void *ptr;
418
419 memcg = bpf_map_get_memcg(map);
420 old_memcg = set_active_memcg(memcg);
421 ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
422 set_active_memcg(old_memcg);
423 mem_cgroup_put(memcg);
424
425 return ptr;
426}
427
428void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
429{
430 struct mem_cgroup *memcg, *old_memcg;
431 void *ptr;
432
433 memcg = bpf_map_get_memcg(map);
434 old_memcg = set_active_memcg(memcg);
435 ptr = kzalloc(size, flags | __GFP_ACCOUNT);
436 set_active_memcg(old_memcg);
437 mem_cgroup_put(memcg);
438
439 return ptr;
440}
441
442void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
443 gfp_t flags)
444{
445 struct mem_cgroup *memcg, *old_memcg;
446 void *ptr;
447
448 memcg = bpf_map_get_memcg(map);
449 old_memcg = set_active_memcg(memcg);
450 ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
451 set_active_memcg(old_memcg);
452 mem_cgroup_put(memcg);
453
454 return ptr;
455}
456
457void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
458 size_t align, gfp_t flags)
459{
460 struct mem_cgroup *memcg, *old_memcg;
461 void __percpu *ptr;
462
463 memcg = bpf_map_get_memcg(map);
464 old_memcg = set_active_memcg(memcg);
465 ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
466 set_active_memcg(old_memcg);
467 mem_cgroup_put(memcg);
468
469 return ptr;
470}
471
472#else
473static void bpf_map_save_memcg(struct bpf_map *map)
474{
475}
476
477static void bpf_map_release_memcg(struct bpf_map *map)
478{
479}
480#endif
481
482static int btf_field_cmp(const void *a, const void *b)
483{
484 const struct btf_field *f1 = a, *f2 = b;
485
486 if (f1->offset < f2->offset)
487 return -1;
488 else if (f1->offset > f2->offset)
489 return 1;
490 return 0;
491}
492
493struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
494 u32 field_mask)
495{
496 struct btf_field *field;
497
498 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
499 return NULL;
500 field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
501 if (!field || !(field->type & field_mask))
502 return NULL;
503 return field;
504}
505
506void btf_record_free(struct btf_record *rec)
507{
508 int i;
509
510 if (IS_ERR_OR_NULL(rec))
511 return;
512 for (i = 0; i < rec->cnt; i++) {
513 switch (rec->fields[i].type) {
514 case BPF_KPTR_UNREF:
515 case BPF_KPTR_REF:
516 case BPF_KPTR_PERCPU:
517 if (rec->fields[i].kptr.module)
518 module_put(rec->fields[i].kptr.module);
519 btf_put(rec->fields[i].kptr.btf);
520 break;
521 case BPF_LIST_HEAD:
522 case BPF_LIST_NODE:
523 case BPF_RB_ROOT:
524 case BPF_RB_NODE:
525 case BPF_SPIN_LOCK:
526 case BPF_TIMER:
527 case BPF_REFCOUNT:
528 /* Nothing to release */
529 break;
530 default:
531 WARN_ON_ONCE(1);
532 continue;
533 }
534 }
535 kfree(rec);
536}
537
538void bpf_map_free_record(struct bpf_map *map)
539{
540 btf_record_free(map->record);
541 map->record = NULL;
542}
543
544struct btf_record *btf_record_dup(const struct btf_record *rec)
545{
546 const struct btf_field *fields;
547 struct btf_record *new_rec;
548 int ret, size, i;
549
550 if (IS_ERR_OR_NULL(rec))
551 return NULL;
552 size = offsetof(struct btf_record, fields[rec->cnt]);
553 new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
554 if (!new_rec)
555 return ERR_PTR(-ENOMEM);
556 /* Do a deep copy of the btf_record */
557 fields = rec->fields;
558 new_rec->cnt = 0;
559 for (i = 0; i < rec->cnt; i++) {
560 switch (fields[i].type) {
561 case BPF_KPTR_UNREF:
562 case BPF_KPTR_REF:
563 case BPF_KPTR_PERCPU:
564 btf_get(fields[i].kptr.btf);
565 if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
566 ret = -ENXIO;
567 goto free;
568 }
569 break;
570 case BPF_LIST_HEAD:
571 case BPF_LIST_NODE:
572 case BPF_RB_ROOT:
573 case BPF_RB_NODE:
574 case BPF_SPIN_LOCK:
575 case BPF_TIMER:
576 case BPF_REFCOUNT:
577 /* Nothing to acquire */
578 break;
579 default:
580 ret = -EFAULT;
581 WARN_ON_ONCE(1);
582 goto free;
583 }
584 new_rec->cnt++;
585 }
586 return new_rec;
587free:
588 btf_record_free(new_rec);
589 return ERR_PTR(ret);
590}
591
592bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
593{
594 bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
595 int size;
596
597 if (!a_has_fields && !b_has_fields)
598 return true;
599 if (a_has_fields != b_has_fields)
600 return false;
601 if (rec_a->cnt != rec_b->cnt)
602 return false;
603 size = offsetof(struct btf_record, fields[rec_a->cnt]);
604 /* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
605 * members are zeroed out. So memcmp is safe to do without worrying
606 * about padding/unused fields.
607 *
608 * While spin_lock, timer, and kptr have no relation to map BTF,
609 * list_head metadata is specific to map BTF, the btf and value_rec
610 * members in particular. btf is the map BTF, while value_rec points to
611 * btf_record in that map BTF.
612 *
613 * So while by default, we don't rely on the map BTF (which the records
614 * were parsed from) matching for both records, which is not backwards
615 * compatible, in case list_head is part of it, we implicitly rely on
616 * that by way of depending on memcmp succeeding for it.
617 */
618 return !memcmp(rec_a, rec_b, size);
619}
620
621void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
622{
623 if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
624 return;
625 bpf_timer_cancel_and_free(obj + rec->timer_off);
626}
627
628void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
629{
630 const struct btf_field *fields;
631 int i;
632
633 if (IS_ERR_OR_NULL(rec))
634 return;
635 fields = rec->fields;
636 for (i = 0; i < rec->cnt; i++) {
637 struct btf_struct_meta *pointee_struct_meta;
638 const struct btf_field *field = &fields[i];
639 void *field_ptr = obj + field->offset;
640 void *xchgd_field;
641
642 switch (fields[i].type) {
643 case BPF_SPIN_LOCK:
644 break;
645 case BPF_TIMER:
646 bpf_timer_cancel_and_free(field_ptr);
647 break;
648 case BPF_KPTR_UNREF:
649 WRITE_ONCE(*(u64 *)field_ptr, 0);
650 break;
651 case BPF_KPTR_REF:
652 case BPF_KPTR_PERCPU:
653 xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
654 if (!xchgd_field)
655 break;
656
657 if (!btf_is_kernel(field->kptr.btf)) {
658 pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
659 field->kptr.btf_id);
660 migrate_disable();
661 __bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
662 pointee_struct_meta->record : NULL,
663 fields[i].type == BPF_KPTR_PERCPU);
664 migrate_enable();
665 } else {
666 field->kptr.dtor(xchgd_field);
667 }
668 break;
669 case BPF_LIST_HEAD:
670 if (WARN_ON_ONCE(rec->spin_lock_off < 0))
671 continue;
672 bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
673 break;
674 case BPF_RB_ROOT:
675 if (WARN_ON_ONCE(rec->spin_lock_off < 0))
676 continue;
677 bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
678 break;
679 case BPF_LIST_NODE:
680 case BPF_RB_NODE:
681 case BPF_REFCOUNT:
682 break;
683 default:
684 WARN_ON_ONCE(1);
685 continue;
686 }
687 }
688}
689
690/* called from workqueue */
691static void bpf_map_free_deferred(struct work_struct *work)
692{
693 struct bpf_map *map = container_of(work, struct bpf_map, work);
694 struct btf_record *rec = map->record;
695 struct btf *btf = map->btf;
696
697 security_bpf_map_free(map);
698 bpf_map_release_memcg(map);
699 /* implementation dependent freeing */
700 map->ops->map_free(map);
701 /* Delay freeing of btf_record for maps, as map_free
702 * callback usually needs access to them. It is better to do it here
703 * than require each callback to do the free itself manually.
704 *
705 * Note that the btf_record stashed in map->inner_map_meta->record was
706 * already freed using the map_free callback for map in map case which
707 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
708 * template bpf_map struct used during verification.
709 */
710 btf_record_free(rec);
711 /* Delay freeing of btf for maps, as map_free callback may need
712 * struct_meta info which will be freed with btf_put().
713 */
714 btf_put(btf);
715}
716
717static void bpf_map_put_uref(struct bpf_map *map)
718{
719 if (atomic64_dec_and_test(&map->usercnt)) {
720 if (map->ops->map_release_uref)
721 map->ops->map_release_uref(map);
722 }
723}
724
725static void bpf_map_free_in_work(struct bpf_map *map)
726{
727 INIT_WORK(&map->work, bpf_map_free_deferred);
728 /* Avoid spawning kworkers, since they all might contend
729 * for the same mutex like slab_mutex.
730 */
731 queue_work(system_unbound_wq, &map->work);
732}
733
734static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
735{
736 bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
737}
738
739static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
740{
741 if (rcu_trace_implies_rcu_gp())
742 bpf_map_free_rcu_gp(rcu);
743 else
744 call_rcu(rcu, bpf_map_free_rcu_gp);
745}
746
747/* decrement map refcnt and schedule it for freeing via workqueue
748 * (underlying map implementation ops->map_free() might sleep)
749 */
750void bpf_map_put(struct bpf_map *map)
751{
752 if (atomic64_dec_and_test(&map->refcnt)) {
753 /* bpf_map_free_id() must be called first */
754 bpf_map_free_id(map);
755
756 WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
757 if (READ_ONCE(map->free_after_mult_rcu_gp))
758 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
759 else if (READ_ONCE(map->free_after_rcu_gp))
760 call_rcu(&map->rcu, bpf_map_free_rcu_gp);
761 else
762 bpf_map_free_in_work(map);
763 }
764}
765EXPORT_SYMBOL_GPL(bpf_map_put);
766
767void bpf_map_put_with_uref(struct bpf_map *map)
768{
769 bpf_map_put_uref(map);
770 bpf_map_put(map);
771}
772
773static int bpf_map_release(struct inode *inode, struct file *filp)
774{
775 struct bpf_map *map = filp->private_data;
776
777 if (map->ops->map_release)
778 map->ops->map_release(map, filp);
779
780 bpf_map_put_with_uref(map);
781 return 0;
782}
783
784static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
785{
786 fmode_t mode = f.file->f_mode;
787
788 /* Our file permissions may have been overridden by global
789 * map permissions facing syscall side.
790 */
791 if (READ_ONCE(map->frozen))
792 mode &= ~FMODE_CAN_WRITE;
793 return mode;
794}
795
796#ifdef CONFIG_PROC_FS
797/* Show the memory usage of a bpf map */
798static u64 bpf_map_memory_usage(const struct bpf_map *map)
799{
800 return map->ops->map_mem_usage(map);
801}
802
803static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
804{
805 struct bpf_map *map = filp->private_data;
806 u32 type = 0, jited = 0;
807
808 if (map_type_contains_progs(map)) {
809 spin_lock(&map->owner.lock);
810 type = map->owner.type;
811 jited = map->owner.jited;
812 spin_unlock(&map->owner.lock);
813 }
814
815 seq_printf(m,
816 "map_type:\t%u\n"
817 "key_size:\t%u\n"
818 "value_size:\t%u\n"
819 "max_entries:\t%u\n"
820 "map_flags:\t%#x\n"
821 "map_extra:\t%#llx\n"
822 "memlock:\t%llu\n"
823 "map_id:\t%u\n"
824 "frozen:\t%u\n",
825 map->map_type,
826 map->key_size,
827 map->value_size,
828 map->max_entries,
829 map->map_flags,
830 (unsigned long long)map->map_extra,
831 bpf_map_memory_usage(map),
832 map->id,
833 READ_ONCE(map->frozen));
834 if (type) {
835 seq_printf(m, "owner_prog_type:\t%u\n", type);
836 seq_printf(m, "owner_jited:\t%u\n", jited);
837 }
838}
839#endif
840
841static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
842 loff_t *ppos)
843{
844 /* We need this handler such that alloc_file() enables
845 * f_mode with FMODE_CAN_READ.
846 */
847 return -EINVAL;
848}
849
850static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
851 size_t siz, loff_t *ppos)
852{
853 /* We need this handler such that alloc_file() enables
854 * f_mode with FMODE_CAN_WRITE.
855 */
856 return -EINVAL;
857}
858
859/* called for any extra memory-mapped regions (except initial) */
860static void bpf_map_mmap_open(struct vm_area_struct *vma)
861{
862 struct bpf_map *map = vma->vm_file->private_data;
863
864 if (vma->vm_flags & VM_MAYWRITE)
865 bpf_map_write_active_inc(map);
866}
867
868/* called for all unmapped memory region (including initial) */
869static void bpf_map_mmap_close(struct vm_area_struct *vma)
870{
871 struct bpf_map *map = vma->vm_file->private_data;
872
873 if (vma->vm_flags & VM_MAYWRITE)
874 bpf_map_write_active_dec(map);
875}
876
877static const struct vm_operations_struct bpf_map_default_vmops = {
878 .open = bpf_map_mmap_open,
879 .close = bpf_map_mmap_close,
880};
881
882static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
883{
884 struct bpf_map *map = filp->private_data;
885 int err;
886
887 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
888 return -ENOTSUPP;
889
890 if (!(vma->vm_flags & VM_SHARED))
891 return -EINVAL;
892
893 mutex_lock(&map->freeze_mutex);
894
895 if (vma->vm_flags & VM_WRITE) {
896 if (map->frozen) {
897 err = -EPERM;
898 goto out;
899 }
900 /* map is meant to be read-only, so do not allow mapping as
901 * writable, because it's possible to leak a writable page
902 * reference and allows user-space to still modify it after
903 * freezing, while verifier will assume contents do not change
904 */
905 if (map->map_flags & BPF_F_RDONLY_PROG) {
906 err = -EACCES;
907 goto out;
908 }
909 }
910
911 /* set default open/close callbacks */
912 vma->vm_ops = &bpf_map_default_vmops;
913 vma->vm_private_data = map;
914 vm_flags_clear(vma, VM_MAYEXEC);
915 if (!(vma->vm_flags & VM_WRITE))
916 /* disallow re-mapping with PROT_WRITE */
917 vm_flags_clear(vma, VM_MAYWRITE);
918
919 err = map->ops->map_mmap(map, vma);
920 if (err)
921 goto out;
922
923 if (vma->vm_flags & VM_MAYWRITE)
924 bpf_map_write_active_inc(map);
925out:
926 mutex_unlock(&map->freeze_mutex);
927 return err;
928}
929
930static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
931{
932 struct bpf_map *map = filp->private_data;
933
934 if (map->ops->map_poll)
935 return map->ops->map_poll(map, filp, pts);
936
937 return EPOLLERR;
938}
939
940const struct file_operations bpf_map_fops = {
941#ifdef CONFIG_PROC_FS
942 .show_fdinfo = bpf_map_show_fdinfo,
943#endif
944 .release = bpf_map_release,
945 .read = bpf_dummy_read,
946 .write = bpf_dummy_write,
947 .mmap = bpf_map_mmap,
948 .poll = bpf_map_poll,
949};
950
951int bpf_map_new_fd(struct bpf_map *map, int flags)
952{
953 int ret;
954
955 ret = security_bpf_map(map, OPEN_FMODE(flags));
956 if (ret < 0)
957 return ret;
958
959 return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
960 flags | O_CLOEXEC);
961}
962
963int bpf_get_file_flag(int flags)
964{
965 if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
966 return -EINVAL;
967 if (flags & BPF_F_RDONLY)
968 return O_RDONLY;
969 if (flags & BPF_F_WRONLY)
970 return O_WRONLY;
971 return O_RDWR;
972}
973
974/* helper macro to check that unused fields 'union bpf_attr' are zero */
975#define CHECK_ATTR(CMD) \
976 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
977 sizeof(attr->CMD##_LAST_FIELD), 0, \
978 sizeof(*attr) - \
979 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
980 sizeof(attr->CMD##_LAST_FIELD)) != NULL
981
982/* dst and src must have at least "size" number of bytes.
983 * Return strlen on success and < 0 on error.
984 */
985int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
986{
987 const char *end = src + size;
988 const char *orig_src = src;
989
990 memset(dst, 0, size);
991 /* Copy all isalnum(), '_' and '.' chars. */
992 while (src < end && *src) {
993 if (!isalnum(*src) &&
994 *src != '_' && *src != '.')
995 return -EINVAL;
996 *dst++ = *src++;
997 }
998
999 /* No '\0' found in "size" number of bytes */
1000 if (src == end)
1001 return -EINVAL;
1002
1003 return src - orig_src;
1004}
1005
1006int map_check_no_btf(const struct bpf_map *map,
1007 const struct btf *btf,
1008 const struct btf_type *key_type,
1009 const struct btf_type *value_type)
1010{
1011 return -ENOTSUPP;
1012}
1013
1014static int map_check_btf(struct bpf_map *map, const struct btf *btf,
1015 u32 btf_key_id, u32 btf_value_id)
1016{
1017 const struct btf_type *key_type, *value_type;
1018 u32 key_size, value_size;
1019 int ret = 0;
1020
1021 /* Some maps allow key to be unspecified. */
1022 if (btf_key_id) {
1023 key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
1024 if (!key_type || key_size != map->key_size)
1025 return -EINVAL;
1026 } else {
1027 key_type = btf_type_by_id(btf, 0);
1028 if (!map->ops->map_check_btf)
1029 return -EINVAL;
1030 }
1031
1032 value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
1033 if (!value_type || value_size != map->value_size)
1034 return -EINVAL;
1035
1036 map->record = btf_parse_fields(btf, value_type,
1037 BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
1038 BPF_RB_ROOT | BPF_REFCOUNT,
1039 map->value_size);
1040 if (!IS_ERR_OR_NULL(map->record)) {
1041 int i;
1042
1043 if (!bpf_capable()) {
1044 ret = -EPERM;
1045 goto free_map_tab;
1046 }
1047 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
1048 ret = -EACCES;
1049 goto free_map_tab;
1050 }
1051 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
1052 switch (map->record->field_mask & (1 << i)) {
1053 case 0:
1054 continue;
1055 case BPF_SPIN_LOCK:
1056 if (map->map_type != BPF_MAP_TYPE_HASH &&
1057 map->map_type != BPF_MAP_TYPE_ARRAY &&
1058 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
1059 map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1060 map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1061 map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1062 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1063 ret = -EOPNOTSUPP;
1064 goto free_map_tab;
1065 }
1066 break;
1067 case BPF_TIMER:
1068 if (map->map_type != BPF_MAP_TYPE_HASH &&
1069 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1070 map->map_type != BPF_MAP_TYPE_ARRAY) {
1071 ret = -EOPNOTSUPP;
1072 goto free_map_tab;
1073 }
1074 break;
1075 case BPF_KPTR_UNREF:
1076 case BPF_KPTR_REF:
1077 case BPF_KPTR_PERCPU:
1078 case BPF_REFCOUNT:
1079 if (map->map_type != BPF_MAP_TYPE_HASH &&
1080 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
1081 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1082 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
1083 map->map_type != BPF_MAP_TYPE_ARRAY &&
1084 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
1085 map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1086 map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1087 map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1088 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1089 ret = -EOPNOTSUPP;
1090 goto free_map_tab;
1091 }
1092 break;
1093 case BPF_LIST_HEAD:
1094 case BPF_RB_ROOT:
1095 if (map->map_type != BPF_MAP_TYPE_HASH &&
1096 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1097 map->map_type != BPF_MAP_TYPE_ARRAY) {
1098 ret = -EOPNOTSUPP;
1099 goto free_map_tab;
1100 }
1101 break;
1102 default:
1103 /* Fail if map_type checks are missing for a field type */
1104 ret = -EOPNOTSUPP;
1105 goto free_map_tab;
1106 }
1107 }
1108 }
1109
1110 ret = btf_check_and_fixup_fields(btf, map->record);
1111 if (ret < 0)
1112 goto free_map_tab;
1113
1114 if (map->ops->map_check_btf) {
1115 ret = map->ops->map_check_btf(map, btf, key_type, value_type);
1116 if (ret < 0)
1117 goto free_map_tab;
1118 }
1119
1120 return ret;
1121free_map_tab:
1122 bpf_map_free_record(map);
1123 return ret;
1124}
1125
1126#define BPF_MAP_CREATE_LAST_FIELD map_extra
1127/* called via syscall */
1128static int map_create(union bpf_attr *attr)
1129{
1130 const struct bpf_map_ops *ops;
1131 int numa_node = bpf_map_attr_numa_node(attr);
1132 u32 map_type = attr->map_type;
1133 struct bpf_map *map;
1134 int f_flags;
1135 int err;
1136
1137 err = CHECK_ATTR(BPF_MAP_CREATE);
1138 if (err)
1139 return -EINVAL;
1140
1141 if (attr->btf_vmlinux_value_type_id) {
1142 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
1143 attr->btf_key_type_id || attr->btf_value_type_id)
1144 return -EINVAL;
1145 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
1146 return -EINVAL;
1147 }
1148
1149 if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
1150 attr->map_extra != 0)
1151 return -EINVAL;
1152
1153 f_flags = bpf_get_file_flag(attr->map_flags);
1154 if (f_flags < 0)
1155 return f_flags;
1156
1157 if (numa_node != NUMA_NO_NODE &&
1158 ((unsigned int)numa_node >= nr_node_ids ||
1159 !node_online(numa_node)))
1160 return -EINVAL;
1161
1162 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1163 map_type = attr->map_type;
1164 if (map_type >= ARRAY_SIZE(bpf_map_types))
1165 return -EINVAL;
1166 map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
1167 ops = bpf_map_types[map_type];
1168 if (!ops)
1169 return -EINVAL;
1170
1171 if (ops->map_alloc_check) {
1172 err = ops->map_alloc_check(attr);
1173 if (err)
1174 return err;
1175 }
1176 if (attr->map_ifindex)
1177 ops = &bpf_map_offload_ops;
1178 if (!ops->map_mem_usage)
1179 return -EINVAL;
1180
1181 /* Intent here is for unprivileged_bpf_disabled to block BPF map
1182 * creation for unprivileged users; other actions depend
1183 * on fd availability and access to bpffs, so are dependent on
1184 * object creation success. Even with unprivileged BPF disabled,
1185 * capability checks are still carried out.
1186 */
1187 if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
1188 return -EPERM;
1189
1190 /* check privileged map type permissions */
1191 switch (map_type) {
1192 case BPF_MAP_TYPE_ARRAY:
1193 case BPF_MAP_TYPE_PERCPU_ARRAY:
1194 case BPF_MAP_TYPE_PROG_ARRAY:
1195 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1196 case BPF_MAP_TYPE_CGROUP_ARRAY:
1197 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1198 case BPF_MAP_TYPE_HASH:
1199 case BPF_MAP_TYPE_PERCPU_HASH:
1200 case BPF_MAP_TYPE_HASH_OF_MAPS:
1201 case BPF_MAP_TYPE_RINGBUF:
1202 case BPF_MAP_TYPE_USER_RINGBUF:
1203 case BPF_MAP_TYPE_CGROUP_STORAGE:
1204 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
1205 /* unprivileged */
1206 break;
1207 case BPF_MAP_TYPE_SK_STORAGE:
1208 case BPF_MAP_TYPE_INODE_STORAGE:
1209 case BPF_MAP_TYPE_TASK_STORAGE:
1210 case BPF_MAP_TYPE_CGRP_STORAGE:
1211 case BPF_MAP_TYPE_BLOOM_FILTER:
1212 case BPF_MAP_TYPE_LPM_TRIE:
1213 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
1214 case BPF_MAP_TYPE_STACK_TRACE:
1215 case BPF_MAP_TYPE_QUEUE:
1216 case BPF_MAP_TYPE_STACK:
1217 case BPF_MAP_TYPE_LRU_HASH:
1218 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
1219 case BPF_MAP_TYPE_STRUCT_OPS:
1220 case BPF_MAP_TYPE_CPUMAP:
1221 if (!bpf_capable())
1222 return -EPERM;
1223 break;
1224 case BPF_MAP_TYPE_SOCKMAP:
1225 case BPF_MAP_TYPE_SOCKHASH:
1226 case BPF_MAP_TYPE_DEVMAP:
1227 case BPF_MAP_TYPE_DEVMAP_HASH:
1228 case BPF_MAP_TYPE_XSKMAP:
1229 if (!capable(CAP_NET_ADMIN))
1230 return -EPERM;
1231 break;
1232 default:
1233 WARN(1, "unsupported map type %d", map_type);
1234 return -EPERM;
1235 }
1236
1237 map = ops->map_alloc(attr);
1238 if (IS_ERR(map))
1239 return PTR_ERR(map);
1240 map->ops = ops;
1241 map->map_type = map_type;
1242
1243 err = bpf_obj_name_cpy(map->name, attr->map_name,
1244 sizeof(attr->map_name));
1245 if (err < 0)
1246 goto free_map;
1247
1248 atomic64_set(&map->refcnt, 1);
1249 atomic64_set(&map->usercnt, 1);
1250 mutex_init(&map->freeze_mutex);
1251 spin_lock_init(&map->owner.lock);
1252
1253 if (attr->btf_key_type_id || attr->btf_value_type_id ||
1254 /* Even the map's value is a kernel's struct,
1255 * the bpf_prog.o must have BTF to begin with
1256 * to figure out the corresponding kernel's
1257 * counter part. Thus, attr->btf_fd has
1258 * to be valid also.
1259 */
1260 attr->btf_vmlinux_value_type_id) {
1261 struct btf *btf;
1262
1263 btf = btf_get_by_fd(attr->btf_fd);
1264 if (IS_ERR(btf)) {
1265 err = PTR_ERR(btf);
1266 goto free_map;
1267 }
1268 if (btf_is_kernel(btf)) {
1269 btf_put(btf);
1270 err = -EACCES;
1271 goto free_map;
1272 }
1273 map->btf = btf;
1274
1275 if (attr->btf_value_type_id) {
1276 err = map_check_btf(map, btf, attr->btf_key_type_id,
1277 attr->btf_value_type_id);
1278 if (err)
1279 goto free_map;
1280 }
1281
1282 map->btf_key_type_id = attr->btf_key_type_id;
1283 map->btf_value_type_id = attr->btf_value_type_id;
1284 map->btf_vmlinux_value_type_id =
1285 attr->btf_vmlinux_value_type_id;
1286 }
1287
1288 err = security_bpf_map_alloc(map);
1289 if (err)
1290 goto free_map;
1291
1292 err = bpf_map_alloc_id(map);
1293 if (err)
1294 goto free_map_sec;
1295
1296 bpf_map_save_memcg(map);
1297
1298 err = bpf_map_new_fd(map, f_flags);
1299 if (err < 0) {
1300 /* failed to allocate fd.
1301 * bpf_map_put_with_uref() is needed because the above
1302 * bpf_map_alloc_id() has published the map
1303 * to the userspace and the userspace may
1304 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
1305 */
1306 bpf_map_put_with_uref(map);
1307 return err;
1308 }
1309
1310 return err;
1311
1312free_map_sec:
1313 security_bpf_map_free(map);
1314free_map:
1315 btf_put(map->btf);
1316 map->ops->map_free(map);
1317 return err;
1318}
1319
1320/* if error is returned, fd is released.
1321 * On success caller should complete fd access with matching fdput()
1322 */
1323struct bpf_map *__bpf_map_get(struct fd f)
1324{
1325 if (!f.file)
1326 return ERR_PTR(-EBADF);
1327 if (f.file->f_op != &bpf_map_fops) {
1328 fdput(f);
1329 return ERR_PTR(-EINVAL);
1330 }
1331
1332 return f.file->private_data;
1333}
1334
1335void bpf_map_inc(struct bpf_map *map)
1336{
1337 atomic64_inc(&map->refcnt);
1338}
1339EXPORT_SYMBOL_GPL(bpf_map_inc);
1340
1341void bpf_map_inc_with_uref(struct bpf_map *map)
1342{
1343 atomic64_inc(&map->refcnt);
1344 atomic64_inc(&map->usercnt);
1345}
1346EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
1347
1348struct bpf_map *bpf_map_get(u32 ufd)
1349{
1350 struct fd f = fdget(ufd);
1351 struct bpf_map *map;
1352
1353 map = __bpf_map_get(f);
1354 if (IS_ERR(map))
1355 return map;
1356
1357 bpf_map_inc(map);
1358 fdput(f);
1359
1360 return map;
1361}
1362EXPORT_SYMBOL(bpf_map_get);
1363
1364struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1365{
1366 struct fd f = fdget(ufd);
1367 struct bpf_map *map;
1368
1369 map = __bpf_map_get(f);
1370 if (IS_ERR(map))
1371 return map;
1372
1373 bpf_map_inc_with_uref(map);
1374 fdput(f);
1375
1376 return map;
1377}
1378
1379/* map_idr_lock should have been held or the map should have been
1380 * protected by rcu read lock.
1381 */
1382struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
1383{
1384 int refold;
1385
1386 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
1387 if (!refold)
1388 return ERR_PTR(-ENOENT);
1389 if (uref)
1390 atomic64_inc(&map->usercnt);
1391
1392 return map;
1393}
1394
1395struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
1396{
1397 spin_lock_bh(&map_idr_lock);
1398 map = __bpf_map_inc_not_zero(map, false);
1399 spin_unlock_bh(&map_idr_lock);
1400
1401 return map;
1402}
1403EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
1404
1405int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1406{
1407 return -ENOTSUPP;
1408}
1409
1410static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1411{
1412 if (key_size)
1413 return vmemdup_user(ukey, key_size);
1414
1415 if (ukey)
1416 return ERR_PTR(-EINVAL);
1417
1418 return NULL;
1419}
1420
1421static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1422{
1423 if (key_size)
1424 return kvmemdup_bpfptr(ukey, key_size);
1425
1426 if (!bpfptr_is_null(ukey))
1427 return ERR_PTR(-EINVAL);
1428
1429 return NULL;
1430}
1431
1432/* last field in 'union bpf_attr' used by this command */
1433#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1434
1435static int map_lookup_elem(union bpf_attr *attr)
1436{
1437 void __user *ukey = u64_to_user_ptr(attr->key);
1438 void __user *uvalue = u64_to_user_ptr(attr->value);
1439 int ufd = attr->map_fd;
1440 struct bpf_map *map;
1441 void *key, *value;
1442 u32 value_size;
1443 struct fd f;
1444 int err;
1445
1446 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1447 return -EINVAL;
1448
1449 if (attr->flags & ~BPF_F_LOCK)
1450 return -EINVAL;
1451
1452 f = fdget(ufd);
1453 map = __bpf_map_get(f);
1454 if (IS_ERR(map))
1455 return PTR_ERR(map);
1456 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1457 err = -EPERM;
1458 goto err_put;
1459 }
1460
1461 if ((attr->flags & BPF_F_LOCK) &&
1462 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1463 err = -EINVAL;
1464 goto err_put;
1465 }
1466
1467 key = __bpf_copy_key(ukey, map->key_size);
1468 if (IS_ERR(key)) {
1469 err = PTR_ERR(key);
1470 goto err_put;
1471 }
1472
1473 value_size = bpf_map_value_size(map);
1474
1475 err = -ENOMEM;
1476 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1477 if (!value)
1478 goto free_key;
1479
1480 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1481 if (copy_from_user(value, uvalue, value_size))
1482 err = -EFAULT;
1483 else
1484 err = bpf_map_copy_value(map, key, value, attr->flags);
1485 goto free_value;
1486 }
1487
1488 err = bpf_map_copy_value(map, key, value, attr->flags);
1489 if (err)
1490 goto free_value;
1491
1492 err = -EFAULT;
1493 if (copy_to_user(uvalue, value, value_size) != 0)
1494 goto free_value;
1495
1496 err = 0;
1497
1498free_value:
1499 kvfree(value);
1500free_key:
1501 kvfree(key);
1502err_put:
1503 fdput(f);
1504 return err;
1505}
1506
1507
1508#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1509
1510static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1511{
1512 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1513 bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1514 int ufd = attr->map_fd;
1515 struct bpf_map *map;
1516 void *key, *value;
1517 u32 value_size;
1518 struct fd f;
1519 int err;
1520
1521 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1522 return -EINVAL;
1523
1524 f = fdget(ufd);
1525 map = __bpf_map_get(f);
1526 if (IS_ERR(map))
1527 return PTR_ERR(map);
1528 bpf_map_write_active_inc(map);
1529 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1530 err = -EPERM;
1531 goto err_put;
1532 }
1533
1534 if ((attr->flags & BPF_F_LOCK) &&
1535 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1536 err = -EINVAL;
1537 goto err_put;
1538 }
1539
1540 key = ___bpf_copy_key(ukey, map->key_size);
1541 if (IS_ERR(key)) {
1542 err = PTR_ERR(key);
1543 goto err_put;
1544 }
1545
1546 value_size = bpf_map_value_size(map);
1547 value = kvmemdup_bpfptr(uvalue, value_size);
1548 if (IS_ERR(value)) {
1549 err = PTR_ERR(value);
1550 goto free_key;
1551 }
1552
1553 err = bpf_map_update_value(map, f.file, key, value, attr->flags);
1554 if (!err)
1555 maybe_wait_bpf_programs(map);
1556
1557 kvfree(value);
1558free_key:
1559 kvfree(key);
1560err_put:
1561 bpf_map_write_active_dec(map);
1562 fdput(f);
1563 return err;
1564}
1565
1566#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1567
1568static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
1569{
1570 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1571 int ufd = attr->map_fd;
1572 struct bpf_map *map;
1573 struct fd f;
1574 void *key;
1575 int err;
1576
1577 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1578 return -EINVAL;
1579
1580 f = fdget(ufd);
1581 map = __bpf_map_get(f);
1582 if (IS_ERR(map))
1583 return PTR_ERR(map);
1584 bpf_map_write_active_inc(map);
1585 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1586 err = -EPERM;
1587 goto err_put;
1588 }
1589
1590 key = ___bpf_copy_key(ukey, map->key_size);
1591 if (IS_ERR(key)) {
1592 err = PTR_ERR(key);
1593 goto err_put;
1594 }
1595
1596 if (bpf_map_is_offloaded(map)) {
1597 err = bpf_map_offload_delete_elem(map, key);
1598 goto out;
1599 } else if (IS_FD_PROG_ARRAY(map) ||
1600 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1601 /* These maps require sleepable context */
1602 err = map->ops->map_delete_elem(map, key);
1603 goto out;
1604 }
1605
1606 bpf_disable_instrumentation();
1607 rcu_read_lock();
1608 err = map->ops->map_delete_elem(map, key);
1609 rcu_read_unlock();
1610 bpf_enable_instrumentation();
1611 if (!err)
1612 maybe_wait_bpf_programs(map);
1613out:
1614 kvfree(key);
1615err_put:
1616 bpf_map_write_active_dec(map);
1617 fdput(f);
1618 return err;
1619}
1620
1621/* last field in 'union bpf_attr' used by this command */
1622#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1623
1624static int map_get_next_key(union bpf_attr *attr)
1625{
1626 void __user *ukey = u64_to_user_ptr(attr->key);
1627 void __user *unext_key = u64_to_user_ptr(attr->next_key);
1628 int ufd = attr->map_fd;
1629 struct bpf_map *map;
1630 void *key, *next_key;
1631 struct fd f;
1632 int err;
1633
1634 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1635 return -EINVAL;
1636
1637 f = fdget(ufd);
1638 map = __bpf_map_get(f);
1639 if (IS_ERR(map))
1640 return PTR_ERR(map);
1641 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1642 err = -EPERM;
1643 goto err_put;
1644 }
1645
1646 if (ukey) {
1647 key = __bpf_copy_key(ukey, map->key_size);
1648 if (IS_ERR(key)) {
1649 err = PTR_ERR(key);
1650 goto err_put;
1651 }
1652 } else {
1653 key = NULL;
1654 }
1655
1656 err = -ENOMEM;
1657 next_key = kvmalloc(map->key_size, GFP_USER);
1658 if (!next_key)
1659 goto free_key;
1660
1661 if (bpf_map_is_offloaded(map)) {
1662 err = bpf_map_offload_get_next_key(map, key, next_key);
1663 goto out;
1664 }
1665
1666 rcu_read_lock();
1667 err = map->ops->map_get_next_key(map, key, next_key);
1668 rcu_read_unlock();
1669out:
1670 if (err)
1671 goto free_next_key;
1672
1673 err = -EFAULT;
1674 if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1675 goto free_next_key;
1676
1677 err = 0;
1678
1679free_next_key:
1680 kvfree(next_key);
1681free_key:
1682 kvfree(key);
1683err_put:
1684 fdput(f);
1685 return err;
1686}
1687
1688int generic_map_delete_batch(struct bpf_map *map,
1689 const union bpf_attr *attr,
1690 union bpf_attr __user *uattr)
1691{
1692 void __user *keys = u64_to_user_ptr(attr->batch.keys);
1693 u32 cp, max_count;
1694 int err = 0;
1695 void *key;
1696
1697 if (attr->batch.elem_flags & ~BPF_F_LOCK)
1698 return -EINVAL;
1699
1700 if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1701 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1702 return -EINVAL;
1703 }
1704
1705 max_count = attr->batch.count;
1706 if (!max_count)
1707 return 0;
1708
1709 if (put_user(0, &uattr->batch.count))
1710 return -EFAULT;
1711
1712 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1713 if (!key)
1714 return -ENOMEM;
1715
1716 for (cp = 0; cp < max_count; cp++) {
1717 err = -EFAULT;
1718 if (copy_from_user(key, keys + cp * map->key_size,
1719 map->key_size))
1720 break;
1721
1722 if (bpf_map_is_offloaded(map)) {
1723 err = bpf_map_offload_delete_elem(map, key);
1724 break;
1725 }
1726
1727 bpf_disable_instrumentation();
1728 rcu_read_lock();
1729 err = map->ops->map_delete_elem(map, key);
1730 rcu_read_unlock();
1731 bpf_enable_instrumentation();
1732 if (err)
1733 break;
1734 cond_resched();
1735 }
1736 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1737 err = -EFAULT;
1738
1739 kvfree(key);
1740
1741 return err;
1742}
1743
1744int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
1745 const union bpf_attr *attr,
1746 union bpf_attr __user *uattr)
1747{
1748 void __user *values = u64_to_user_ptr(attr->batch.values);
1749 void __user *keys = u64_to_user_ptr(attr->batch.keys);
1750 u32 value_size, cp, max_count;
1751 void *key, *value;
1752 int err = 0;
1753
1754 if (attr->batch.elem_flags & ~BPF_F_LOCK)
1755 return -EINVAL;
1756
1757 if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1758 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1759 return -EINVAL;
1760 }
1761
1762 value_size = bpf_map_value_size(map);
1763
1764 max_count = attr->batch.count;
1765 if (!max_count)
1766 return 0;
1767
1768 if (put_user(0, &uattr->batch.count))
1769 return -EFAULT;
1770
1771 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1772 if (!key)
1773 return -ENOMEM;
1774
1775 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1776 if (!value) {
1777 kvfree(key);
1778 return -ENOMEM;
1779 }
1780
1781 for (cp = 0; cp < max_count; cp++) {
1782 err = -EFAULT;
1783 if (copy_from_user(key, keys + cp * map->key_size,
1784 map->key_size) ||
1785 copy_from_user(value, values + cp * value_size, value_size))
1786 break;
1787
1788 err = bpf_map_update_value(map, map_file, key, value,
1789 attr->batch.elem_flags);
1790
1791 if (err)
1792 break;
1793 cond_resched();
1794 }
1795
1796 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1797 err = -EFAULT;
1798
1799 kvfree(value);
1800 kvfree(key);
1801
1802 return err;
1803}
1804
1805#define MAP_LOOKUP_RETRIES 3
1806
1807int generic_map_lookup_batch(struct bpf_map *map,
1808 const union bpf_attr *attr,
1809 union bpf_attr __user *uattr)
1810{
1811 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1812 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1813 void __user *values = u64_to_user_ptr(attr->batch.values);
1814 void __user *keys = u64_to_user_ptr(attr->batch.keys);
1815 void *buf, *buf_prevkey, *prev_key, *key, *value;
1816 int err, retry = MAP_LOOKUP_RETRIES;
1817 u32 value_size, cp, max_count;
1818
1819 if (attr->batch.elem_flags & ~BPF_F_LOCK)
1820 return -EINVAL;
1821
1822 if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1823 !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1824 return -EINVAL;
1825
1826 value_size = bpf_map_value_size(map);
1827
1828 max_count = attr->batch.count;
1829 if (!max_count)
1830 return 0;
1831
1832 if (put_user(0, &uattr->batch.count))
1833 return -EFAULT;
1834
1835 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1836 if (!buf_prevkey)
1837 return -ENOMEM;
1838
1839 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1840 if (!buf) {
1841 kvfree(buf_prevkey);
1842 return -ENOMEM;
1843 }
1844
1845 err = -EFAULT;
1846 prev_key = NULL;
1847 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1848 goto free_buf;
1849 key = buf;
1850 value = key + map->key_size;
1851 if (ubatch)
1852 prev_key = buf_prevkey;
1853
1854 for (cp = 0; cp < max_count;) {
1855 rcu_read_lock();
1856 err = map->ops->map_get_next_key(map, prev_key, key);
1857 rcu_read_unlock();
1858 if (err)
1859 break;
1860 err = bpf_map_copy_value(map, key, value,
1861 attr->batch.elem_flags);
1862
1863 if (err == -ENOENT) {
1864 if (retry) {
1865 retry--;
1866 continue;
1867 }
1868 err = -EINTR;
1869 break;
1870 }
1871
1872 if (err)
1873 goto free_buf;
1874
1875 if (copy_to_user(keys + cp * map->key_size, key,
1876 map->key_size)) {
1877 err = -EFAULT;
1878 goto free_buf;
1879 }
1880 if (copy_to_user(values + cp * value_size, value, value_size)) {
1881 err = -EFAULT;
1882 goto free_buf;
1883 }
1884
1885 if (!prev_key)
1886 prev_key = buf_prevkey;
1887
1888 swap(prev_key, key);
1889 retry = MAP_LOOKUP_RETRIES;
1890 cp++;
1891 cond_resched();
1892 }
1893
1894 if (err == -EFAULT)
1895 goto free_buf;
1896
1897 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1898 (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1899 err = -EFAULT;
1900
1901free_buf:
1902 kvfree(buf_prevkey);
1903 kvfree(buf);
1904 return err;
1905}
1906
1907#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
1908
1909static int map_lookup_and_delete_elem(union bpf_attr *attr)
1910{
1911 void __user *ukey = u64_to_user_ptr(attr->key);
1912 void __user *uvalue = u64_to_user_ptr(attr->value);
1913 int ufd = attr->map_fd;
1914 struct bpf_map *map;
1915 void *key, *value;
1916 u32 value_size;
1917 struct fd f;
1918 int err;
1919
1920 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1921 return -EINVAL;
1922
1923 if (attr->flags & ~BPF_F_LOCK)
1924 return -EINVAL;
1925
1926 f = fdget(ufd);
1927 map = __bpf_map_get(f);
1928 if (IS_ERR(map))
1929 return PTR_ERR(map);
1930 bpf_map_write_active_inc(map);
1931 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1932 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1933 err = -EPERM;
1934 goto err_put;
1935 }
1936
1937 if (attr->flags &&
1938 (map->map_type == BPF_MAP_TYPE_QUEUE ||
1939 map->map_type == BPF_MAP_TYPE_STACK)) {
1940 err = -EINVAL;
1941 goto err_put;
1942 }
1943
1944 if ((attr->flags & BPF_F_LOCK) &&
1945 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1946 err = -EINVAL;
1947 goto err_put;
1948 }
1949
1950 key = __bpf_copy_key(ukey, map->key_size);
1951 if (IS_ERR(key)) {
1952 err = PTR_ERR(key);
1953 goto err_put;
1954 }
1955
1956 value_size = bpf_map_value_size(map);
1957
1958 err = -ENOMEM;
1959 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1960 if (!value)
1961 goto free_key;
1962
1963 err = -ENOTSUPP;
1964 if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1965 map->map_type == BPF_MAP_TYPE_STACK) {
1966 err = map->ops->map_pop_elem(map, value);
1967 } else if (map->map_type == BPF_MAP_TYPE_HASH ||
1968 map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1969 map->map_type == BPF_MAP_TYPE_LRU_HASH ||
1970 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
1971 if (!bpf_map_is_offloaded(map)) {
1972 bpf_disable_instrumentation();
1973 rcu_read_lock();
1974 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
1975 rcu_read_unlock();
1976 bpf_enable_instrumentation();
1977 }
1978 }
1979
1980 if (err)
1981 goto free_value;
1982
1983 if (copy_to_user(uvalue, value, value_size) != 0) {
1984 err = -EFAULT;
1985 goto free_value;
1986 }
1987
1988 err = 0;
1989
1990free_value:
1991 kvfree(value);
1992free_key:
1993 kvfree(key);
1994err_put:
1995 bpf_map_write_active_dec(map);
1996 fdput(f);
1997 return err;
1998}
1999
2000#define BPF_MAP_FREEZE_LAST_FIELD map_fd
2001
2002static int map_freeze(const union bpf_attr *attr)
2003{
2004 int err = 0, ufd = attr->map_fd;
2005 struct bpf_map *map;
2006 struct fd f;
2007
2008 if (CHECK_ATTR(BPF_MAP_FREEZE))
2009 return -EINVAL;
2010
2011 f = fdget(ufd);
2012 map = __bpf_map_get(f);
2013 if (IS_ERR(map))
2014 return PTR_ERR(map);
2015
2016 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
2017 fdput(f);
2018 return -ENOTSUPP;
2019 }
2020
2021 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
2022 fdput(f);
2023 return -EPERM;
2024 }
2025
2026 mutex_lock(&map->freeze_mutex);
2027 if (bpf_map_write_active(map)) {
2028 err = -EBUSY;
2029 goto err_put;
2030 }
2031 if (READ_ONCE(map->frozen)) {
2032 err = -EBUSY;
2033 goto err_put;
2034 }
2035
2036 WRITE_ONCE(map->frozen, true);
2037err_put:
2038 mutex_unlock(&map->freeze_mutex);
2039 fdput(f);
2040 return err;
2041}
2042
2043static const struct bpf_prog_ops * const bpf_prog_types[] = {
2044#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2045 [_id] = & _name ## _prog_ops,
2046#define BPF_MAP_TYPE(_id, _ops)
2047#define BPF_LINK_TYPE(_id, _name)
2048#include <linux/bpf_types.h>
2049#undef BPF_PROG_TYPE
2050#undef BPF_MAP_TYPE
2051#undef BPF_LINK_TYPE
2052};
2053
2054static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
2055{
2056 const struct bpf_prog_ops *ops;
2057
2058 if (type >= ARRAY_SIZE(bpf_prog_types))
2059 return -EINVAL;
2060 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
2061 ops = bpf_prog_types[type];
2062 if (!ops)
2063 return -EINVAL;
2064
2065 if (!bpf_prog_is_offloaded(prog->aux))
2066 prog->aux->ops = ops;
2067 else
2068 prog->aux->ops = &bpf_offload_prog_ops;
2069 prog->type = type;
2070 return 0;
2071}
2072
2073enum bpf_audit {
2074 BPF_AUDIT_LOAD,
2075 BPF_AUDIT_UNLOAD,
2076 BPF_AUDIT_MAX,
2077};
2078
2079static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
2080 [BPF_AUDIT_LOAD] = "LOAD",
2081 [BPF_AUDIT_UNLOAD] = "UNLOAD",
2082};
2083
2084static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
2085{
2086 struct audit_context *ctx = NULL;
2087 struct audit_buffer *ab;
2088
2089 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
2090 return;
2091 if (audit_enabled == AUDIT_OFF)
2092 return;
2093 if (!in_irq() && !irqs_disabled())
2094 ctx = audit_context();
2095 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
2096 if (unlikely(!ab))
2097 return;
2098 audit_log_format(ab, "prog-id=%u op=%s",
2099 prog->aux->id, bpf_audit_str[op]);
2100 audit_log_end(ab);
2101}
2102
2103static int bpf_prog_alloc_id(struct bpf_prog *prog)
2104{
2105 int id;
2106
2107 idr_preload(GFP_KERNEL);
2108 spin_lock_bh(&prog_idr_lock);
2109 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
2110 if (id > 0)
2111 prog->aux->id = id;
2112 spin_unlock_bh(&prog_idr_lock);
2113 idr_preload_end();
2114
2115 /* id is in [1, INT_MAX) */
2116 if (WARN_ON_ONCE(!id))
2117 return -ENOSPC;
2118
2119 return id > 0 ? 0 : id;
2120}
2121
2122void bpf_prog_free_id(struct bpf_prog *prog)
2123{
2124 unsigned long flags;
2125
2126 /* cBPF to eBPF migrations are currently not in the idr store.
2127 * Offloaded programs are removed from the store when their device
2128 * disappears - even if someone grabs an fd to them they are unusable,
2129 * simply waiting for refcnt to drop to be freed.
2130 */
2131 if (!prog->aux->id)
2132 return;
2133
2134 spin_lock_irqsave(&prog_idr_lock, flags);
2135 idr_remove(&prog_idr, prog->aux->id);
2136 prog->aux->id = 0;
2137 spin_unlock_irqrestore(&prog_idr_lock, flags);
2138}
2139
2140static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2141{
2142 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2143
2144 kvfree(aux->func_info);
2145 kfree(aux->func_info_aux);
2146 free_uid(aux->user);
2147 security_bpf_prog_free(aux);
2148 bpf_prog_free(aux->prog);
2149}
2150
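/* Release a program's auxiliary resources without touching its refcount.
 * When @deferred is true, the final teardown (__bpf_prog_put_rcu) is
 * postponed until after an RCU grace period - RCU tasks trace for sleepable
 * programs, regular RCU otherwise - so in-flight executions can finish
 * before the program memory is freed.
 */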
2151static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2152{
2153 bpf_prog_kallsyms_del_all(prog);
2154 btf_put(prog->aux->btf);
2155 module_put(prog->aux->mod);
2156 kvfree(prog->aux->jited_linfo);
2157 kvfree(prog->aux->linfo);
2158 kfree(prog->aux->kfunc_tab);
2159 if (prog->aux->attach_btf)
2160 btf_put(prog->aux->attach_btf);
2161
2162 if (deferred) {
2163 if (prog->aux->sleepable)
2164 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2165 else
2166 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2167 } else {
2168 __bpf_prog_put_rcu(&prog->aux->rcu);
2169 }
2170}
2171
2172static void bpf_prog_put_deferred(struct work_struct *work)
2173{
2174 struct bpf_prog_aux *aux;
2175 struct bpf_prog *prog;
2176
2177 aux = container_of(work, struct bpf_prog_aux, work);
2178 prog = aux->prog;
2179 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2180 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2181 bpf_prog_free_id(prog);
2182 __bpf_prog_put_noref(prog, true);
2183}
2184
2185static void __bpf_prog_put(struct bpf_prog *prog)
2186{
2187 struct bpf_prog_aux *aux = prog->aux;
2188
2189 if (atomic64_dec_and_test(&aux->refcnt)) {
2190 if (in_irq() || irqs_disabled()) {
2191 INIT_WORK(&aux->work, bpf_prog_put_deferred);
2192 schedule_work(&aux->work);
2193 } else {
2194 bpf_prog_put_deferred(&aux->work);
2195 }
2196 }
2197}
2198
2199void bpf_prog_put(struct bpf_prog *prog)
2200{
2201 __bpf_prog_put(prog);
2202}
2203EXPORT_SYMBOL_GPL(bpf_prog_put);
2204
2205static int bpf_prog_release(struct inode *inode, struct file *filp)
2206{
2207 struct bpf_prog *prog = filp->private_data;
2208
2209 bpf_prog_put(prog);
2210 return 0;
2211}
2212
2213struct bpf_prog_kstats {
2214 u64 nsecs;
2215 u64 cnt;
2216 u64 misses;
2217};
2218
2219void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2220{
2221 struct bpf_prog_stats *stats;
2222 unsigned int flags;
2223
2224 stats = this_cpu_ptr(prog->stats);
2225 flags = u64_stats_update_begin_irqsave(&stats->syncp);
2226 u64_stats_inc(&stats->misses);
2227 u64_stats_update_end_irqrestore(&stats->syncp, flags);
2228}
2229
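/* Aggregate the per-CPU run-time statistics into one bpf_prog_kstats
 * snapshot. Each CPU's counters are read under the u64_stats seqcount so a
 * concurrent update on that CPU cannot be observed torn (relevant on 32-bit
 * kernels, where 64-bit counters are not read atomically).
 */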
2230static void bpf_prog_get_stats(const struct bpf_prog *prog,
2231 struct bpf_prog_kstats *stats)
2232{
2233 u64 nsecs = 0, cnt = 0, misses = 0;
2234 int cpu;
2235
2236 for_each_possible_cpu(cpu) {
2237 const struct bpf_prog_stats *st;
2238 unsigned int start;
2239 u64 tnsecs, tcnt, tmisses;
2240
2241 st = per_cpu_ptr(prog->stats, cpu);
2242 do {
2243 start = u64_stats_fetch_begin(&st->syncp);
2244 tnsecs = u64_stats_read(&st->nsecs);
2245 tcnt = u64_stats_read(&st->cnt);
2246 tmisses = u64_stats_read(&st->misses);
2247 } while (u64_stats_fetch_retry(&st->syncp, start));
2248 nsecs += tnsecs;
2249 cnt += tcnt;
2250 misses += tmisses;
2251 }
2252 stats->nsecs = nsecs;
2253 stats->cnt = cnt;
2254 stats->misses = misses;
2255}
2256
2257#ifdef CONFIG_PROC_FS
2258static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2259{
2260 const struct bpf_prog *prog = filp->private_data;
2261 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2262 struct bpf_prog_kstats stats;
2263
2264 bpf_prog_get_stats(prog, &stats);
2265 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2266 seq_printf(m,
2267 "prog_type:\t%u\n"
2268 "prog_jited:\t%u\n"
2269 "prog_tag:\t%s\n"
2270 "memlock:\t%llu\n"
2271 "prog_id:\t%u\n"
2272 "run_time_ns:\t%llu\n"
2273 "run_cnt:\t%llu\n"
2274 "recursion_misses:\t%llu\n"
2275 "verified_insns:\t%u\n",
2276 prog->type,
2277 prog->jited,
2278 prog_tag,
2279 prog->pages * 1ULL << PAGE_SHIFT,
2280 prog->aux->id,
2281 stats.nsecs,
2282 stats.cnt,
2283 stats.misses,
2284 prog->aux->verified_insns);
2285}
2286#endif
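/* For reference, the resulting fdinfo entry renders roughly as follows (all
 * values here are illustrative, not taken from a real program):
 *
 *	prog_type:	1
 *	prog_jited:	1
 *	prog_tag:	bcf7977d3b93787c
 *	memlock:	4096
 *	prog_id:	42
 *	run_time_ns:	12345
 *	run_cnt:	7
 *	recursion_misses:	0
 *	verified_insns:	2
 */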
2287
2288const struct file_operations bpf_prog_fops = {
2289#ifdef CONFIG_PROC_FS
2290 .show_fdinfo = bpf_prog_show_fdinfo,
2291#endif
2292 .release = bpf_prog_release,
2293 .read = bpf_dummy_read,
2294 .write = bpf_dummy_write,
2295};
2296
2297int bpf_prog_new_fd(struct bpf_prog *prog)
2298{
2299 int ret;
2300
2301 ret = security_bpf_prog(prog);
2302 if (ret < 0)
2303 return ret;
2304
2305 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2306 O_RDWR | O_CLOEXEC);
2307}
2308
2309static struct bpf_prog *____bpf_prog_get(struct fd f)
2310{
2311 if (!f.file)
2312 return ERR_PTR(-EBADF);
2313 if (f.file->f_op != &bpf_prog_fops) {
2314 fdput(f);
2315 return ERR_PTR(-EINVAL);
2316 }
2317
2318 return f.file->private_data;
2319}
2320
2321void bpf_prog_add(struct bpf_prog *prog, int i)
2322{
2323 atomic64_add(i, &prog->aux->refcnt);
2324}
2325EXPORT_SYMBOL_GPL(bpf_prog_add);
2326
2327void bpf_prog_sub(struct bpf_prog *prog, int i)
2328{
2329 /* Only to be used for undoing previous bpf_prog_add() in some
2330 * error path. We still know that another entity in our call
2331 * path holds a reference to the program, thus atomic_sub() can
2332 * be safely used in such cases!
2333 */
2334 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2335}
2336EXPORT_SYMBOL_GPL(bpf_prog_sub);
2337
2338void bpf_prog_inc(struct bpf_prog *prog)
2339{
2340 atomic64_inc(&prog->aux->refcnt);
2341}
2342EXPORT_SYMBOL_GPL(bpf_prog_inc);
2343
2344/* prog_idr_lock should have been held */
2345struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2346{
2347 int refold;
2348
2349 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2350
2351 if (!refold)
2352 return ERR_PTR(-ENOENT);
2353
2354 return prog;
2355}
2356EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2357
2358bool bpf_prog_get_ok(struct bpf_prog *prog,
2359 enum bpf_prog_type *attach_type, bool attach_drv)
2360{
2361 /* not an attachment, just a refcount inc, always allow */
2362 if (!attach_type)
2363 return true;
2364
2365 if (prog->type != *attach_type)
2366 return false;
2367 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2368 return false;
2369
2370 return true;
2371}
2372
2373static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2374 bool attach_drv)
2375{
2376 struct fd f = fdget(ufd);
2377 struct bpf_prog *prog;
2378
2379 prog = ____bpf_prog_get(f);
2380 if (IS_ERR(prog))
2381 return prog;
2382 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
2383 prog = ERR_PTR(-EINVAL);
2384 goto out;
2385 }
2386
2387 bpf_prog_inc(prog);
2388out:
2389 fdput(f);
2390 return prog;
2391}
2392
2393struct bpf_prog *bpf_prog_get(u32 ufd)
2394{
2395 return __bpf_prog_get(ufd, NULL, false);
2396}
2397
2398struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2399 bool attach_drv)
2400{
2401 return __bpf_prog_get(ufd, &type, attach_drv);
2402}
2403EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2404
2405/* Initially all BPF programs could be loaded w/o specifying
2406 * expected_attach_type. Later, for some of them, specifying expected_attach_type
2407 * at load time became required so that the program could be validated properly.
2408 * Programs of types that are allowed to be loaded both w/ and w/o (for
2409 * backward compatibility) expected_attach_type should have the default attach
2410 * type assigned to expected_attach_type for the latter case, so that it can be
2411 * validated later at attach time.
2412 *
2413 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if the
2414 * prog type requires it but has some attach types that have to be backward
2415 * compatible.
2416 */
2417static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2418{
2419 switch (attr->prog_type) {
2420 case BPF_PROG_TYPE_CGROUP_SOCK:
2421 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2422 * exist so checking for non-zero is the way to go here.
2423 */
2424 if (!attr->expected_attach_type)
2425 attr->expected_attach_type =
2426 BPF_CGROUP_INET_SOCK_CREATE;
2427 break;
2428 case BPF_PROG_TYPE_SK_REUSEPORT:
2429 if (!attr->expected_attach_type)
2430 attr->expected_attach_type =
2431 BPF_SK_REUSEPORT_SELECT;
2432 break;
2433 }
2434}
2435
2436static int
2437bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2438 enum bpf_attach_type expected_attach_type,
2439 struct btf *attach_btf, u32 btf_id,
2440 struct bpf_prog *dst_prog)
2441{
2442 if (btf_id) {
2443 if (btf_id > BTF_MAX_TYPE)
2444 return -EINVAL;
2445
2446 if (!attach_btf && !dst_prog)
2447 return -EINVAL;
2448
2449 switch (prog_type) {
2450 case BPF_PROG_TYPE_TRACING:
2451 case BPF_PROG_TYPE_LSM:
2452 case BPF_PROG_TYPE_STRUCT_OPS:
2453 case BPF_PROG_TYPE_EXT:
2454 break;
2455 default:
2456 return -EINVAL;
2457 }
2458 }
2459
2460 if (attach_btf && (!btf_id || dst_prog))
2461 return -EINVAL;
2462
2463 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2464 prog_type != BPF_PROG_TYPE_EXT)
2465 return -EINVAL;
2466
2467 switch (prog_type) {
2468 case BPF_PROG_TYPE_CGROUP_SOCK:
2469 switch (expected_attach_type) {
2470 case BPF_CGROUP_INET_SOCK_CREATE:
2471 case BPF_CGROUP_INET_SOCK_RELEASE:
2472 case BPF_CGROUP_INET4_POST_BIND:
2473 case BPF_CGROUP_INET6_POST_BIND:
2474 return 0;
2475 default:
2476 return -EINVAL;
2477 }
2478 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2479 switch (expected_attach_type) {
2480 case BPF_CGROUP_INET4_BIND:
2481 case BPF_CGROUP_INET6_BIND:
2482 case BPF_CGROUP_INET4_CONNECT:
2483 case BPF_CGROUP_INET6_CONNECT:
2484 case BPF_CGROUP_UNIX_CONNECT:
2485 case BPF_CGROUP_INET4_GETPEERNAME:
2486 case BPF_CGROUP_INET6_GETPEERNAME:
2487 case BPF_CGROUP_UNIX_GETPEERNAME:
2488 case BPF_CGROUP_INET4_GETSOCKNAME:
2489 case BPF_CGROUP_INET6_GETSOCKNAME:
2490 case BPF_CGROUP_UNIX_GETSOCKNAME:
2491 case BPF_CGROUP_UDP4_SENDMSG:
2492 case BPF_CGROUP_UDP6_SENDMSG:
2493 case BPF_CGROUP_UNIX_SENDMSG:
2494 case BPF_CGROUP_UDP4_RECVMSG:
2495 case BPF_CGROUP_UDP6_RECVMSG:
2496 case BPF_CGROUP_UNIX_RECVMSG:
2497 return 0;
2498 default:
2499 return -EINVAL;
2500 }
2501 case BPF_PROG_TYPE_CGROUP_SKB:
2502 switch (expected_attach_type) {
2503 case BPF_CGROUP_INET_INGRESS:
2504 case BPF_CGROUP_INET_EGRESS:
2505 return 0;
2506 default:
2507 return -EINVAL;
2508 }
2509 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2510 switch (expected_attach_type) {
2511 case BPF_CGROUP_SETSOCKOPT:
2512 case BPF_CGROUP_GETSOCKOPT:
2513 return 0;
2514 default:
2515 return -EINVAL;
2516 }
2517 case BPF_PROG_TYPE_SK_LOOKUP:
2518 if (expected_attach_type == BPF_SK_LOOKUP)
2519 return 0;
2520 return -EINVAL;
2521 case BPF_PROG_TYPE_SK_REUSEPORT:
2522 switch (expected_attach_type) {
2523 case BPF_SK_REUSEPORT_SELECT:
2524 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2525 return 0;
2526 default:
2527 return -EINVAL;
2528 }
2529 case BPF_PROG_TYPE_NETFILTER:
2530 if (expected_attach_type == BPF_NETFILTER)
2531 return 0;
2532 return -EINVAL;
2533 case BPF_PROG_TYPE_SYSCALL:
2534 case BPF_PROG_TYPE_EXT:
2535 if (expected_attach_type)
2536 return -EINVAL;
2537 fallthrough;
2538 default:
2539 return 0;
2540 }
2541}
2542
2543static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2544{
2545 switch (prog_type) {
2546 case BPF_PROG_TYPE_SCHED_CLS:
2547 case BPF_PROG_TYPE_SCHED_ACT:
2548 case BPF_PROG_TYPE_XDP:
2549 case BPF_PROG_TYPE_LWT_IN:
2550 case BPF_PROG_TYPE_LWT_OUT:
2551 case BPF_PROG_TYPE_LWT_XMIT:
2552 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2553 case BPF_PROG_TYPE_SK_SKB:
2554 case BPF_PROG_TYPE_SK_MSG:
2555 case BPF_PROG_TYPE_FLOW_DISSECTOR:
2556 case BPF_PROG_TYPE_CGROUP_DEVICE:
2557 case BPF_PROG_TYPE_CGROUP_SOCK:
2558 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2559 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2560 case BPF_PROG_TYPE_CGROUP_SYSCTL:
2561 case BPF_PROG_TYPE_SOCK_OPS:
2562 case BPF_PROG_TYPE_EXT: /* extends any prog */
2563 case BPF_PROG_TYPE_NETFILTER:
2564 return true;
2565 case BPF_PROG_TYPE_CGROUP_SKB:
2566 /* always unpriv */
2567 case BPF_PROG_TYPE_SK_REUSEPORT:
2568 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2569 default:
2570 return false;
2571 }
2572}
2573
2574static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2575{
2576 switch (prog_type) {
2577 case BPF_PROG_TYPE_KPROBE:
2578 case BPF_PROG_TYPE_TRACEPOINT:
2579 case BPF_PROG_TYPE_PERF_EVENT:
2580 case BPF_PROG_TYPE_RAW_TRACEPOINT:
2581 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2582 case BPF_PROG_TYPE_TRACING:
2583 case BPF_PROG_TYPE_LSM:
2584 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2585 case BPF_PROG_TYPE_EXT: /* extends any prog */
2586 return true;
2587 default:
2588 return false;
2589 }
2590}
2591
2592/* last field in 'union bpf_attr' used by this command */
2593#define BPF_PROG_LOAD_LAST_FIELD log_true_size
2594
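/* Illustrative sketch (user-space side, not part of this file): a minimal
 * BPF_PROG_LOAD request fills a union bpf_attr and invokes the bpf() syscall,
 * e.g.:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	// r0 = 0
 *		BPF_EXIT_INSN(),		// return r0
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns	   = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * bpf_prog_load() below is the kernel-side handling of that request.
 */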
2595static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2596{
2597 enum bpf_prog_type type = attr->prog_type;
2598 struct bpf_prog *prog, *dst_prog = NULL;
2599 struct btf *attach_btf = NULL;
2600 int err;
2601 char license[128];
2602
2603 if (CHECK_ATTR(BPF_PROG_LOAD))
2604 return -EINVAL;
2605
2606 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2607 BPF_F_ANY_ALIGNMENT |
2608 BPF_F_TEST_STATE_FREQ |
2609 BPF_F_SLEEPABLE |
2610 BPF_F_TEST_RND_HI32 |
2611 BPF_F_XDP_HAS_FRAGS |
2612 BPF_F_XDP_DEV_BOUND_ONLY |
2613 BPF_F_TEST_REG_INVARIANTS))
2614 return -EINVAL;
2615
2616 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2617 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2618 !bpf_capable())
2619 return -EPERM;
2620
2621 /* Intent here is for unprivileged_bpf_disabled to block BPF program
2622 * creation for unprivileged users; other actions depend
2623 * on fd availability and access to bpffs, so are dependent on
2624 * object creation success. Even with unprivileged BPF disabled,
2625 * capability checks are still carried out for these
2626 * and other operations.
2627 */
2628 if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
2629 return -EPERM;
2630
2631 if (attr->insn_cnt == 0 ||
2632 attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2633 return -E2BIG;
2634 if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2635 type != BPF_PROG_TYPE_CGROUP_SKB &&
2636 !bpf_capable())
2637 return -EPERM;
2638
2639 if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2640 return -EPERM;
2641 if (is_perfmon_prog_type(type) && !perfmon_capable())
2642 return -EPERM;
2643
2644 /* attach_prog_fd/attach_btf_obj_fd can specify an fd of either a bpf_prog
2645 * or a btf object, so we need to check which one it is
2646 */
2647 if (attr->attach_prog_fd) {
2648 dst_prog = bpf_prog_get(attr->attach_prog_fd);
2649 if (IS_ERR(dst_prog)) {
2650 dst_prog = NULL;
2651 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2652 if (IS_ERR(attach_btf))
2653 return -EINVAL;
2654 if (!btf_is_kernel(attach_btf)) {
2655 /* attaching through specifying bpf_prog's BTF
2656 * objects directly might be supported eventually
2657 */
2658 btf_put(attach_btf);
2659 return -ENOTSUPP;
2660 }
2661 }
2662 } else if (attr->attach_btf_id) {
2663 /* fall back to vmlinux BTF, if BTF type ID is specified */
2664 attach_btf = bpf_get_btf_vmlinux();
2665 if (IS_ERR(attach_btf))
2666 return PTR_ERR(attach_btf);
2667 if (!attach_btf)
2668 return -EINVAL;
2669 btf_get(attach_btf);
2670 }
2671
2672 bpf_prog_load_fixup_attach_type(attr);
2673 if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2674 attach_btf, attr->attach_btf_id,
2675 dst_prog)) {
2676 if (dst_prog)
2677 bpf_prog_put(dst_prog);
2678 if (attach_btf)
2679 btf_put(attach_btf);
2680 return -EINVAL;
2681 }
2682
2683 /* plain bpf_prog allocation */
2684 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2685 if (!prog) {
2686 if (dst_prog)
2687 bpf_prog_put(dst_prog);
2688 if (attach_btf)
2689 btf_put(attach_btf);
2690 return -ENOMEM;
2691 }
2692
2693 prog->expected_attach_type = attr->expected_attach_type;
2694 prog->aux->attach_btf = attach_btf;
2695 prog->aux->attach_btf_id = attr->attach_btf_id;
2696 prog->aux->dst_prog = dst_prog;
2697 prog->aux->dev_bound = !!attr->prog_ifindex;
2698 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2699 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2700
2701 err = security_bpf_prog_alloc(prog->aux);
2702 if (err)
2703 goto free_prog;
2704
2705 prog->aux->user = get_current_user();
2706 prog->len = attr->insn_cnt;
2707
2708 err = -EFAULT;
2709 if (copy_from_bpfptr(prog->insns,
2710 make_bpfptr(attr->insns, uattr.is_kernel),
2711 bpf_prog_insn_size(prog)) != 0)
2712 goto free_prog_sec;
2713 /* copy eBPF program license from user space */
2714 if (strncpy_from_bpfptr(license,
2715 make_bpfptr(attr->license, uattr.is_kernel),
2716 sizeof(license) - 1) < 0)
2717 goto free_prog_sec;
2718 license[sizeof(license) - 1] = 0;
2719
2720 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2721 prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2722
2723 prog->orig_prog = NULL;
2724 prog->jited = 0;
2725
2726 atomic64_set(&prog->aux->refcnt, 1);
2727
2728 if (bpf_prog_is_dev_bound(prog->aux)) {
2729 err = bpf_prog_dev_bound_init(prog, attr);
2730 if (err)
2731 goto free_prog_sec;
2732 }
2733
2734 if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2735 bpf_prog_is_dev_bound(dst_prog->aux)) {
2736 err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2737 if (err)
2738 goto free_prog_sec;
2739 }
2740
2741 /*
2742 * Bookkeeping for managing the program attachment chain.
2743 *
2744 * It might be tempting to set the attach_tracing_prog flag at attachment
2745 * time, but that would not prevent loading a bunch of tracing programs
2746 * first and then attaching them to one another.
2747 *
2748 * The attach_tracing_prog flag is set for the whole program lifecycle and
2749 * doesn't have to be cleared in bpf_tracing_link_release, since tracing
2750 * programs cannot change their attachment target.
2751 */
2752 if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
2753 dst_prog->type == BPF_PROG_TYPE_TRACING) {
2754 prog->aux->attach_tracing_prog = true;
2755 }
2756
2757 /* find program type: socket_filter vs tracing_filter */
2758 err = find_prog_type(type, prog);
2759 if (err < 0)
2760 goto free_prog_sec;
2761
2762 prog->aux->load_time = ktime_get_boottime_ns();
2763 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2764 sizeof(attr->prog_name));
2765 if (err < 0)
2766 goto free_prog_sec;
2767
2768 /* run eBPF verifier */
2769 err = bpf_check(&prog, attr, uattr, uattr_size);
2770 if (err < 0)
2771 goto free_used_maps;
2772
2773 prog = bpf_prog_select_runtime(prog, &err);
2774 if (err < 0)
2775 goto free_used_maps;
2776
2777 err = bpf_prog_alloc_id(prog);
2778 if (err)
2779 goto free_used_maps;
2780
2781 /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2782 * effectively publicly exposed. However, retrieving via
2783 * bpf_prog_get_fd_by_id() will take another reference,
2784 * therefore it cannot go away underneath us.
2785 *
2786 * Only for the time /after/ successful bpf_prog_new_fd()
2787 * and before returning to userspace, we might just hold
2788 * one reference and any parallel close on that fd could
2789 * rip everything out. Hence, below notifications must
2790 * happen before bpf_prog_new_fd().
2791 *
2792 * Also, any failure handling from this point onwards must
2793 * be using bpf_prog_put() given the program is exposed.
2794 */
2795 bpf_prog_kallsyms_add(prog);
2796 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2797 bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2798
2799 err = bpf_prog_new_fd(prog);
2800 if (err < 0)
2801 bpf_prog_put(prog);
2802 return err;
2803
2804free_used_maps:
2805 /* In case we have subprogs, we need to wait for a grace
2806 * period before we can tear down JIT memory since symbols
2807 * are already exposed under kallsyms.
2808 */
2809 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
2810 return err;
2811free_prog_sec:
2812 free_uid(prog->aux->user);
2813 security_bpf_prog_free(prog->aux);
2814free_prog:
2815 if (prog->aux->attach_btf)
2816 btf_put(prog->aux->attach_btf);
2817 bpf_prog_free(prog);
2818 return err;
2819}
2820
2821#define BPF_OBJ_LAST_FIELD path_fd
2822
2823static int bpf_obj_pin(const union bpf_attr *attr)
2824{
2825 int path_fd;
2826
2827 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
2828 return -EINVAL;
2829
2830 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2831 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2832 return -EINVAL;
2833
2834 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2835 return bpf_obj_pin_user(attr->bpf_fd, path_fd,
2836 u64_to_user_ptr(attr->pathname));
2837}
2838
2839static int bpf_obj_get(const union bpf_attr *attr)
2840{
2841 int path_fd;
2842
2843 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2844 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
2845 return -EINVAL;
2846
2847 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2848 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2849 return -EINVAL;
2850
2851 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2852 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
2853 attr->file_flags);
2854}
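/* Illustrative sketch (user-space side, not part of this file): pinning makes
 * a BPF object reachable through a bpffs path, e.g.:
 *
 *	union bpf_attr attr = {
 *		.bpf_fd	  = prog_fd,
 *		.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog",
 *	};
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 * BPF_OBJ_GET later reopens the pinned object by the same path. With
 * BPF_F_PATH_FD set, path_fd supplies the dirfd against which a relative
 * pathname is resolved; otherwise AT_FDCWD is used, as the two helpers above
 * show.
 */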
2855
2856void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2857 const struct bpf_link_ops *ops, struct bpf_prog *prog)
2858{
2859 atomic64_set(&link->refcnt, 1);
2860 link->type = type;
2861 link->id = 0;
2862 link->ops = ops;
2863 link->prog = prog;
2864}
2865
2866static void bpf_link_free_id(int id)
2867{
2868 if (!id)
2869 return;
2870
2871 spin_lock_bh(&link_idr_lock);
2872 idr_remove(&link_idr, id);
2873 spin_unlock_bh(&link_idr_lock);
2874}
2875
2876/* Clean up a bpf_link and its corresponding anon_inode file and FD. After
2877 * the anon_inode is created, the bpf_link can't simply be kfree()'d due to
2878 * the deferred anon_inode release() call. This helper marks the bpf_link as
2879 * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
2880 * refcnt is not decremented; that is the responsibility of the calling code
2881 * that failed to complete bpf_link initialization.
2882 * This helper eventually calls the link's dealloc callback, but does not
2883 * call the link's release callback.
2884 */
2885void bpf_link_cleanup(struct bpf_link_primer *primer)
2886{
2887 primer->link->prog = NULL;
2888 bpf_link_free_id(primer->id);
2889 fput(primer->file);
2890 put_unused_fd(primer->fd);
2891}
2892
2893void bpf_link_inc(struct bpf_link *link)
2894{
2895 atomic64_inc(&link->refcnt);
2896}
2897
2898/* bpf_link_free is guaranteed to be called from process context */
2899static void bpf_link_free(struct bpf_link *link)
2900{
2901 bpf_link_free_id(link->id);
2902 if (link->prog) {
2903 /* detach BPF program, clean up used resources */
2904 link->ops->release(link);
2905 bpf_prog_put(link->prog);
2906 }
2907 /* free bpf_link and its containing memory */
2908 link->ops->dealloc(link);
2909}
2910
2911static void bpf_link_put_deferred(struct work_struct *work)
2912{
2913 struct bpf_link *link = container_of(work, struct bpf_link, work);
2914
2915 bpf_link_free(link);
2916}
2917
2918/* bpf_link_put() might be called from atomic context. The release path may
2919 * need sleeping locks, so the actual freeing is deferred to a workqueue,
2920 * which runs in sleepable (process) context. */
2921void bpf_link_put(struct bpf_link *link)
2922{
2923 if (!atomic64_dec_and_test(&link->refcnt))
2924 return;
2925
2926 INIT_WORK(&link->work, bpf_link_put_deferred);
2927 schedule_work(&link->work);
2928}
2929EXPORT_SYMBOL(bpf_link_put);
2930
2931static void bpf_link_put_direct(struct bpf_link *link)
2932{
2933 if (!atomic64_dec_and_test(&link->refcnt))
2934 return;
2935 bpf_link_free(link);
2936}
2937
2938static int bpf_link_release(struct inode *inode, struct file *filp)
2939{
2940 struct bpf_link *link = filp->private_data;
2941
2942 bpf_link_put_direct(link);
2943 return 0;
2944}
2945
2946#ifdef CONFIG_PROC_FS
2947#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2948#define BPF_MAP_TYPE(_id, _ops)
2949#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2950static const char *bpf_link_type_strs[] = {
2951 [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2952#include <linux/bpf_types.h>
2953};
2954#undef BPF_PROG_TYPE
2955#undef BPF_MAP_TYPE
2956#undef BPF_LINK_TYPE
2957
2958static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2959{
2960 const struct bpf_link *link = filp->private_data;
2961 const struct bpf_prog *prog = link->prog;
2962 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2963
2964 seq_printf(m,
2965 "link_type:\t%s\n"
2966 "link_id:\t%u\n",
2967 bpf_link_type_strs[link->type],
2968 link->id);
2969 if (prog) {
2970 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2971 seq_printf(m,
2972 "prog_tag:\t%s\n"
2973 "prog_id:\t%u\n",
2974 prog_tag,
2975 prog->aux->id);
2976 }
2977 if (link->ops->show_fdinfo)
2978 link->ops->show_fdinfo(link, m);
2979}
2980#endif
2981
2982static const struct file_operations bpf_link_fops = {
2983#ifdef CONFIG_PROC_FS
2984 .show_fdinfo = bpf_link_show_fdinfo,
2985#endif
2986 .release = bpf_link_release,
2987 .read = bpf_dummy_read,
2988 .write = bpf_dummy_write,
2989};
2990
2991static int bpf_link_alloc_id(struct bpf_link *link)
2992{
2993 int id;
2994
2995 idr_preload(GFP_KERNEL);
2996 spin_lock_bh(&link_idr_lock);
2997 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2998 spin_unlock_bh(&link_idr_lock);
2999 idr_preload_end();
3000
3001 return id;
3002}
3003
3004/* Prepare a bpf_link to be exposed to user-space by allocating an anon_inode
3005 * file, reserving an unused FD and allocating an ID from link_idr. This is to
3006 * be paired with bpf_link_settle() to install the FD and ID and expose the
3007 * bpf_link to user-space, if the bpf_link is successfully attached. If not,
3008 * the bpf_link and pre-allocated resources are to be freed with a
3009 * bpf_link_cleanup() call. All the transient state is passed around in
3010 * struct bpf_link_primer. This is the preferred way to create and initialize
3011 * a bpf_link, especially when there are complicated and expensive operations
3012 * between creating the bpf_link itself and attaching it to a BPF hook. By
3013 * using bpf_link_prime() and bpf_link_settle(), kernel code using bpf_link
3014 * doesn't have to perform expensive (and potentially failing) rollback
3015 * operations in the rare case that the file, FD, or ID can't be allocated.
3016 */
3017int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
3018{
3019 struct file *file;
3020 int fd, id;
3021
3022 fd = get_unused_fd_flags(O_CLOEXEC);
3023 if (fd < 0)
3024 return fd;
3025
3027 id = bpf_link_alloc_id(link);
3028 if (id < 0) {
3029 put_unused_fd(fd);
3030 return id;
3031 }
3032
3033 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
3034 if (IS_ERR(file)) {
3035 bpf_link_free_id(id);
3036 put_unused_fd(fd);
3037 return PTR_ERR(file);
3038 }
3039
3040 primer->link = link;
3041 primer->file = file;
3042 primer->fd = fd;
3043 primer->id = id;
3044 return 0;
3045}
3046
3047int bpf_link_settle(struct bpf_link_primer *primer)
3048{
3049 /* make bpf_link fetchable by ID */
3050 spin_lock_bh(&link_idr_lock);
3051 primer->link->id = primer->id;
3052 spin_unlock_bh(&link_idr_lock);
3053 /* make bpf_link fetchable by FD */
3054 fd_install(primer->fd, primer->file);
3055 /* pass through installed FD */
3056 return primer->fd;
3057}
3058
3059int bpf_link_new_fd(struct bpf_link *link)
3060{
3061 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
3062}
3063
3064struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3065{
3066 struct fd f = fdget(ufd);
3067 struct bpf_link *link;
3068
3069 if (!f.file)
3070 return ERR_PTR(-EBADF);
3071 if (f.file->f_op != &bpf_link_fops) {
3072 fdput(f);
3073 return ERR_PTR(-EINVAL);
3074 }
3075
3076 link = f.file->private_data;
3077 bpf_link_inc(link);
3078 fdput(f);
3079
3080 return link;
3081}
3082EXPORT_SYMBOL(bpf_link_get_from_fd);
3083
3084static void bpf_tracing_link_release(struct bpf_link *link)
3085{
3086 struct bpf_tracing_link *tr_link =
3087 container_of(link, struct bpf_tracing_link, link.link);
3088
3089 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3090 tr_link->trampoline));
3091
3092 bpf_trampoline_put(tr_link->trampoline);
3093
3094 /* tgt_prog is NULL if target is a kernel function */
3095 if (tr_link->tgt_prog)
3096 bpf_prog_put(tr_link->tgt_prog);
3097}
3098
3099static void bpf_tracing_link_dealloc(struct bpf_link *link)
3100{
3101 struct bpf_tracing_link *tr_link =
3102 container_of(link, struct bpf_tracing_link, link.link);
3103
3104 kfree(tr_link);
3105}
3106
3107static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3108 struct seq_file *seq)
3109{
3110 struct bpf_tracing_link *tr_link =
3111 container_of(link, struct bpf_tracing_link, link.link);
3112 u32 target_btf_id, target_obj_id;
3113
3114 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3115 &target_obj_id, &target_btf_id);
3116 seq_printf(seq,
3117 "attach_type:\t%d\n"
3118 "target_obj_id:\t%u\n"
3119 "target_btf_id:\t%u\n",
3120 tr_link->attach_type,
3121 target_obj_id,
3122 target_btf_id);
3123}
3124
3125static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3126 struct bpf_link_info *info)
3127{
3128 struct bpf_tracing_link *tr_link =
3129 container_of(link, struct bpf_tracing_link, link.link);
3130
3131 info->tracing.attach_type = tr_link->attach_type;
3132 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3133 &info->tracing.target_obj_id,
3134 &info->tracing.target_btf_id);
3135
3136 return 0;
3137}
3138
3139static const struct bpf_link_ops bpf_tracing_link_lops = {
3140 .release = bpf_tracing_link_release,
3141 .dealloc = bpf_tracing_link_dealloc,
3142 .show_fdinfo = bpf_tracing_link_show_fdinfo,
3143 .fill_link_info = bpf_tracing_link_fill_link_info,
3144};
3145
3146static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3147 int tgt_prog_fd,
3148 u32 btf_id,
3149 u64 bpf_cookie)
3150{
3151 struct bpf_link_primer link_primer;
3152 struct bpf_prog *tgt_prog = NULL;
3153 struct bpf_trampoline *tr = NULL;
3154 struct bpf_tracing_link *link;
3155 u64 key = 0;
3156 int err;
3157
3158 switch (prog->type) {
3159 case BPF_PROG_TYPE_TRACING:
3160 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3161 prog->expected_attach_type != BPF_TRACE_FEXIT &&
3162 prog->expected_attach_type != BPF_MODIFY_RETURN) {
3163 err = -EINVAL;
3164 goto out_put_prog;
3165 }
3166 break;
3167 case BPF_PROG_TYPE_EXT:
3168 if (prog->expected_attach_type != 0) {
3169 err = -EINVAL;
3170 goto out_put_prog;
3171 }
3172 break;
3173 case BPF_PROG_TYPE_LSM:
3174 if (prog->expected_attach_type != BPF_LSM_MAC) {
3175 err = -EINVAL;
3176 goto out_put_prog;
3177 }
3178 break;
3179 default:
3180 err = -EINVAL;
3181 goto out_put_prog;
3182 }
3183
3184 if (!!tgt_prog_fd != !!btf_id) {
3185 err = -EINVAL;
3186 goto out_put_prog;
3187 }
3188
3189 if (tgt_prog_fd) {
3190 /*
3191 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this
3192 * part is ever changed to implement the same for
3193 * BPF_PROG_TYPE_TRACING, do not forget to update how the
3194 * attach_tracing_prog flag is set.
3195 */
3196 if (prog->type != BPF_PROG_TYPE_EXT) {
3197 err = -EINVAL;
3198 goto out_put_prog;
3199 }
3200
3201 tgt_prog = bpf_prog_get(tgt_prog_fd);
3202 if (IS_ERR(tgt_prog)) {
3203 err = PTR_ERR(tgt_prog);
3204 tgt_prog = NULL;
3205 goto out_put_prog;
3206 }
3207
3208 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3209 }
3210
3211 link = kzalloc(sizeof(*link), GFP_USER);
3212 if (!link) {
3213 err = -ENOMEM;
3214 goto out_put_prog;
3215 }
3216 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3217 &bpf_tracing_link_lops, prog);
3218 link->attach_type = prog->expected_attach_type;
3219 link->link.cookie = bpf_cookie;
3220
3221 mutex_lock(&prog->aux->dst_mutex);
3222
3223 /* There are a few possible cases here:
3224 *
3225 * - if prog->aux->dst_trampoline is set, the program was just loaded
3226 * and not yet attached to anything, so we can use the values stored
3227 * in prog->aux
3228 *
3229 * - if prog->aux->dst_trampoline is NULL, the program has already been
3230 * attached to a target and its initial target was cleared (below)
3231 *
3232 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3233 * target_btf_id using the link_create API.
3234 *
3235 * - if tgt_prog == NULL, this function was called using the old
3236 * raw_tracepoint_open API, and we need a target from prog->aux
3237 *
3238 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
3239 * was detached and is going for re-attachment.
3240 *
3241 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
3242 * are NULL, then program was already attached and user did not provide
3243 * tgt_prog_fd so we have no way to find out or create trampoline
3244 */
3245 if (!prog->aux->dst_trampoline && !tgt_prog) {
3246 /*
3247 * Allow re-attach for TRACING and LSM programs. If it's
3248 * currently linked, bpf_trampoline_link_prog will fail.
3249 * EXT programs need to specify tgt_prog_fd, so they
3250 * re-attach in separate code path.
3251 */
3252 if (prog->type != BPF_PROG_TYPE_TRACING &&
3253 prog->type != BPF_PROG_TYPE_LSM) {
3254 err = -EINVAL;
3255 goto out_unlock;
3256 }
3257 /* We can allow re-attach only if we have valid attach_btf. */
3258 if (!prog->aux->attach_btf) {
3259 err = -EINVAL;
3260 goto out_unlock;
3261 }
3262 btf_id = prog->aux->attach_btf_id;
3263 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3264 }
3265
3266 if (!prog->aux->dst_trampoline ||
3267 (key && key != prog->aux->dst_trampoline->key)) {
3268 /* If there is no saved target, or the specified target is
3269 * different from the destination specified at load time, we
3270 * need a new trampoline and a check for compatibility
3271 */
3272 struct bpf_attach_target_info tgt_info = {};
3273
3274 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3275 &tgt_info);
3276 if (err)
3277 goto out_unlock;
3278
3279 if (tgt_info.tgt_mod) {
3280 module_put(prog->aux->mod);
3281 prog->aux->mod = tgt_info.tgt_mod;
3282 }
3283
3284 tr = bpf_trampoline_get(key, &tgt_info);
3285 if (!tr) {
3286 err = -ENOMEM;
3287 goto out_unlock;
3288 }
3289 } else {
3290 /* The caller didn't specify a target, or the target was the
3291 * same as the destination supplied during program load. This
3292 * means we can reuse the trampoline and reference from program
3293 * load time, and there is no need to allocate a new one. This
3294 * can only happen once for any program, as the saved values in
3295 * prog->aux are cleared below.
3296 */
3297 tr = prog->aux->dst_trampoline;
3298 tgt_prog = prog->aux->dst_prog;
3299 }
3300
3301 err = bpf_link_prime(&link->link.link, &link_primer);
3302 if (err)
3303 goto out_unlock;
3304
3305 err = bpf_trampoline_link_prog(&link->link, tr);
3306 if (err) {
3307 bpf_link_cleanup(&link_primer);
3308 link = NULL;
3309 goto out_unlock;
3310 }
3311
3312 link->tgt_prog = tgt_prog;
3313 link->trampoline = tr;
3314
3315 /* Always clear the trampoline and target prog from prog->aux to make
3316 * sure the original attach destination is not kept alive after a
3317 * program is (re-)attached to another target.
3318 */
3319 if (prog->aux->dst_prog &&
3320 (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3321 /* got extra prog ref from syscall, or attaching to different prog */
3322 bpf_prog_put(prog->aux->dst_prog);
3323 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3324 /* we allocated a new trampoline, so free the old one */
3325 bpf_trampoline_put(prog->aux->dst_trampoline);
3326
3327 prog->aux->dst_prog = NULL;
3328 prog->aux->dst_trampoline = NULL;
3329 mutex_unlock(&prog->aux->dst_mutex);
3330
3331 return bpf_link_settle(&link_primer);
3332out_unlock:
3333 if (tr && tr != prog->aux->dst_trampoline)
3334 bpf_trampoline_put(tr);
3335 mutex_unlock(&prog->aux->dst_mutex);
3336 kfree(link);
3337out_put_prog:
3338 if (tgt_prog_fd && tgt_prog)
3339 bpf_prog_put(tgt_prog);
3340 return err;
3341}
3342
3343struct bpf_raw_tp_link {
3344 struct bpf_link link;
3345 struct bpf_raw_event_map *btp;
3346};
3347
3348static void bpf_raw_tp_link_release(struct bpf_link *link)
3349{
3350 struct bpf_raw_tp_link *raw_tp =
3351 container_of(link, struct bpf_raw_tp_link, link);
3352
3353 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
3354 bpf_put_raw_tracepoint(raw_tp->btp);
3355}
3356
3357static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3358{
3359 struct bpf_raw_tp_link *raw_tp =
3360 container_of(link, struct bpf_raw_tp_link, link);
3361
3362 kfree(raw_tp);
3363}
3364
3365static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3366 struct seq_file *seq)
3367{
3368 struct bpf_raw_tp_link *raw_tp_link =
3369 container_of(link, struct bpf_raw_tp_link, link);
3370
3371 seq_printf(seq,
3372 "tp_name:\t%s\n",
3373 raw_tp_link->btp->tp->name);
3374}
3375
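/* Copy the NUL-terminated string @buf (of length @len) into the user buffer
 * @ubuf of size @ulen. If the buffer is too small, a truncated but still
 * NUL-terminated prefix is copied and -ENOSPC is returned to signal the
 * truncation. Callers are expected to pass ulen >= 1 whenever ubuf is set.
 */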
3376static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3377 u32 len)
3378{
3379 if (ulen >= len + 1) {
3380 if (copy_to_user(ubuf, buf, len + 1))
3381 return -EFAULT;
3382 } else {
3383 char zero = '\0';
3384
3385 if (copy_to_user(ubuf, buf, ulen - 1))
3386 return -EFAULT;
3387 if (put_user(zero, ubuf + ulen - 1))
3388 return -EFAULT;
3389 return -ENOSPC;
3390 }
3391
3392 return 0;
3393}
3394
3395static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3396 struct bpf_link_info *info)
3397{
3398 struct bpf_raw_tp_link *raw_tp_link =
3399 container_of(link, struct bpf_raw_tp_link, link);
3400 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3401 const char *tp_name = raw_tp_link->btp->tp->name;
3402 u32 ulen = info->raw_tracepoint.tp_name_len;
3403 size_t tp_len = strlen(tp_name);
3404
3405 if (!ulen ^ !ubuf)
3406 return -EINVAL;
3407
3408 info->raw_tracepoint.tp_name_len = tp_len + 1;
3409
3410 if (!ubuf)
3411 return 0;
3412
3413 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3414}
3415
3416static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3417 .release = bpf_raw_tp_link_release,
3418 .dealloc = bpf_raw_tp_link_dealloc,
3419 .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3420 .fill_link_info = bpf_raw_tp_link_fill_link_info,
3421};
3422
3423#ifdef CONFIG_PERF_EVENTS
3424struct bpf_perf_link {
3425 struct bpf_link link;
3426 struct file *perf_file;
3427};
3428
3429static void bpf_perf_link_release(struct bpf_link *link)
3430{
3431 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3432 struct perf_event *event = perf_link->perf_file->private_data;
3433
3434 perf_event_free_bpf_prog(event);
3435 fput(perf_link->perf_file);
3436}
3437
3438static void bpf_perf_link_dealloc(struct bpf_link *link)
3439{
3440 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3441
3442 kfree(perf_link);
3443}
3444
3445static int bpf_perf_link_fill_common(const struct perf_event *event,
3446 char __user *uname, u32 ulen,
3447 u64 *probe_offset, u64 *probe_addr,
3448 u32 *fd_type, unsigned long *missed)
3449{
3450 const char *buf;
3451 u32 prog_id;
3452 size_t len;
3453 int err;
3454
3455 if (!ulen ^ !uname)
3456 return -EINVAL;
3457
3458 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3459 probe_offset, probe_addr, missed);
3460 if (err)
3461 return err;
3462 if (!uname)
3463 return 0;
3464 if (buf) {
3465 len = strlen(buf);
3466 err = bpf_copy_to_user(uname, buf, ulen, len);
3467 if (err)
3468 return err;
3469 } else {
3470 char zero = '\0';
3471
3472 if (put_user(zero, uname))
3473 return -EFAULT;
3474 }
3475 return 0;
3476}
3477
3478#ifdef CONFIG_KPROBE_EVENTS
3479static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3480 struct bpf_link_info *info)
3481{
3482 unsigned long missed;
3483 char __user *uname;
3484 u64 addr, offset;
3485 u32 ulen, type;
3486 int err;
3487
3488 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3489 ulen = info->perf_event.kprobe.name_len;
3490 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3491 &type, &missed);
3492 if (err)
3493 return err;
3494 if (type == BPF_FD_TYPE_KRETPROBE)
3495 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3496 else
3497 info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3498
3499 info->perf_event.kprobe.offset = offset;
3500 info->perf_event.kprobe.missed = missed;
3501 if (!kallsyms_show_value(current_cred()))
3502 addr = 0;
3503 info->perf_event.kprobe.addr = addr;
3504 return 0;
3505}
3506#endif
3507
3508#ifdef CONFIG_UPROBE_EVENTS
3509static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3510 struct bpf_link_info *info)
3511{
3512 char __user *uname;
3513 u64 addr, offset;
3514 u32 ulen, type;
3515 int err;
3516
3517 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3518 ulen = info->perf_event.uprobe.name_len;
3519 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3520 &type, NULL);
3521 if (err)
3522 return err;
3523
3524 if (type == BPF_FD_TYPE_URETPROBE)
3525 info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3526 else
3527 info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3528 info->perf_event.uprobe.offset = offset;
3529 return 0;
3530}
3531#endif
3532
3533static int bpf_perf_link_fill_probe(const struct perf_event *event,
3534 struct bpf_link_info *info)
3535{
3536#ifdef CONFIG_KPROBE_EVENTS
3537 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3538 return bpf_perf_link_fill_kprobe(event, info);
3539#endif
3540#ifdef CONFIG_UPROBE_EVENTS
3541 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3542 return bpf_perf_link_fill_uprobe(event, info);
3543#endif
3544 return -EOPNOTSUPP;
3545}
3546
3547static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3548 struct bpf_link_info *info)
3549{
3550 char __user *uname;
3551 u32 ulen;
3552
3553 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3554 ulen = info->perf_event.tracepoint.name_len;
3555 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3556 return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
3557}
3558
3559static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3560 struct bpf_link_info *info)
3561{
3562 info->perf_event.event.type = event->attr.type;
3563 info->perf_event.event.config = event->attr.config;
3564 info->perf_event.type = BPF_PERF_EVENT_EVENT;
3565 return 0;
3566}
3567
3568static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3569 struct bpf_link_info *info)
3570{
3571 struct bpf_perf_link *perf_link;
3572 const struct perf_event *event;
3573
3574 perf_link = container_of(link, struct bpf_perf_link, link);
3575 event = perf_get_event(perf_link->perf_file);
3576 if (IS_ERR(event))
3577 return PTR_ERR(event);
3578
3579 switch (event->prog->type) {
3580 case BPF_PROG_TYPE_PERF_EVENT:
3581 return bpf_perf_link_fill_perf_event(event, info);
3582 case BPF_PROG_TYPE_TRACEPOINT:
3583 return bpf_perf_link_fill_tracepoint(event, info);
3584 case BPF_PROG_TYPE_KPROBE:
3585 return bpf_perf_link_fill_probe(event, info);
3586 default:
3587 return -EOPNOTSUPP;
3588 }
3589}
3590
3591static const struct bpf_link_ops bpf_perf_link_lops = {
3592 .release = bpf_perf_link_release,
3593 .dealloc = bpf_perf_link_dealloc,
3594 .fill_link_info = bpf_perf_link_fill_link_info,
3595};
3596
3597static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3598{
3599 struct bpf_link_primer link_primer;
3600 struct bpf_perf_link *link;
3601 struct perf_event *event;
3602 struct file *perf_file;
3603 int err;
3604
3605 if (attr->link_create.flags)
3606 return -EINVAL;
3607
3608 perf_file = perf_event_get(attr->link_create.target_fd);
3609 if (IS_ERR(perf_file))
3610 return PTR_ERR(perf_file);
3611
3612 link = kzalloc(sizeof(*link), GFP_USER);
3613 if (!link) {
3614 err = -ENOMEM;
3615 goto out_put_file;
3616 }
3617 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3618 link->perf_file = perf_file;
3619
3620 err = bpf_link_prime(&link->link, &link_primer);
3621 if (err) {
3622 kfree(link);
3623 goto out_put_file;
3624 }
3625
3626 event = perf_file->private_data;
3627 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3628 if (err) {
3629 bpf_link_cleanup(&link_primer);
3630 goto out_put_file;
3631 }
3632 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3633 bpf_prog_inc(prog);
3634
3635 return bpf_link_settle(&link_primer);
3636
3637out_put_file:
3638 fput(perf_file);
3639 return err;
3640}
3641#else
3642static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3643{
3644 return -EOPNOTSUPP;
3645}
3646#endif /* CONFIG_PERF_EVENTS */
3647
3648static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3649 const char __user *user_tp_name)
3650{
3651 struct bpf_link_primer link_primer;
3652 struct bpf_raw_tp_link *link;
3653 struct bpf_raw_event_map *btp;
3654 const char *tp_name;
3655 char buf[128];
3656 int err;
3657
3658 switch (prog->type) {
3659 case BPF_PROG_TYPE_TRACING:
3660 case BPF_PROG_TYPE_EXT:
3661 case BPF_PROG_TYPE_LSM:
3662 if (user_tp_name)
3663 /* The attach point for this category of programs
3664 * should be specified via btf_id during program load.
3665 */
3666 return -EINVAL;
3667 if (prog->type == BPF_PROG_TYPE_TRACING &&
3668 prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3669 tp_name = prog->aux->attach_func_name;
3670 break;
3671 }
3672 return bpf_tracing_prog_attach(prog, 0, 0, 0);
3673 case BPF_PROG_TYPE_RAW_TRACEPOINT:
3674 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3675 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3676 return -EFAULT;
3677 buf[sizeof(buf) - 1] = 0;
3678 tp_name = buf;
3679 break;
3680 default:
3681 return -EINVAL;
3682 }
3683
3684 btp = bpf_get_raw_tracepoint(tp_name);
3685 if (!btp)
3686 return -ENOENT;
3687
3688 link = kzalloc(sizeof(*link), GFP_USER);
3689 if (!link) {
3690 err = -ENOMEM;
3691 goto out_put_btp;
3692 }
3693 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3694 &bpf_raw_tp_link_lops, prog);
3695 link->btp = btp;
3696
3697 err = bpf_link_prime(&link->link, &link_primer);
3698 if (err) {
3699 kfree(link);
3700 goto out_put_btp;
3701 }
3702
3703 err = bpf_probe_register(link->btp, prog);
3704 if (err) {
3705 bpf_link_cleanup(&link_primer);
3706 goto out_put_btp;
3707 }
3708
3709 return bpf_link_settle(&link_primer);
3710
3711out_put_btp:
3712 bpf_put_raw_tracepoint(btp);
3713 return err;
3714}
3715
3716#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
3717
3718static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3719{
3720 struct bpf_prog *prog;
3721 int fd;
3722
3723 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
3724 return -EINVAL;
3725
3726 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
3727 if (IS_ERR(prog))
3728 return PTR_ERR(prog);
3729
3730 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
3731 if (fd < 0)
3732 bpf_prog_put(prog);
3733 return fd;
3734}
3735
3736static enum bpf_prog_type
3737attach_type_to_prog_type(enum bpf_attach_type attach_type)
3738{
3739 switch (attach_type) {
3740 case BPF_CGROUP_INET_INGRESS:
3741 case BPF_CGROUP_INET_EGRESS:
3742 return BPF_PROG_TYPE_CGROUP_SKB;
3743 case BPF_CGROUP_INET_SOCK_CREATE:
3744 case BPF_CGROUP_INET_SOCK_RELEASE:
3745 case BPF_CGROUP_INET4_POST_BIND:
3746 case BPF_CGROUP_INET6_POST_BIND:
3747 return BPF_PROG_TYPE_CGROUP_SOCK;
3748 case BPF_CGROUP_INET4_BIND:
3749 case BPF_CGROUP_INET6_BIND:
3750 case BPF_CGROUP_INET4_CONNECT:
3751 case BPF_CGROUP_INET6_CONNECT:
3752 case BPF_CGROUP_UNIX_CONNECT:
3753 case BPF_CGROUP_INET4_GETPEERNAME:
3754 case BPF_CGROUP_INET6_GETPEERNAME:
3755 case BPF_CGROUP_UNIX_GETPEERNAME:
3756 case BPF_CGROUP_INET4_GETSOCKNAME:
3757 case BPF_CGROUP_INET6_GETSOCKNAME:
3758 case BPF_CGROUP_UNIX_GETSOCKNAME:
3759 case BPF_CGROUP_UDP4_SENDMSG:
3760 case BPF_CGROUP_UDP6_SENDMSG:
3761 case BPF_CGROUP_UNIX_SENDMSG:
3762 case BPF_CGROUP_UDP4_RECVMSG:
3763 case BPF_CGROUP_UDP6_RECVMSG:
3764 case BPF_CGROUP_UNIX_RECVMSG:
3765 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3766 case BPF_CGROUP_SOCK_OPS:
3767 return BPF_PROG_TYPE_SOCK_OPS;
3768 case BPF_CGROUP_DEVICE:
3769 return BPF_PROG_TYPE_CGROUP_DEVICE;
3770 case BPF_SK_MSG_VERDICT:
3771 return BPF_PROG_TYPE_SK_MSG;
3772 case BPF_SK_SKB_STREAM_PARSER:
3773 case BPF_SK_SKB_STREAM_VERDICT:
3774 case BPF_SK_SKB_VERDICT:
3775 return BPF_PROG_TYPE_SK_SKB;
3776 case BPF_LIRC_MODE2:
3777 return BPF_PROG_TYPE_LIRC_MODE2;
3778 case BPF_FLOW_DISSECTOR:
3779 return BPF_PROG_TYPE_FLOW_DISSECTOR;
3780 case BPF_CGROUP_SYSCTL:
3781 return BPF_PROG_TYPE_CGROUP_SYSCTL;
3782 case BPF_CGROUP_GETSOCKOPT:
3783 case BPF_CGROUP_SETSOCKOPT:
3784 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3785 case BPF_TRACE_ITER:
3786 case BPF_TRACE_RAW_TP:
3787 case BPF_TRACE_FENTRY:
3788 case BPF_TRACE_FEXIT:
3789 case BPF_MODIFY_RETURN:
3790 return BPF_PROG_TYPE_TRACING;
3791 case BPF_LSM_MAC:
3792 return BPF_PROG_TYPE_LSM;
3793 case BPF_SK_LOOKUP:
3794 return BPF_PROG_TYPE_SK_LOOKUP;
3795 case BPF_XDP:
3796 return BPF_PROG_TYPE_XDP;
3797 case BPF_LSM_CGROUP:
3798 return BPF_PROG_TYPE_LSM;
3799 case BPF_TCX_INGRESS:
3800 case BPF_TCX_EGRESS:
3801 case BPF_NETKIT_PRIMARY:
3802 case BPF_NETKIT_PEER:
3803 return BPF_PROG_TYPE_SCHED_CLS;
3804 default:
3805 return BPF_PROG_TYPE_UNSPEC;
3806 }
3807}
3808
3809static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3810 enum bpf_attach_type attach_type)
3811{
3812 enum bpf_prog_type ptype;
3813
3814 switch (prog->type) {
3815 case BPF_PROG_TYPE_CGROUP_SOCK:
3816 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3817 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3818 case BPF_PROG_TYPE_SK_LOOKUP:
3819 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3820 case BPF_PROG_TYPE_CGROUP_SKB:
3821 if (!capable(CAP_NET_ADMIN))
3822 /* cg-skb progs can be loaded by unpriv user.
3823 * check permissions at attach time.
3824 */
3825 return -EPERM;
3826 return prog->enforce_expected_attach_type &&
3827 prog->expected_attach_type != attach_type ?
3828 -EINVAL : 0;
3829 case BPF_PROG_TYPE_EXT:
3830 return 0;
3831 case BPF_PROG_TYPE_NETFILTER:
3832 if (attach_type != BPF_NETFILTER)
3833 return -EINVAL;
3834 return 0;
3835 case BPF_PROG_TYPE_PERF_EVENT:
3836 case BPF_PROG_TYPE_TRACEPOINT:
3837 if (attach_type != BPF_PERF_EVENT)
3838 return -EINVAL;
3839 return 0;
3840 case BPF_PROG_TYPE_KPROBE:
3841 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
3842 attach_type != BPF_TRACE_KPROBE_MULTI)
3843 return -EINVAL;
3844 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
3845 attach_type != BPF_TRACE_UPROBE_MULTI)
3846 return -EINVAL;
3847 if (attach_type != BPF_PERF_EVENT &&
3848 attach_type != BPF_TRACE_KPROBE_MULTI &&
3849 attach_type != BPF_TRACE_UPROBE_MULTI)
3850 return -EINVAL;
3851 return 0;
3852 case BPF_PROG_TYPE_SCHED_CLS:
3853 if (attach_type != BPF_TCX_INGRESS &&
3854 attach_type != BPF_TCX_EGRESS &&
3855 attach_type != BPF_NETKIT_PRIMARY &&
3856 attach_type != BPF_NETKIT_PEER)
3857 return -EINVAL;
3858 return 0;
3859 default:
3860 ptype = attach_type_to_prog_type(attach_type);
3861 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
3862 return -EINVAL;
3863 return 0;
3864 }
3865}
3866
3867#define BPF_PROG_ATTACH_LAST_FIELD expected_revision
3868
3869#define BPF_F_ATTACH_MASK_BASE \
3870 (BPF_F_ALLOW_OVERRIDE | \
3871 BPF_F_ALLOW_MULTI | \
3872 BPF_F_REPLACE)
3873
3874#define BPF_F_ATTACH_MASK_MPROG \
3875 (BPF_F_REPLACE | \
3876 BPF_F_BEFORE | \
3877 BPF_F_AFTER | \
3878 BPF_F_ID | \
3879 BPF_F_LINK)
3880
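/* Legacy attach points (e.g. the cgroup-style hooks) only accept the BASE
 * flag set. Attach types whose program type supports the mprog API (tcx and
 * netkit) accept the MPROG set together with relative_fd/expected_revision,
 * which is why bpf_prog_attach() and bpf_prog_detach() below select the mask
 * via bpf_mprog_supported().
 */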
3881static int bpf_prog_attach(const union bpf_attr *attr)
3882{
3883 enum bpf_prog_type ptype;
3884 struct bpf_prog *prog;
3885 int ret;
3886
3887 if (CHECK_ATTR(BPF_PROG_ATTACH))
3888 return -EINVAL;
3889
3890 ptype = attach_type_to_prog_type(attr->attach_type);
3891 if (ptype == BPF_PROG_TYPE_UNSPEC)
3892 return -EINVAL;
3893 if (bpf_mprog_supported(ptype)) {
3894 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
3895 return -EINVAL;
3896 } else {
3897 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
3898 return -EINVAL;
3899 if (attr->relative_fd ||
3900 attr->expected_revision)
3901 return -EINVAL;
3902 }
3903
3904 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3905 if (IS_ERR(prog))
3906 return PTR_ERR(prog);
3907
3908 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3909 bpf_prog_put(prog);
3910 return -EINVAL;
3911 }
3912
3913 switch (ptype) {
3914 case BPF_PROG_TYPE_SK_SKB:
3915 case BPF_PROG_TYPE_SK_MSG:
3916 ret = sock_map_get_from_fd(attr, prog);
3917 break;
3918 case BPF_PROG_TYPE_LIRC_MODE2:
3919 ret = lirc_prog_attach(attr, prog);
3920 break;
3921 case BPF_PROG_TYPE_FLOW_DISSECTOR:
3922 ret = netns_bpf_prog_attach(attr, prog);
3923 break;
3924 case BPF_PROG_TYPE_CGROUP_DEVICE:
3925 case BPF_PROG_TYPE_CGROUP_SKB:
3926 case BPF_PROG_TYPE_CGROUP_SOCK:
3927 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3928 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3929 case BPF_PROG_TYPE_CGROUP_SYSCTL:
3930 case BPF_PROG_TYPE_SOCK_OPS:
3931 case BPF_PROG_TYPE_LSM:
3932 if (ptype == BPF_PROG_TYPE_LSM &&
3933 prog->expected_attach_type != BPF_LSM_CGROUP)
3934 ret = -EINVAL;
3935 else
3936 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3937 break;
3938 case BPF_PROG_TYPE_SCHED_CLS:
3939 if (attr->attach_type == BPF_TCX_INGRESS ||
3940 attr->attach_type == BPF_TCX_EGRESS)
3941 ret = tcx_prog_attach(attr, prog);
3942 else
3943 ret = netkit_prog_attach(attr, prog);
3944 break;
3945 default:
3946 ret = -EINVAL;
3947 }
3948
3949 if (ret)
3950 bpf_prog_put(prog);
3951 return ret;
3952}
3953
3954#define BPF_PROG_DETACH_LAST_FIELD expected_revision
3955
3956static int bpf_prog_detach(const union bpf_attr *attr)
3957{
3958 struct bpf_prog *prog = NULL;
3959 enum bpf_prog_type ptype;
3960 int ret;
3961
3962 if (CHECK_ATTR(BPF_PROG_DETACH))
3963 return -EINVAL;
3964
3965 ptype = attach_type_to_prog_type(attr->attach_type);
3966 if (bpf_mprog_supported(ptype)) {
3967 if (ptype == BPF_PROG_TYPE_UNSPEC)
3968 return -EINVAL;
3969 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
3970 return -EINVAL;
3971 if (attr->attach_bpf_fd) {
3972 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3973 if (IS_ERR(prog))
3974 return PTR_ERR(prog);
3975 }
3976 } else if (attr->attach_flags ||
3977 attr->relative_fd ||
3978 attr->expected_revision) {
3979 return -EINVAL;
3980 }
3981
3982 switch (ptype) {
3983 case BPF_PROG_TYPE_SK_MSG:
3984 case BPF_PROG_TYPE_SK_SKB:
3985 ret = sock_map_prog_detach(attr, ptype);
3986 break;
3987 case BPF_PROG_TYPE_LIRC_MODE2:
3988 ret = lirc_prog_detach(attr);
3989 break;
3990 case BPF_PROG_TYPE_FLOW_DISSECTOR:
3991 ret = netns_bpf_prog_detach(attr, ptype);
3992 break;
3993 case BPF_PROG_TYPE_CGROUP_DEVICE:
3994 case BPF_PROG_TYPE_CGROUP_SKB:
3995 case BPF_PROG_TYPE_CGROUP_SOCK:
3996 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3997 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3998 case BPF_PROG_TYPE_CGROUP_SYSCTL:
3999 case BPF_PROG_TYPE_SOCK_OPS:
4000 case BPF_PROG_TYPE_LSM:
4001 ret = cgroup_bpf_prog_detach(attr, ptype);
4002 break;
4003 case BPF_PROG_TYPE_SCHED_CLS:
4004 if (attr->attach_type == BPF_TCX_INGRESS ||
4005 attr->attach_type == BPF_TCX_EGRESS)
4006 ret = tcx_prog_detach(attr, prog);
4007 else
4008 ret = netkit_prog_detach(attr, prog);
4009 break;
4010 default:
4011 ret = -EINVAL;
4012 }
4013
4014 if (prog)
4015 bpf_prog_put(prog);
4016 return ret;
4017}
4018
4019#define BPF_PROG_QUERY_LAST_FIELD query.revision
4020
4021static int bpf_prog_query(const union bpf_attr *attr,
4022 union bpf_attr __user *uattr)
4023{
4024 if (!capable(CAP_NET_ADMIN))
4025 return -EPERM;
4026 if (CHECK_ATTR(BPF_PROG_QUERY))
4027 return -EINVAL;
4028 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
4029 return -EINVAL;
4030
4031 switch (attr->query.attach_type) {
4032 case BPF_CGROUP_INET_INGRESS:
4033 case BPF_CGROUP_INET_EGRESS:
4034 case BPF_CGROUP_INET_SOCK_CREATE:
4035 case BPF_CGROUP_INET_SOCK_RELEASE:
4036 case BPF_CGROUP_INET4_BIND:
4037 case BPF_CGROUP_INET6_BIND:
4038 case BPF_CGROUP_INET4_POST_BIND:
4039 case BPF_CGROUP_INET6_POST_BIND:
4040 case BPF_CGROUP_INET4_CONNECT:
4041 case BPF_CGROUP_INET6_CONNECT:
4042 case BPF_CGROUP_UNIX_CONNECT:
4043 case BPF_CGROUP_INET4_GETPEERNAME:
4044 case BPF_CGROUP_INET6_GETPEERNAME:
4045 case BPF_CGROUP_UNIX_GETPEERNAME:
4046 case BPF_CGROUP_INET4_GETSOCKNAME:
4047 case BPF_CGROUP_INET6_GETSOCKNAME:
4048 case BPF_CGROUP_UNIX_GETSOCKNAME:
4049 case BPF_CGROUP_UDP4_SENDMSG:
4050 case BPF_CGROUP_UDP6_SENDMSG:
4051 case BPF_CGROUP_UNIX_SENDMSG:
4052 case BPF_CGROUP_UDP4_RECVMSG:
4053 case BPF_CGROUP_UDP6_RECVMSG:
4054 case BPF_CGROUP_UNIX_RECVMSG:
4055 case BPF_CGROUP_SOCK_OPS:
4056 case BPF_CGROUP_DEVICE:
4057 case BPF_CGROUP_SYSCTL:
4058 case BPF_CGROUP_GETSOCKOPT:
4059 case BPF_CGROUP_SETSOCKOPT:
4060 case BPF_LSM_CGROUP:
4061 return cgroup_bpf_prog_query(attr, uattr);
4062 case BPF_LIRC_MODE2:
4063 return lirc_prog_query(attr, uattr);
4064 case BPF_FLOW_DISSECTOR:
4065 case BPF_SK_LOOKUP:
4066 return netns_bpf_prog_query(attr, uattr);
4067 case BPF_SK_SKB_STREAM_PARSER:
4068 case BPF_SK_SKB_STREAM_VERDICT:
4069 case BPF_SK_MSG_VERDICT:
4070 case BPF_SK_SKB_VERDICT:
4071 return sock_map_bpf_prog_query(attr, uattr);
4072 case BPF_TCX_INGRESS:
4073 case BPF_TCX_EGRESS:
4074 return tcx_prog_query(attr, uattr);
4075 case BPF_NETKIT_PRIMARY:
4076 case BPF_NETKIT_PEER:
4077 return netkit_prog_query(attr, uattr);
4078 default:
4079 return -EINVAL;
4080 }
4081}
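
/*
 * Illustrative user-space sketch (not part of this file): querying the
 * programs attached to a cgroup with BPF_PROG_QUERY, using the same
 * headers as the BPF_PROG_ATTACH sketch above. prog_cnt is in/out: the
 * caller passes the capacity of the ids buffer and, on success, the
 * kernel writes back how many programs are attached.
 *
 *	static int query_cgroup_ingress(int cgroup_fd, __u32 *ids, __u32 *cnt)
 *	{
 *		union bpf_attr attr;
 *		int err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.query.target_fd = cgroup_fd;
 *		attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
 *		attr.query.prog_ids = (__u64)(unsigned long)ids;
 *		attr.query.prog_cnt = *cnt;	// capacity of ids[]
 *
 *		err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *		if (!err)
 *			*cnt = attr.query.prog_cnt;
 *		return err;
 *	}
 */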
4082
4083#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
4084
4085static int bpf_prog_test_run(const union bpf_attr *attr,
4086 union bpf_attr __user *uattr)
4087{
4088 struct bpf_prog *prog;
4089 int ret = -ENOTSUPP;
4090
4091 if (CHECK_ATTR(BPF_PROG_TEST_RUN))
4092 return -EINVAL;
4093
4094 if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
4095 (!attr->test.ctx_size_in && attr->test.ctx_in))
4096 return -EINVAL;
4097
4098 if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
4099 (!attr->test.ctx_size_out && attr->test.ctx_out))
4100 return -EINVAL;
4101
4102 prog = bpf_prog_get(attr->test.prog_fd);
4103 if (IS_ERR(prog))
4104 return PTR_ERR(prog);
4105
4106 if (prog->aux->ops->test_run)
4107 ret = prog->aux->ops->test_run(prog, attr, uattr);
4108
4109 bpf_prog_put(prog);
4110 return ret;
4111}
4112
4113#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
4114
4115static int bpf_obj_get_next_id(const union bpf_attr *attr,
4116 union bpf_attr __user *uattr,
4117 struct idr *idr,
4118 spinlock_t *lock)
4119{
4120 u32 next_id = attr->start_id;
4121 int err = 0;
4122
4123 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
4124 return -EINVAL;
4125
4126 if (!capable(CAP_SYS_ADMIN))
4127 return -EPERM;
4128
4129 next_id++;
4130 spin_lock_bh(lock);
4131 if (!idr_get_next(idr, &next_id))
4132 err = -ENOENT;
4133 spin_unlock_bh(lock);
4134
4135 if (!err)
4136 err = put_user(next_id, &uattr->next_id);
4137
4138 return err;
4139}
4140
4141struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
4142{
4143 struct bpf_map *map;
4144
4145 spin_lock_bh(&map_idr_lock);
4146again:
4147 map = idr_get_next(&map_idr, id);
4148 if (map) {
4149 map = __bpf_map_inc_not_zero(map, false);
4150 if (IS_ERR(map)) {
4151 (*id)++;
4152 goto again;
4153 }
4154 }
4155 spin_unlock_bh(&map_idr_lock);
4156
4157 return map;
4158}
4159
4160struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
4161{
4162 struct bpf_prog *prog;
4163
4164 spin_lock_bh(&prog_idr_lock);
4165again:
4166 prog = idr_get_next(&prog_idr, id);
4167 if (prog) {
4168 prog = bpf_prog_inc_not_zero(prog);
4169 if (IS_ERR(prog)) {
4170 (*id)++;
4171 goto again;
4172 }
4173 }
4174 spin_unlock_bh(&prog_idr_lock);
4175
4176 return prog;
4177}
4178
4179#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
4180
4181struct bpf_prog *bpf_prog_by_id(u32 id)
4182{
4183 struct bpf_prog *prog;
4184
4185 if (!id)
4186 return ERR_PTR(-ENOENT);
4187
4188 spin_lock_bh(&prog_idr_lock);
4189 prog = idr_find(&prog_idr, id);
4190 if (prog)
4191 prog = bpf_prog_inc_not_zero(prog);
4192 else
4193 prog = ERR_PTR(-ENOENT);
4194 spin_unlock_bh(&prog_idr_lock);
4195 return prog;
4196}
4197
4198static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
4199{
4200 struct bpf_prog *prog;
4201 u32 id = attr->prog_id;
4202 int fd;
4203
4204 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
4205 return -EINVAL;
4206
4207 if (!capable(CAP_SYS_ADMIN))
4208 return -EPERM;
4209
4210 prog = bpf_prog_by_id(id);
4211 if (IS_ERR(prog))
4212 return PTR_ERR(prog);
4213
4214 fd = bpf_prog_new_fd(prog);
4215 if (fd < 0)
4216 bpf_prog_put(prog);
4217
4218 return fd;
4219}
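
/*
 * Illustrative user-space sketch (not part of this file): walking every
 * loaded program by pairing BPF_PROG_GET_NEXT_ID with
 * BPF_PROG_GET_FD_BY_ID, both of which require CAP_SYS_ADMIN as checked
 * above. Headers as in the BPF_PROG_ATTACH sketch; process_prog() is a
 * hypothetical callback supplied by the caller.
 *
 *	static void for_each_prog(void (*process_prog)(int fd))
 *	{
 *		union bpf_attr attr;
 *		__u32 id = 0;
 *		int fd;
 *
 *		for (;;) {
 *			memset(&attr, 0, sizeof(attr));
 *			attr.start_id = id;
 *			if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *				    sizeof(attr)))
 *				break;		// -ENOENT: no more IDs
 *			id = attr.next_id;
 *
 *			memset(&attr, 0, sizeof(attr));
 *			attr.prog_id = id;
 *			fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr,
 *				     sizeof(attr));
 *			if (fd < 0)
 *				continue;	// program may have gone away
 *			process_prog(fd);
 *			close(fd);
 *		}
 *	}
 */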
4220
4221#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
4222
4223static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
4224{
4225 struct bpf_map *map;
4226 u32 id = attr->map_id;
4227 int f_flags;
4228 int fd;
4229
4230 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
4231 attr->open_flags & ~BPF_OBJ_FLAG_MASK)
4232 return -EINVAL;
4233
4234 if (!capable(CAP_SYS_ADMIN))
4235 return -EPERM;
4236
4237 f_flags = bpf_get_file_flag(attr->open_flags);
4238 if (f_flags < 0)
4239 return f_flags;
4240
4241 spin_lock_bh(&map_idr_lock);
4242 map = idr_find(&map_idr, id);
4243 if (map)
4244 map = __bpf_map_inc_not_zero(map, true);
4245 else
4246 map = ERR_PTR(-ENOENT);
4247 spin_unlock_bh(&map_idr_lock);
4248
4249 if (IS_ERR(map))
4250 return PTR_ERR(map);
4251
4252 fd = bpf_map_new_fd(map, f_flags);
4253 if (fd < 0)
4254 bpf_map_put_with_uref(map);
4255
4256 return fd;
4257}
4258
4259static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4260 unsigned long addr, u32 *off,
4261 u32 *type)
4262{
4263 const struct bpf_map *map;
4264 int i;
4265
4266 mutex_lock(&prog->aux->used_maps_mutex);
4267 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
4268 map = prog->aux->used_maps[i];
4269 if (map == (void *)addr) {
4270 *type = BPF_PSEUDO_MAP_FD;
4271 goto out;
4272 }
4273 if (!map->ops->map_direct_value_meta)
4274 continue;
4275 if (!map->ops->map_direct_value_meta(map, addr, off)) {
4276 *type = BPF_PSEUDO_MAP_VALUE;
4277 goto out;
4278 }
4279 }
4280 map = NULL;
4281
4282out:
4283 mutex_unlock(&prog->aux->used_maps_mutex);
4284 return map;
4285}
4286
4287static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4288 const struct cred *f_cred)
4289{
4290 const struct bpf_map *map;
4291 struct bpf_insn *insns;
4292 u32 off, type;
4293 u64 imm;
4294 u8 code;
4295 int i;
4296
4297 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
4298 GFP_USER);
4299 if (!insns)
4300 return insns;
4301
4302 for (i = 0; i < prog->len; i++) {
4303 code = insns[i].code;
4304
4305 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
4306 insns[i].code = BPF_JMP | BPF_CALL;
4307 insns[i].imm = BPF_FUNC_tail_call;
4308 /* fall-through */
4309 }
4310 if (code == (BPF_JMP | BPF_CALL) ||
4311 code == (BPF_JMP | BPF_CALL_ARGS)) {
4312 if (code == (BPF_JMP | BPF_CALL_ARGS))
4313 insns[i].code = BPF_JMP | BPF_CALL;
4314 if (!bpf_dump_raw_ok(f_cred))
4315 insns[i].imm = 0;
4316 continue;
4317 }
4318 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
4319 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
4320 continue;
4321 }
4322
4323 if (code != (BPF_LD | BPF_IMM | BPF_DW))
4324 continue;
4325
4326 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
4327 map = bpf_map_from_imm(prog, imm, &off, &type);
4328 if (map) {
4329 insns[i].src_reg = type;
4330 insns[i].imm = map->id;
4331 insns[i + 1].imm = off;
4332 continue;
4333 }
4334 }
4335
4336 return insns;
4337}
4338
4339static int set_info_rec_size(struct bpf_prog_info *info)
4340{
4341 /*
4342	 * Ensure each info.*_rec_size matches the size the kernel expects,
4343	 *
4344	 * or
4345	 *
4346	 * allow a zero *_rec_size only if the matching _cnt is also zero.
4347	 * In that case, the kernel writes the expected _rec_size back
4348	 * into the info struct returned to user space.
4349 */
4350
4351 if ((info->nr_func_info || info->func_info_rec_size) &&
4352 info->func_info_rec_size != sizeof(struct bpf_func_info))
4353 return -EINVAL;
4354
4355 if ((info->nr_line_info || info->line_info_rec_size) &&
4356 info->line_info_rec_size != sizeof(struct bpf_line_info))
4357 return -EINVAL;
4358
4359 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
4360 info->jited_line_info_rec_size != sizeof(__u64))
4361 return -EINVAL;
4362
4363 info->func_info_rec_size = sizeof(struct bpf_func_info);
4364 info->line_info_rec_size = sizeof(struct bpf_line_info);
4365 info->jited_line_info_rec_size = sizeof(__u64);
4366
4367 return 0;
4368}
4369
4370static int bpf_prog_get_info_by_fd(struct file *file,
4371 struct bpf_prog *prog,
4372 const union bpf_attr *attr,
4373 union bpf_attr __user *uattr)
4374{
4375 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4376 struct btf *attach_btf = bpf_prog_get_target_btf(prog);
4377 struct bpf_prog_info info;
4378 u32 info_len = attr->info.info_len;
4379 struct bpf_prog_kstats stats;
4380 char __user *uinsns;
4381 u32 ulen;
4382 int err;
4383
4384 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4385 if (err)
4386 return err;
4387 info_len = min_t(u32, sizeof(info), info_len);
4388
4389 memset(&info, 0, sizeof(info));
4390 if (copy_from_user(&info, uinfo, info_len))
4391 return -EFAULT;
4392
4393 info.type = prog->type;
4394 info.id = prog->aux->id;
4395 info.load_time = prog->aux->load_time;
4396 info.created_by_uid = from_kuid_munged(current_user_ns(),
4397 prog->aux->user->uid);
4398 info.gpl_compatible = prog->gpl_compatible;
4399
4400 memcpy(info.tag, prog->tag, sizeof(prog->tag));
4401 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
4402
4403 mutex_lock(&prog->aux->used_maps_mutex);
4404 ulen = info.nr_map_ids;
4405 info.nr_map_ids = prog->aux->used_map_cnt;
4406 ulen = min_t(u32, info.nr_map_ids, ulen);
4407 if (ulen) {
4408 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
4409 u32 i;
4410
4411 for (i = 0; i < ulen; i++)
4412 if (put_user(prog->aux->used_maps[i]->id,
4413 &user_map_ids[i])) {
4414 mutex_unlock(&prog->aux->used_maps_mutex);
4415 return -EFAULT;
4416 }
4417 }
4418 mutex_unlock(&prog->aux->used_maps_mutex);
4419
4420 err = set_info_rec_size(&info);
4421 if (err)
4422 return err;
4423
4424 bpf_prog_get_stats(prog, &stats);
4425 info.run_time_ns = stats.nsecs;
4426 info.run_cnt = stats.cnt;
4427 info.recursion_misses = stats.misses;
4428
4429 info.verified_insns = prog->aux->verified_insns;
4430
4431 if (!bpf_capable()) {
4432 info.jited_prog_len = 0;
4433 info.xlated_prog_len = 0;
4434 info.nr_jited_ksyms = 0;
4435 info.nr_jited_func_lens = 0;
4436 info.nr_func_info = 0;
4437 info.nr_line_info = 0;
4438 info.nr_jited_line_info = 0;
4439 goto done;
4440 }
4441
4442 ulen = info.xlated_prog_len;
4443 info.xlated_prog_len = bpf_prog_insn_size(prog);
4444 if (info.xlated_prog_len && ulen) {
4445 struct bpf_insn *insns_sanitized;
4446 bool fault;
4447
4448 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4449 info.xlated_prog_insns = 0;
4450 goto done;
4451 }
4452 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4453 if (!insns_sanitized)
4454 return -ENOMEM;
4455 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4456 ulen = min_t(u32, info.xlated_prog_len, ulen);
4457 fault = copy_to_user(uinsns, insns_sanitized, ulen);
4458 kfree(insns_sanitized);
4459 if (fault)
4460 return -EFAULT;
4461 }
4462
4463 if (bpf_prog_is_offloaded(prog->aux)) {
4464 err = bpf_prog_offload_info_fill(&info, prog);
4465 if (err)
4466 return err;
4467 goto done;
4468 }
4469
4470	/* NOTE: the following code is skipped for offloaded programs;
4471	 * bpf_prog_offload_info_fill() above is where the corresponding
4472	 * fields are filled in for offload.
4473 */
4474 ulen = info.jited_prog_len;
4475 if (prog->aux->func_cnt) {
4476 u32 i;
4477
4478 info.jited_prog_len = 0;
4479 for (i = 0; i < prog->aux->func_cnt; i++)
4480 info.jited_prog_len += prog->aux->func[i]->jited_len;
4481 } else {
4482 info.jited_prog_len = prog->jited_len;
4483 }
4484
4485 if (info.jited_prog_len && ulen) {
4486 if (bpf_dump_raw_ok(file->f_cred)) {
4487 uinsns = u64_to_user_ptr(info.jited_prog_insns);
4488 ulen = min_t(u32, info.jited_prog_len, ulen);
4489
4490 /* for multi-function programs, copy the JITed
4491 * instructions for all the functions
4492 */
4493 if (prog->aux->func_cnt) {
4494 u32 len, free, i;
4495 u8 *img;
4496
4497 free = ulen;
4498 for (i = 0; i < prog->aux->func_cnt; i++) {
4499 len = prog->aux->func[i]->jited_len;
4500 len = min_t(u32, len, free);
4501 img = (u8 *) prog->aux->func[i]->bpf_func;
4502 if (copy_to_user(uinsns, img, len))
4503 return -EFAULT;
4504 uinsns += len;
4505 free -= len;
4506 if (!free)
4507 break;
4508 }
4509 } else {
4510 if (copy_to_user(uinsns, prog->bpf_func, ulen))
4511 return -EFAULT;
4512 }
4513 } else {
4514 info.jited_prog_insns = 0;
4515 }
4516 }
4517
4518 ulen = info.nr_jited_ksyms;
4519 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4520 if (ulen) {
4521 if (bpf_dump_raw_ok(file->f_cred)) {
4522 unsigned long ksym_addr;
4523 u64 __user *user_ksyms;
4524 u32 i;
4525
4526 /* copy the address of the kernel symbol
4527 * corresponding to each function
4528 */
4529 ulen = min_t(u32, info.nr_jited_ksyms, ulen);
4530 user_ksyms = u64_to_user_ptr(info.jited_ksyms);
4531 if (prog->aux->func_cnt) {
4532 for (i = 0; i < ulen; i++) {
4533 ksym_addr = (unsigned long)
4534 prog->aux->func[i]->bpf_func;
4535 if (put_user((u64) ksym_addr,
4536 &user_ksyms[i]))
4537 return -EFAULT;
4538 }
4539 } else {
4540 ksym_addr = (unsigned long) prog->bpf_func;
4541 if (put_user((u64) ksym_addr, &user_ksyms[0]))
4542 return -EFAULT;
4543 }
4544 } else {
4545 info.jited_ksyms = 0;
4546 }
4547 }
4548
4549 ulen = info.nr_jited_func_lens;
4550 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4551 if (ulen) {
4552 if (bpf_dump_raw_ok(file->f_cred)) {
4553 u32 __user *user_lens;
4554 u32 func_len, i;
4555
4556 /* copy the JITed image lengths for each function */
4557 ulen = min_t(u32, info.nr_jited_func_lens, ulen);
4558 user_lens = u64_to_user_ptr(info.jited_func_lens);
4559 if (prog->aux->func_cnt) {
4560 for (i = 0; i < ulen; i++) {
4561 func_len =
4562 prog->aux->func[i]->jited_len;
4563 if (put_user(func_len, &user_lens[i]))
4564 return -EFAULT;
4565 }
4566 } else {
4567 func_len = prog->jited_len;
4568 if (put_user(func_len, &user_lens[0]))
4569 return -EFAULT;
4570 }
4571 } else {
4572 info.jited_func_lens = 0;
4573 }
4574 }
4575
4576 if (prog->aux->btf)
4577 info.btf_id = btf_obj_id(prog->aux->btf);
4578 info.attach_btf_id = prog->aux->attach_btf_id;
4579 if (attach_btf)
4580 info.attach_btf_obj_id = btf_obj_id(attach_btf);
4581
4582 ulen = info.nr_func_info;
4583 info.nr_func_info = prog->aux->func_info_cnt;
4584 if (info.nr_func_info && ulen) {
4585 char __user *user_finfo;
4586
4587 user_finfo = u64_to_user_ptr(info.func_info);
4588 ulen = min_t(u32, info.nr_func_info, ulen);
4589 if (copy_to_user(user_finfo, prog->aux->func_info,
4590 info.func_info_rec_size * ulen))
4591 return -EFAULT;
4592 }
4593
4594 ulen = info.nr_line_info;
4595 info.nr_line_info = prog->aux->nr_linfo;
4596 if (info.nr_line_info && ulen) {
4597 __u8 __user *user_linfo;
4598
4599 user_linfo = u64_to_user_ptr(info.line_info);
4600 ulen = min_t(u32, info.nr_line_info, ulen);
4601 if (copy_to_user(user_linfo, prog->aux->linfo,
4602 info.line_info_rec_size * ulen))
4603 return -EFAULT;
4604 }
4605
4606 ulen = info.nr_jited_line_info;
4607 if (prog->aux->jited_linfo)
4608 info.nr_jited_line_info = prog->aux->nr_linfo;
4609 else
4610 info.nr_jited_line_info = 0;
4611 if (info.nr_jited_line_info && ulen) {
4612 if (bpf_dump_raw_ok(file->f_cred)) {
4613 unsigned long line_addr;
4614 __u64 __user *user_linfo;
4615 u32 i;
4616
4617 user_linfo = u64_to_user_ptr(info.jited_line_info);
4618 ulen = min_t(u32, info.nr_jited_line_info, ulen);
4619 for (i = 0; i < ulen; i++) {
4620 line_addr = (unsigned long)prog->aux->jited_linfo[i];
4621 if (put_user((__u64)line_addr, &user_linfo[i]))
4622 return -EFAULT;
4623 }
4624 } else {
4625 info.jited_line_info = 0;
4626 }
4627 }
4628
4629 ulen = info.nr_prog_tags;
4630 info.nr_prog_tags = prog->aux->func_cnt ? : 1;
4631 if (ulen) {
4632 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
4633 u32 i;
4634
4635 user_prog_tags = u64_to_user_ptr(info.prog_tags);
4636 ulen = min_t(u32, info.nr_prog_tags, ulen);
4637 if (prog->aux->func_cnt) {
4638 for (i = 0; i < ulen; i++) {
4639 if (copy_to_user(user_prog_tags[i],
4640 prog->aux->func[i]->tag,
4641 BPF_TAG_SIZE))
4642 return -EFAULT;
4643 }
4644 } else {
4645 if (copy_to_user(user_prog_tags[0],
4646 prog->tag, BPF_TAG_SIZE))
4647 return -EFAULT;
4648 }
4649 }
4650
4651done:
4652 if (copy_to_user(uinfo, &info, info_len) ||
4653 put_user(info_len, &uattr->info.info_len))
4654 return -EFAULT;
4655
4656 return 0;
4657}
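
/*
 * Illustrative user-space sketch (not part of this file): reading basic
 * program metadata with BPF_OBJ_GET_INFO_BY_FD. The kernel copies back at
 * most info_len bytes and writes the length it actually filled into
 * info_len, so a zeroed struct bpf_prog_info is enough for the scalar
 * fields; buffers for xlated/jited instructions, map IDs and so on would
 * be supplied through the corresponding pointer/length pairs in the same
 * way. Headers as in the BPF_PROG_ATTACH sketch.
 *
 *	static int get_prog_info(int prog_fd, struct bpf_prog_info *info)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(info, 0, sizeof(*info));
 *		memset(&attr, 0, sizeof(attr));
 *		attr.info.bpf_fd = prog_fd;
 *		attr.info.info_len = sizeof(*info);
 *		attr.info.info = (__u64)(unsigned long)info;
 *
 *		return syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr,
 *			       sizeof(attr));
 *	}
 */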
4658
4659static int bpf_map_get_info_by_fd(struct file *file,
4660 struct bpf_map *map,
4661 const union bpf_attr *attr,
4662 union bpf_attr __user *uattr)
4663{
4664 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4665 struct bpf_map_info info;
4666 u32 info_len = attr->info.info_len;
4667 int err;
4668
4669 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4670 if (err)
4671 return err;
4672 info_len = min_t(u32, sizeof(info), info_len);
4673
4674 memset(&info, 0, sizeof(info));
4675 info.type = map->map_type;
4676 info.id = map->id;
4677 info.key_size = map->key_size;
4678 info.value_size = map->value_size;
4679 info.max_entries = map->max_entries;
4680 info.map_flags = map->map_flags;
4681 info.map_extra = map->map_extra;
4682 memcpy(info.name, map->name, sizeof(map->name));
4683
4684 if (map->btf) {
4685 info.btf_id = btf_obj_id(map->btf);
4686 info.btf_key_type_id = map->btf_key_type_id;
4687 info.btf_value_type_id = map->btf_value_type_id;
4688 }
4689 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4690
4691 if (bpf_map_is_offloaded(map)) {
4692 err = bpf_map_offload_info_fill(&info, map);
4693 if (err)
4694 return err;
4695 }
4696
4697 if (copy_to_user(uinfo, &info, info_len) ||
4698 put_user(info_len, &uattr->info.info_len))
4699 return -EFAULT;
4700
4701 return 0;
4702}
4703
4704static int bpf_btf_get_info_by_fd(struct file *file,
4705 struct btf *btf,
4706 const union bpf_attr *attr,
4707 union bpf_attr __user *uattr)
4708{
4709 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4710 u32 info_len = attr->info.info_len;
4711 int err;
4712
4713 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
4714 if (err)
4715 return err;
4716
4717 return btf_get_info_by_fd(btf, attr, uattr);
4718}
4719
4720static int bpf_link_get_info_by_fd(struct file *file,
4721 struct bpf_link *link,
4722 const union bpf_attr *attr,
4723 union bpf_attr __user *uattr)
4724{
4725 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4726 struct bpf_link_info info;
4727 u32 info_len = attr->info.info_len;
4728 int err;
4729
4730 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4731 if (err)
4732 return err;
4733 info_len = min_t(u32, sizeof(info), info_len);
4734
4735 memset(&info, 0, sizeof(info));
4736 if (copy_from_user(&info, uinfo, info_len))
4737 return -EFAULT;
4738
4739 info.type = link->type;
4740 info.id = link->id;
4741 if (link->prog)
4742 info.prog_id = link->prog->aux->id;
4743
4744 if (link->ops->fill_link_info) {
4745 err = link->ops->fill_link_info(link, &info);
4746 if (err)
4747 return err;
4748 }
4749
4750 if (copy_to_user(uinfo, &info, info_len) ||
4751 put_user(info_len, &uattr->info.info_len))
4752 return -EFAULT;
4753
4754 return 0;
4755}
4756
4757
4758#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
4759
4760static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
4761 union bpf_attr __user *uattr)
4762{
4763 int ufd = attr->info.bpf_fd;
4764 struct fd f;
4765 int err;
4766
4767 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
4768 return -EINVAL;
4769
4770 f = fdget(ufd);
4771 if (!f.file)
4772 return -EBADFD;
4773
4774 if (f.file->f_op == &bpf_prog_fops)
4775 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
4776 uattr);
4777 else if (f.file->f_op == &bpf_map_fops)
4778 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
4779 uattr);
4780 else if (f.file->f_op == &btf_fops)
4781 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
4782 else if (f.file->f_op == &bpf_link_fops)
4783 err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
4784 attr, uattr);
4785 else
4786 err = -EINVAL;
4787
4788 fdput(f);
4789 return err;
4790}
4791
4792#define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size
4793
4794static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
4795{
4796 if (CHECK_ATTR(BPF_BTF_LOAD))
4797 return -EINVAL;
4798
4799 if (!bpf_capable())
4800 return -EPERM;
4801
4802 return btf_new_fd(attr, uattr, uattr_size);
4803}
4804
4805#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
4806
4807static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
4808{
4809 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
4810 return -EINVAL;
4811
4812 if (!capable(CAP_SYS_ADMIN))
4813 return -EPERM;
4814
4815 return btf_get_fd_by_id(attr->btf_id);
4816}
4817
4818static int bpf_task_fd_query_copy(const union bpf_attr *attr,
4819 union bpf_attr __user *uattr,
4820 u32 prog_id, u32 fd_type,
4821 const char *buf, u64 probe_offset,
4822 u64 probe_addr)
4823{
4824 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
4825 u32 len = buf ? strlen(buf) : 0, input_len;
4826 int err = 0;
4827
4828 if (put_user(len, &uattr->task_fd_query.buf_len))
4829 return -EFAULT;
4830 input_len = attr->task_fd_query.buf_len;
4831 if (input_len && ubuf) {
4832 if (!len) {
4833 /* nothing to copy, just make ubuf NULL terminated */
4834 char zero = '\0';
4835
4836 if (put_user(zero, ubuf))
4837 return -EFAULT;
4838 } else if (input_len >= len + 1) {
4839 /* ubuf can hold the string with NULL terminator */
4840 if (copy_to_user(ubuf, buf, len + 1))
4841 return -EFAULT;
4842 } else {
4843 /* ubuf cannot hold the string with NULL terminator,
4844 * do a partial copy with NULL terminator.
4845 */
4846 char zero = '\0';
4847
4848 err = -ENOSPC;
4849 if (copy_to_user(ubuf, buf, input_len - 1))
4850 return -EFAULT;
4851 if (put_user(zero, ubuf + input_len - 1))
4852 return -EFAULT;
4853 }
4854 }
4855
4856 if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
4857 put_user(fd_type, &uattr->task_fd_query.fd_type) ||
4858 put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
4859 put_user(probe_addr, &uattr->task_fd_query.probe_addr))
4860 return -EFAULT;
4861
4862 return err;
4863}
4864
4865#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
4866
4867static int bpf_task_fd_query(const union bpf_attr *attr,
4868 union bpf_attr __user *uattr)
4869{
4870 pid_t pid = attr->task_fd_query.pid;
4871 u32 fd = attr->task_fd_query.fd;
4872 const struct perf_event *event;
4873 struct task_struct *task;
4874 struct file *file;
4875 int err;
4876
4877 if (CHECK_ATTR(BPF_TASK_FD_QUERY))
4878 return -EINVAL;
4879
4880 if (!capable(CAP_SYS_ADMIN))
4881 return -EPERM;
4882
4883 if (attr->task_fd_query.flags != 0)
4884 return -EINVAL;
4885
4886 rcu_read_lock();
4887 task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
4888 rcu_read_unlock();
4889 if (!task)
4890 return -ENOENT;
4891
4892 err = 0;
4893 file = fget_task(task, fd);
4894 put_task_struct(task);
4895 if (!file)
4896 return -EBADF;
4897
4898 if (file->f_op == &bpf_link_fops) {
4899 struct bpf_link *link = file->private_data;
4900
4901 if (link->ops == &bpf_raw_tp_link_lops) {
4902 struct bpf_raw_tp_link *raw_tp =
4903 container_of(link, struct bpf_raw_tp_link, link);
4904 struct bpf_raw_event_map *btp = raw_tp->btp;
4905
4906 err = bpf_task_fd_query_copy(attr, uattr,
4907 raw_tp->link.prog->aux->id,
4908 BPF_FD_TYPE_RAW_TRACEPOINT,
4909 btp->tp->name, 0, 0);
4910 goto put_file;
4911 }
4912 goto out_not_supp;
4913 }
4914
4915 event = perf_get_event(file);
4916 if (!IS_ERR(event)) {
4917 u64 probe_offset, probe_addr;
4918 u32 prog_id, fd_type;
4919 const char *buf;
4920
4921 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
4922 &buf, &probe_offset,
4923 &probe_addr, NULL);
4924 if (!err)
4925 err = bpf_task_fd_query_copy(attr, uattr, prog_id,
4926 fd_type, buf,
4927 probe_offset,
4928 probe_addr);
4929 goto put_file;
4930 }
4931
4932out_not_supp:
4933 err = -ENOTSUPP;
4934put_file:
4935 fput(file);
4936 return err;
4937}
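
/*
 * Illustrative user-space sketch (not part of this file): asking what sits
 * behind a perf event or raw tracepoint fd of another task with
 * BPF_TASK_FD_QUERY (CAP_SYS_ADMIN required, as checked above). buf
 * receives the tracepoint/kprobe/uprobe name, possibly truncated as
 * described in bpf_task_fd_query_copy(). Headers as in the BPF_PROG_ATTACH
 * sketch.
 *
 *	static int task_fd_query(pid_t pid, int fd, char *buf, __u32 buf_len,
 *				 __u32 *prog_id, __u32 *fd_type)
 *	{
 *		union bpf_attr attr;
 *		int err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.task_fd_query.pid = pid;
 *		attr.task_fd_query.fd = fd;
 *		attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *		attr.task_fd_query.buf_len = buf_len;
 *
 *		err = syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 *		if (!err) {
 *			*prog_id = attr.task_fd_query.prog_id;
 *			*fd_type = attr.task_fd_query.fd_type;
 *		}
 *		return err;
 *	}
 */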
4938
4939#define BPF_MAP_BATCH_LAST_FIELD batch.flags
4940
4941#define BPF_DO_BATCH(fn, ...) \
4942 do { \
4943 if (!fn) { \
4944 err = -ENOTSUPP; \
4945 goto err_put; \
4946 } \
4947 err = fn(__VA_ARGS__); \
4948 } while (0)
4949
4950static int bpf_map_do_batch(const union bpf_attr *attr,
4951 union bpf_attr __user *uattr,
4952 int cmd)
4953{
4954 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
4955 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
4956 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
4957 struct bpf_map *map;
4958 int err, ufd;
4959 struct fd f;
4960
4961 if (CHECK_ATTR(BPF_MAP_BATCH))
4962 return -EINVAL;
4963
4964 ufd = attr->batch.map_fd;
4965 f = fdget(ufd);
4966 map = __bpf_map_get(f);
4967 if (IS_ERR(map))
4968 return PTR_ERR(map);
4969 if (has_write)
4970 bpf_map_write_active_inc(map);
4971 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4972 err = -EPERM;
4973 goto err_put;
4974 }
4975 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4976 err = -EPERM;
4977 goto err_put;
4978 }
4979
4980 if (cmd == BPF_MAP_LOOKUP_BATCH)
4981 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
4982 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
4983 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
4984 else if (cmd == BPF_MAP_UPDATE_BATCH)
4985 BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
4986 else
4987 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
4988err_put:
4989 if (has_write) {
4990 maybe_wait_bpf_programs(map);
4991 bpf_map_write_active_dec(map);
4992 }
4993 fdput(f);
4994 return err;
4995}
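
/*
 * Illustrative user-space sketch (not part of this file): draining a hash
 * map with BPF_MAP_LOOKUP_BATCH. in_batch/out_batch point at an opaque
 * cursor carried between calls (NULL on the first call), count is in/out,
 * and iteration is over when the kernel reports ENOENT. Key/value sizes
 * and what is done with each returned element are up to the caller.
 * Headers as in the BPF_PROG_ATTACH sketch, plus <errno.h>.
 *
 *	static int dump_map(int map_fd, void *keys, void *vals, __u32 batch_sz)
 *	{
 *		__u32 in_batch = 0, out_batch = 0;
 *		union bpf_attr attr;
 *		int first = 1, err;
 *
 *		for (;;) {
 *			memset(&attr, 0, sizeof(attr));
 *			attr.batch.map_fd = map_fd;
 *			attr.batch.in_batch = first ? 0 :
 *					(__u64)(unsigned long)&in_batch;
 *			attr.batch.out_batch = (__u64)(unsigned long)&out_batch;
 *			attr.batch.keys = (__u64)(unsigned long)keys;
 *			attr.batch.values = (__u64)(unsigned long)vals;
 *			attr.batch.count = batch_sz;
 *
 *			err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr,
 *				      sizeof(attr));
 *			if (err && errno != ENOENT)
 *				return -errno;
 *			// attr.batch.count elements are now in keys[]/vals[];
 *			// consume them here.
 *			if (err)
 *				return 0;	// ENOENT: iteration finished
 *			in_batch = out_batch;
 *			first = 0;
 *		}
 *	}
 */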
4996
4997#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
4998static int link_create(union bpf_attr *attr, bpfptr_t uattr)
4999{
5000 struct bpf_prog *prog;
5001 int ret;
5002
5003 if (CHECK_ATTR(BPF_LINK_CREATE))
5004 return -EINVAL;
5005
5006 if (attr->link_create.attach_type == BPF_STRUCT_OPS)
5007 return bpf_struct_ops_link_create(attr);
5008
5009 prog = bpf_prog_get(attr->link_create.prog_fd);
5010 if (IS_ERR(prog))
5011 return PTR_ERR(prog);
5012
5013 ret = bpf_prog_attach_check_attach_type(prog,
5014 attr->link_create.attach_type);
5015 if (ret)
5016 goto out;
5017
5018 switch (prog->type) {
5019 case BPF_PROG_TYPE_CGROUP_SKB:
5020 case BPF_PROG_TYPE_CGROUP_SOCK:
5021 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
5022 case BPF_PROG_TYPE_SOCK_OPS:
5023 case BPF_PROG_TYPE_CGROUP_DEVICE:
5024 case BPF_PROG_TYPE_CGROUP_SYSCTL:
5025 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5026 ret = cgroup_bpf_link_attach(attr, prog);
5027 break;
5028 case BPF_PROG_TYPE_EXT:
5029 ret = bpf_tracing_prog_attach(prog,
5030 attr->link_create.target_fd,
5031 attr->link_create.target_btf_id,
5032 attr->link_create.tracing.cookie);
5033 break;
5034 case BPF_PROG_TYPE_LSM:
5035 case BPF_PROG_TYPE_TRACING:
5036 if (attr->link_create.attach_type != prog->expected_attach_type) {
5037 ret = -EINVAL;
5038 goto out;
5039 }
5040 if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
5041 ret = bpf_raw_tp_link_attach(prog, NULL);
5042 else if (prog->expected_attach_type == BPF_TRACE_ITER)
5043 ret = bpf_iter_link_attach(attr, uattr, prog);
5044 else if (prog->expected_attach_type == BPF_LSM_CGROUP)
5045 ret = cgroup_bpf_link_attach(attr, prog);
5046 else
5047 ret = bpf_tracing_prog_attach(prog,
5048 attr->link_create.target_fd,
5049 attr->link_create.target_btf_id,
5050 attr->link_create.tracing.cookie);
5051 break;
5052 case BPF_PROG_TYPE_FLOW_DISSECTOR:
5053 case BPF_PROG_TYPE_SK_LOOKUP:
5054 ret = netns_bpf_link_create(attr, prog);
5055 break;
5056#ifdef CONFIG_NET
5057 case BPF_PROG_TYPE_XDP:
5058 ret = bpf_xdp_link_attach(attr, prog);
5059 break;
5060 case BPF_PROG_TYPE_SCHED_CLS:
5061 if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
5062 attr->link_create.attach_type == BPF_TCX_EGRESS)
5063 ret = tcx_link_attach(attr, prog);
5064 else
5065 ret = netkit_link_attach(attr, prog);
5066 break;
5067 case BPF_PROG_TYPE_NETFILTER:
5068 ret = bpf_nf_link_attach(attr, prog);
5069 break;
5070#endif
5071 case BPF_PROG_TYPE_PERF_EVENT:
5072 case BPF_PROG_TYPE_TRACEPOINT:
5073 ret = bpf_perf_link_attach(attr, prog);
5074 break;
5075 case BPF_PROG_TYPE_KPROBE:
5076 if (attr->link_create.attach_type == BPF_PERF_EVENT)
5077 ret = bpf_perf_link_attach(attr, prog);
5078 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI)
5079 ret = bpf_kprobe_multi_link_attach(attr, prog);
5080 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
5081 ret = bpf_uprobe_multi_link_attach(attr, prog);
5082 break;
5083 default:
5084 ret = -EINVAL;
5085 }
5086
5087out:
5088 if (ret < 0)
5089 bpf_prog_put(prog);
5090 return ret;
5091}
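
/*
 * Illustrative user-space sketch (not part of this file): the link-based
 * counterpart of BPF_PROG_ATTACH. On success the syscall returns a new
 * link fd; the attachment lives for as long as that fd (or a pin of the
 * link) does and can later be changed through BPF_LINK_UPDATE or
 * BPF_LINK_DETACH below. Headers as in the BPF_PROG_ATTACH sketch.
 *
 *	static int link_create_cgroup(int cgroup_fd, int prog_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.link_create.prog_fd = prog_fd;
 *		attr.link_create.target_fd = cgroup_fd;
 *		attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
 *
 *		return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *	}
 */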
5092
5093static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
5094{
5095 struct bpf_map *new_map, *old_map = NULL;
5096 int ret;
5097
5098 new_map = bpf_map_get(attr->link_update.new_map_fd);
5099 if (IS_ERR(new_map))
5100 return PTR_ERR(new_map);
5101
5102 if (attr->link_update.flags & BPF_F_REPLACE) {
5103 old_map = bpf_map_get(attr->link_update.old_map_fd);
5104 if (IS_ERR(old_map)) {
5105 ret = PTR_ERR(old_map);
5106 goto out_put;
5107 }
5108 } else if (attr->link_update.old_map_fd) {
5109 ret = -EINVAL;
5110 goto out_put;
5111 }
5112
5113 ret = link->ops->update_map(link, new_map, old_map);
5114
5115 if (old_map)
5116 bpf_map_put(old_map);
5117out_put:
5118 bpf_map_put(new_map);
5119 return ret;
5120}
5121
5122#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
5123
5124static int link_update(union bpf_attr *attr)
5125{
5126 struct bpf_prog *old_prog = NULL, *new_prog;
5127 struct bpf_link *link;
5128 u32 flags;
5129 int ret;
5130
5131 if (CHECK_ATTR(BPF_LINK_UPDATE))
5132 return -EINVAL;
5133
5134 flags = attr->link_update.flags;
5135 if (flags & ~BPF_F_REPLACE)
5136 return -EINVAL;
5137
5138 link = bpf_link_get_from_fd(attr->link_update.link_fd);
5139 if (IS_ERR(link))
5140 return PTR_ERR(link);
5141
5142 if (link->ops->update_map) {
5143 ret = link_update_map(link, attr);
5144 goto out_put_link;
5145 }
5146
5147 new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
5148 if (IS_ERR(new_prog)) {
5149 ret = PTR_ERR(new_prog);
5150 goto out_put_link;
5151 }
5152
5153 if (flags & BPF_F_REPLACE) {
5154 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
5155 if (IS_ERR(old_prog)) {
5156 ret = PTR_ERR(old_prog);
5157 old_prog = NULL;
5158 goto out_put_progs;
5159 }
5160 } else if (attr->link_update.old_prog_fd) {
5161 ret = -EINVAL;
5162 goto out_put_progs;
5163 }
5164
5165 if (link->ops->update_prog)
5166 ret = link->ops->update_prog(link, new_prog, old_prog);
5167 else
5168 ret = -EINVAL;
5169
5170out_put_progs:
5171 if (old_prog)
5172 bpf_prog_put(old_prog);
5173 if (ret)
5174 bpf_prog_put(new_prog);
5175out_put_link:
5176 bpf_link_put_direct(link);
5177 return ret;
5178}
5179
5180#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5181
5182static int link_detach(union bpf_attr *attr)
5183{
5184 struct bpf_link *link;
5185 int ret;
5186
5187 if (CHECK_ATTR(BPF_LINK_DETACH))
5188 return -EINVAL;
5189
5190 link = bpf_link_get_from_fd(attr->link_detach.link_fd);
5191 if (IS_ERR(link))
5192 return PTR_ERR(link);
5193
5194 if (link->ops->detach)
5195 ret = link->ops->detach(link);
5196 else
5197 ret = -EOPNOTSUPP;
5198
5199 bpf_link_put_direct(link);
5200 return ret;
5201}
5202
5203static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5204{
5205 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5206}
5207
5208struct bpf_link *bpf_link_by_id(u32 id)
5209{
5210 struct bpf_link *link;
5211
5212 if (!id)
5213 return ERR_PTR(-ENOENT);
5214
5215 spin_lock_bh(&link_idr_lock);
5216 /* before link is "settled", ID is 0, pretend it doesn't exist yet */
5217 link = idr_find(&link_idr, id);
5218 if (link) {
5219 if (link->id)
5220 link = bpf_link_inc_not_zero(link);
5221 else
5222 link = ERR_PTR(-EAGAIN);
5223 } else {
5224 link = ERR_PTR(-ENOENT);
5225 }
5226 spin_unlock_bh(&link_idr_lock);
5227 return link;
5228}
5229
5230struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
5231{
5232 struct bpf_link *link;
5233
5234 spin_lock_bh(&link_idr_lock);
5235again:
5236 link = idr_get_next(&link_idr, id);
5237 if (link) {
5238 link = bpf_link_inc_not_zero(link);
5239 if (IS_ERR(link)) {
5240 (*id)++;
5241 goto again;
5242 }
5243 }
5244 spin_unlock_bh(&link_idr_lock);
5245
5246 return link;
5247}
5248
5249#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
5250
5251static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
5252{
5253 struct bpf_link *link;
5254 u32 id = attr->link_id;
5255 int fd;
5256
5257 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
5258 return -EINVAL;
5259
5260 if (!capable(CAP_SYS_ADMIN))
5261 return -EPERM;
5262
5263 link = bpf_link_by_id(id);
5264 if (IS_ERR(link))
5265 return PTR_ERR(link);
5266
5267 fd = bpf_link_new_fd(link);
5268 if (fd < 0)
5269 bpf_link_put_direct(link);
5270
5271 return fd;
5272}
5273
5274DEFINE_MUTEX(bpf_stats_enabled_mutex);
5275
5276static int bpf_stats_release(struct inode *inode, struct file *file)
5277{
5278 mutex_lock(&bpf_stats_enabled_mutex);
5279 static_key_slow_dec(&bpf_stats_enabled_key.key);
5280 mutex_unlock(&bpf_stats_enabled_mutex);
5281 return 0;
5282}
5283
5284static const struct file_operations bpf_stats_fops = {
5285 .release = bpf_stats_release,
5286};
5287
5288static int bpf_enable_runtime_stats(void)
5289{
5290 int fd;
5291
5292 mutex_lock(&bpf_stats_enabled_mutex);
5293
5294 /* Set a very high limit to avoid overflow */
5295 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
5296 mutex_unlock(&bpf_stats_enabled_mutex);
5297 return -EBUSY;
5298 }
5299
5300 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
5301 if (fd >= 0)
5302 static_key_slow_inc(&bpf_stats_enabled_key.key);
5303
5304 mutex_unlock(&bpf_stats_enabled_mutex);
5305 return fd;
5306}
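
/*
 * Illustrative user-space sketch (not part of this file): turning on
 * run-time statistics with BPF_ENABLE_STATS (CAP_SYS_ADMIN required). The
 * returned fd is what keeps the static key above bumped; programs only
 * accumulate run_time_ns/run_cnt while this fd (or the bpf_stats_enabled
 * sysctl) holds the key enabled. Headers as in the BPF_PROG_ATTACH sketch.
 *
 *	static int enable_run_time_stats(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *
 *		// keep the returned fd open for as long as stats are wanted
 *		return syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *	}
 */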
5307
5308#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
5309
5310static int bpf_enable_stats(union bpf_attr *attr)
5311{
5312
5313 if (CHECK_ATTR(BPF_ENABLE_STATS))
5314 return -EINVAL;
5315
5316 if (!capable(CAP_SYS_ADMIN))
5317 return -EPERM;
5318
5319 switch (attr->enable_stats.type) {
5320 case BPF_STATS_RUN_TIME:
5321 return bpf_enable_runtime_stats();
5322 default:
5323 break;
5324 }
5325 return -EINVAL;
5326}
5327
5328#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
5329
5330static int bpf_iter_create(union bpf_attr *attr)
5331{
5332 struct bpf_link *link;
5333 int err;
5334
5335 if (CHECK_ATTR(BPF_ITER_CREATE))
5336 return -EINVAL;
5337
5338 if (attr->iter_create.flags)
5339 return -EINVAL;
5340
5341 link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5342 if (IS_ERR(link))
5343 return PTR_ERR(link);
5344
5345 err = bpf_iter_new_fd(link);
5346 bpf_link_put_direct(link);
5347
5348 return err;
5349}
5350
5351#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5352
5353static int bpf_prog_bind_map(union bpf_attr *attr)
5354{
5355 struct bpf_prog *prog;
5356 struct bpf_map *map;
5357 struct bpf_map **used_maps_old, **used_maps_new;
5358 int i, ret = 0;
5359
5360 if (CHECK_ATTR(BPF_PROG_BIND_MAP))
5361 return -EINVAL;
5362
5363 if (attr->prog_bind_map.flags)
5364 return -EINVAL;
5365
5366 prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
5367 if (IS_ERR(prog))
5368 return PTR_ERR(prog);
5369
5370 map = bpf_map_get(attr->prog_bind_map.map_fd);
5371 if (IS_ERR(map)) {
5372 ret = PTR_ERR(map);
5373 goto out_prog_put;
5374 }
5375
5376 mutex_lock(&prog->aux->used_maps_mutex);
5377
5378 used_maps_old = prog->aux->used_maps;
5379
5380 for (i = 0; i < prog->aux->used_map_cnt; i++)
5381 if (used_maps_old[i] == map) {
5382 bpf_map_put(map);
5383 goto out_unlock;
5384 }
5385
5386 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
5387 sizeof(used_maps_new[0]),
5388 GFP_KERNEL);
5389 if (!used_maps_new) {
5390 ret = -ENOMEM;
5391 goto out_unlock;
5392 }
5393
5394 /* The bpf program will not access the bpf map, but for the sake of
5395	 * simplicity, increase sleepable_refcnt for sleepable programs as well.
5396 */
5397 if (prog->aux->sleepable)
5398 atomic64_inc(&map->sleepable_refcnt);
5399 memcpy(used_maps_new, used_maps_old,
5400 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
5401 used_maps_new[prog->aux->used_map_cnt] = map;
5402
5403 prog->aux->used_map_cnt++;
5404 prog->aux->used_maps = used_maps_new;
5405
5406 kfree(used_maps_old);
5407
5408out_unlock:
5409 mutex_unlock(&prog->aux->used_maps_mutex);
5410
5411 if (ret)
5412 bpf_map_put(map);
5413out_prog_put:
5414 bpf_prog_put(prog);
5415 return ret;
5416}
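
/*
 * Illustrative user-space sketch (not part of this file): binding an extra
 * map to a loaded program with BPF_PROG_BIND_MAP so that the map stays
 * alive as long as the program does, even though no instruction in the
 * program references it (useful for metadata maps). Headers as in the
 * BPF_PROG_ATTACH sketch.
 *
 *	static int prog_bind_map(int prog_fd, int map_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_bind_map.prog_fd = prog_fd;
 *		attr.prog_bind_map.map_fd = map_fd;
 *
 *		return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 *	}
 */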
5417
5418static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
5419{
5420 union bpf_attr attr;
5421 int err;
5422
5423 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5424 if (err)
5425 return err;
5426 size = min_t(u32, size, sizeof(attr));
5427
5428 /* copy attributes from user space, may be less than sizeof(bpf_attr) */
5429 memset(&attr, 0, sizeof(attr));
5430 if (copy_from_bpfptr(&attr, uattr, size) != 0)
5431 return -EFAULT;
5432
5433 err = security_bpf(cmd, &attr, size);
5434 if (err < 0)
5435 return err;
5436
5437 switch (cmd) {
5438 case BPF_MAP_CREATE:
5439 err = map_create(&attr);
5440 break;
5441 case BPF_MAP_LOOKUP_ELEM:
5442 err = map_lookup_elem(&attr);
5443 break;
5444 case BPF_MAP_UPDATE_ELEM:
5445 err = map_update_elem(&attr, uattr);
5446 break;
5447 case BPF_MAP_DELETE_ELEM:
5448 err = map_delete_elem(&attr, uattr);
5449 break;
5450 case BPF_MAP_GET_NEXT_KEY:
5451 err = map_get_next_key(&attr);
5452 break;
5453 case BPF_MAP_FREEZE:
5454 err = map_freeze(&attr);
5455 break;
5456 case BPF_PROG_LOAD:
5457 err = bpf_prog_load(&attr, uattr, size);
5458 break;
5459 case BPF_OBJ_PIN:
5460 err = bpf_obj_pin(&attr);
5461 break;
5462 case BPF_OBJ_GET:
5463 err = bpf_obj_get(&attr);
5464 break;
5465 case BPF_PROG_ATTACH:
5466 err = bpf_prog_attach(&attr);
5467 break;
5468 case BPF_PROG_DETACH:
5469 err = bpf_prog_detach(&attr);
5470 break;
5471 case BPF_PROG_QUERY:
5472 err = bpf_prog_query(&attr, uattr.user);
5473 break;
5474 case BPF_PROG_TEST_RUN:
5475 err = bpf_prog_test_run(&attr, uattr.user);
5476 break;
5477 case BPF_PROG_GET_NEXT_ID:
5478 err = bpf_obj_get_next_id(&attr, uattr.user,
5479 &prog_idr, &prog_idr_lock);
5480 break;
5481 case BPF_MAP_GET_NEXT_ID:
5482 err = bpf_obj_get_next_id(&attr, uattr.user,
5483 &map_idr, &map_idr_lock);
5484 break;
5485 case BPF_BTF_GET_NEXT_ID:
5486 err = bpf_obj_get_next_id(&attr, uattr.user,
5487 &btf_idr, &btf_idr_lock);
5488 break;
5489 case BPF_PROG_GET_FD_BY_ID:
5490 err = bpf_prog_get_fd_by_id(&attr);
5491 break;
5492 case BPF_MAP_GET_FD_BY_ID:
5493 err = bpf_map_get_fd_by_id(&attr);
5494 break;
5495 case BPF_OBJ_GET_INFO_BY_FD:
5496 err = bpf_obj_get_info_by_fd(&attr, uattr.user);
5497 break;
5498 case BPF_RAW_TRACEPOINT_OPEN:
5499 err = bpf_raw_tracepoint_open(&attr);
5500 break;
5501 case BPF_BTF_LOAD:
5502 err = bpf_btf_load(&attr, uattr, size);
5503 break;
5504 case BPF_BTF_GET_FD_BY_ID:
5505 err = bpf_btf_get_fd_by_id(&attr);
5506 break;
5507 case BPF_TASK_FD_QUERY:
5508 err = bpf_task_fd_query(&attr, uattr.user);
5509 break;
5510 case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
5511 err = map_lookup_and_delete_elem(&attr);
5512 break;
5513 case BPF_MAP_LOOKUP_BATCH:
5514 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
5515 break;
5516 case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
5517 err = bpf_map_do_batch(&attr, uattr.user,
5518 BPF_MAP_LOOKUP_AND_DELETE_BATCH);
5519 break;
5520 case BPF_MAP_UPDATE_BATCH:
5521 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
5522 break;
5523 case BPF_MAP_DELETE_BATCH:
5524 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
5525 break;
5526 case BPF_LINK_CREATE:
5527 err = link_create(&attr, uattr);
5528 break;
5529 case BPF_LINK_UPDATE:
5530 err = link_update(&attr);
5531 break;
5532 case BPF_LINK_GET_FD_BY_ID:
5533 err = bpf_link_get_fd_by_id(&attr);
5534 break;
5535 case BPF_LINK_GET_NEXT_ID:
5536 err = bpf_obj_get_next_id(&attr, uattr.user,
5537 &link_idr, &link_idr_lock);
5538 break;
5539 case BPF_ENABLE_STATS:
5540 err = bpf_enable_stats(&attr);
5541 break;
5542 case BPF_ITER_CREATE:
5543 err = bpf_iter_create(&attr);
5544 break;
5545 case BPF_LINK_DETACH:
5546 err = link_detach(&attr);
5547 break;
5548 case BPF_PROG_BIND_MAP:
5549 err = bpf_prog_bind_map(&attr);
5550 break;
5551 default:
5552 err = -EINVAL;
5553 break;
5554 }
5555
5556 return err;
5557}
5558
5559SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
5560{
5561 return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
5562}
5563
5564static bool syscall_prog_is_valid_access(int off, int size,
5565 enum bpf_access_type type,
5566 const struct bpf_prog *prog,
5567 struct bpf_insn_access_aux *info)
5568{
5569 if (off < 0 || off >= U16_MAX)
5570 return false;
5571 if (off % size != 0)
5572 return false;
5573 return true;
5574}
5575
5576BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
5577{
5578 switch (cmd) {
5579 case BPF_MAP_CREATE:
5580 case BPF_MAP_DELETE_ELEM:
5581 case BPF_MAP_UPDATE_ELEM:
5582 case BPF_MAP_FREEZE:
5583 case BPF_MAP_GET_FD_BY_ID:
5584 case BPF_PROG_LOAD:
5585 case BPF_BTF_LOAD:
5586 case BPF_LINK_CREATE:
5587 case BPF_RAW_TRACEPOINT_OPEN:
5588 break;
5589 default:
5590 return -EINVAL;
5591 }
5592 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
5593}
5594
5595
5596/* To shut up -Wmissing-prototypes.
5597 * This function is used by the kernel light skeleton
5598 * to load bpf programs when modules are loaded or during kernel boot.
5599 * See tools/lib/bpf/skel_internal.h
5600 */
5601int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
5602
5603int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
5604{
5605 struct bpf_prog * __maybe_unused prog;
5606 struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5607
5608 switch (cmd) {
5609#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5610 case BPF_PROG_TEST_RUN:
5611 if (attr->test.data_in || attr->test.data_out ||
5612 attr->test.ctx_out || attr->test.duration ||
5613 attr->test.repeat || attr->test.flags)
5614 return -EINVAL;
5615
5616 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5617 if (IS_ERR(prog))
5618 return PTR_ERR(prog);
5619
5620 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5621 attr->test.ctx_size_in > U16_MAX) {
5622 bpf_prog_put(prog);
5623 return -EINVAL;
5624 }
5625
5626 run_ctx.bpf_cookie = 0;
5627 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
5628 /* recursion detected */
5629 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
5630 bpf_prog_put(prog);
5631 return -EBUSY;
5632 }
5633 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5634 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
5635 &run_ctx);
5636 bpf_prog_put(prog);
5637 return 0;
5638#endif
5639 default:
5640 return ____bpf_sys_bpf(cmd, attr, size);
5641 }
5642}
5643EXPORT_SYMBOL(kern_sys_bpf);
5644
5645static const struct bpf_func_proto bpf_sys_bpf_proto = {
5646 .func = bpf_sys_bpf,
5647 .gpl_only = false,
5648 .ret_type = RET_INTEGER,
5649 .arg1_type = ARG_ANYTHING,
5650 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
5651 .arg3_type = ARG_CONST_SIZE,
5652};
5653
5654const struct bpf_func_proto * __weak
5655tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5656{
5657 return bpf_base_func_proto(func_id);
5658}
5659
5660BPF_CALL_1(bpf_sys_close, u32, fd)
5661{
5662	/* When a bpf program calls this helper, there must not be
5663	 * an fdget() without a matching, completed fdput().
5664 * This helper is allowed in the following callchain only:
5665 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
5666 */
5667 return close_fd(fd);
5668}
5669
5670static const struct bpf_func_proto bpf_sys_close_proto = {
5671 .func = bpf_sys_close,
5672 .gpl_only = false,
5673 .ret_type = RET_INTEGER,
5674 .arg1_type = ARG_ANYTHING,
5675};
5676
5677BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
5678{
5679 if (flags)
5680 return -EINVAL;
5681
5682 if (name_sz <= 1 || name[name_sz - 1])
5683 return -EINVAL;
5684
5685 if (!bpf_dump_raw_ok(current_cred()))
5686 return -EPERM;
5687
5688 *res = kallsyms_lookup_name(name);
5689 return *res ? 0 : -ENOENT;
5690}
5691
5692static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
5693 .func = bpf_kallsyms_lookup_name,
5694 .gpl_only = false,
5695 .ret_type = RET_INTEGER,
5696 .arg1_type = ARG_PTR_TO_MEM,
5697 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
5698 .arg3_type = ARG_ANYTHING,
5699 .arg4_type = ARG_PTR_TO_LONG,
5700};
5701
5702static const struct bpf_func_proto *
5703syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5704{
5705 switch (func_id) {
5706 case BPF_FUNC_sys_bpf:
5707 return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
5708 case BPF_FUNC_btf_find_by_name_kind:
5709 return &bpf_btf_find_by_name_kind_proto;
5710 case BPF_FUNC_sys_close:
5711 return &bpf_sys_close_proto;
5712 case BPF_FUNC_kallsyms_lookup_name:
5713 return &bpf_kallsyms_lookup_name_proto;
5714 default:
5715 return tracing_prog_func_proto(func_id, prog);
5716 }
5717}
5718
5719const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
5720 .get_func_proto = syscall_prog_func_proto,
5721 .is_valid_access = syscall_prog_is_valid_access,
5722};
5723
5724const struct bpf_prog_ops bpf_syscall_prog_ops = {
5725 .test_run = bpf_prog_test_run_syscall,
5726};
5727
5728#ifdef CONFIG_SYSCTL
5729static int bpf_stats_handler(struct ctl_table *table, int write,
5730 void *buffer, size_t *lenp, loff_t *ppos)
5731{
5732 struct static_key *key = (struct static_key *)table->data;
5733 static int saved_val;
5734 int val, ret;
5735 struct ctl_table tmp = {
5736 .data = &val,
5737 .maxlen = sizeof(val),
5738 .mode = table->mode,
5739 .extra1 = SYSCTL_ZERO,
5740 .extra2 = SYSCTL_ONE,
5741 };
5742
5743 if (write && !capable(CAP_SYS_ADMIN))
5744 return -EPERM;
5745
5746 mutex_lock(&bpf_stats_enabled_mutex);
5747 val = saved_val;
5748 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5749 if (write && !ret && val != saved_val) {
5750 if (val)
5751 static_key_slow_inc(key);
5752 else
5753 static_key_slow_dec(key);
5754 saved_val = val;
5755 }
5756 mutex_unlock(&bpf_stats_enabled_mutex);
5757 return ret;
5758}
5759
5760void __weak unpriv_ebpf_notify(int new_state)
5761{
5762}
5763
5764static int bpf_unpriv_handler(struct ctl_table *table, int write,
5765 void *buffer, size_t *lenp, loff_t *ppos)
5766{
5767 int ret, unpriv_enable = *(int *)table->data;
5768 bool locked_state = unpriv_enable == 1;
5769 struct ctl_table tmp = *table;
5770
5771 if (write && !capable(CAP_SYS_ADMIN))
5772 return -EPERM;
5773
5774 tmp.data = &unpriv_enable;
5775 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5776 if (write && !ret) {
5777 if (locked_state && unpriv_enable != 1)
5778 return -EPERM;
5779 *(int *)table->data = unpriv_enable;
5780 }
5781
5782 if (write)
5783 unpriv_ebpf_notify(unpriv_enable);
5784
5785 return ret;
5786}
5787
5788static struct ctl_table bpf_syscall_table[] = {
5789 {
5790 .procname = "unprivileged_bpf_disabled",
5791 .data = &sysctl_unprivileged_bpf_disabled,
5792 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
5793 .mode = 0644,
5794 .proc_handler = bpf_unpriv_handler,
5795 .extra1 = SYSCTL_ZERO,
5796 .extra2 = SYSCTL_TWO,
5797 },
5798 {
5799 .procname = "bpf_stats_enabled",
5800 .data = &bpf_stats_enabled_key.key,
5801 .mode = 0644,
5802 .proc_handler = bpf_stats_handler,
5803 },
5804 { }
5805};
5806
5807static int __init bpf_syscall_sysctl_init(void)
5808{
5809 register_sysctl_init("kernel", bpf_syscall_table);
5810 return 0;
5811}
5812late_initcall(bpf_syscall_sysctl_init);
5813#endif /* CONFIG_SYSCTL */
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 */
4#include <linux/bpf.h>
5#include <linux/bpf-cgroup.h>
6#include <linux/bpf_trace.h>
7#include <linux/bpf_lirc.h>
8#include <linux/bpf_verifier.h>
9#include <linux/bsearch.h>
10#include <linux/btf.h>
11#include <linux/syscalls.h>
12#include <linux/slab.h>
13#include <linux/sched/signal.h>
14#include <linux/vmalloc.h>
15#include <linux/mmzone.h>
16#include <linux/anon_inodes.h>
17#include <linux/fdtable.h>
18#include <linux/file.h>
19#include <linux/fs.h>
20#include <linux/license.h>
21#include <linux/filter.h>
22#include <linux/kernel.h>
23#include <linux/idr.h>
24#include <linux/cred.h>
25#include <linux/timekeeping.h>
26#include <linux/ctype.h>
27#include <linux/nospec.h>
28#include <linux/audit.h>
29#include <uapi/linux/btf.h>
30#include <linux/pgtable.h>
31#include <linux/bpf_lsm.h>
32#include <linux/poll.h>
33#include <linux/sort.h>
34#include <linux/bpf-netns.h>
35#include <linux/rcupdate_trace.h>
36#include <linux/memcontrol.h>
37#include <linux/trace_events.h>
38#include <linux/tracepoint.h>
39
40#include <net/netfilter/nf_bpf_link.h>
41#include <net/netkit.h>
42#include <net/tcx.h>
43
44#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
45 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
46 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
47#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
48#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
49#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
50 IS_FD_HASH(map))
51
52#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
53
54DEFINE_PER_CPU(int, bpf_prog_active);
55static DEFINE_IDR(prog_idr);
56static DEFINE_SPINLOCK(prog_idr_lock);
57static DEFINE_IDR(map_idr);
58static DEFINE_SPINLOCK(map_idr_lock);
59static DEFINE_IDR(link_idr);
60static DEFINE_SPINLOCK(link_idr_lock);
61
62int sysctl_unprivileged_bpf_disabled __read_mostly =
63 IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
64
65static const struct bpf_map_ops * const bpf_map_types[] = {
66#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
67#define BPF_MAP_TYPE(_id, _ops) \
68 [_id] = &_ops,
69#define BPF_LINK_TYPE(_id, _name)
70#include <linux/bpf_types.h>
71#undef BPF_PROG_TYPE
72#undef BPF_MAP_TYPE
73#undef BPF_LINK_TYPE
74};
75
76/*
77 * If we're handed a bigger struct than we know of, ensure all the unknown bits
78 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
79 * we don't know about yet.
80 *
81 * There is a ToCToU between this function call and the following
82 * copy_from_user() call. However, this is not a concern since this function is
83 * meant to be a future-proofing of bits.
84 */
85int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
86 size_t expected_size,
87 size_t actual_size)
88{
89 int res;
90
91 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */
92 return -E2BIG;
93
94 if (actual_size <= expected_size)
95 return 0;
96
97 if (uaddr.is_kernel)
98 res = memchr_inv(uaddr.kernel + expected_size, 0,
99 actual_size - expected_size) == NULL;
100 else
101 res = check_zeroed_user(uaddr.user + expected_size,
102 actual_size - expected_size);
103 if (res < 0)
104 return res;
105 return res ? 0 : -E2BIG;
106}
107
108const struct bpf_map_ops bpf_map_offload_ops = {
109 .map_meta_equal = bpf_map_meta_equal,
110 .map_alloc = bpf_map_offload_map_alloc,
111 .map_free = bpf_map_offload_map_free,
112 .map_check_btf = map_check_no_btf,
113 .map_mem_usage = bpf_map_offload_map_mem_usage,
114};
115
116static void bpf_map_write_active_inc(struct bpf_map *map)
117{
118 atomic64_inc(&map->writecnt);
119}
120
121static void bpf_map_write_active_dec(struct bpf_map *map)
122{
123 atomic64_dec(&map->writecnt);
124}
125
126bool bpf_map_write_active(const struct bpf_map *map)
127{
128 return atomic64_read(&map->writecnt) != 0;
129}
130
131static u32 bpf_map_value_size(const struct bpf_map *map)
132{
133 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
134 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
135 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
136 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
137 return round_up(map->value_size, 8) * num_possible_cpus();
138 else if (IS_FD_MAP(map))
139 return sizeof(u32);
140 else
141 return map->value_size;
142}
143
144static void maybe_wait_bpf_programs(struct bpf_map *map)
145{
146 /* Wait for any running non-sleepable BPF programs to complete so that
147 * userspace, when we return to it, knows that all non-sleepable
148 * programs that could be running use the new map value. For sleepable
149 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
150 * for the completions of these programs, but considering the waiting
151 * time can be very long and userspace may think it will hang forever,
152 * so don't handle sleepable BPF programs now.
153 */
154 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
155 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
156 synchronize_rcu();
157}
158
159static void unpin_uptr_kaddr(void *kaddr)
160{
161 if (kaddr)
162 unpin_user_page(virt_to_page(kaddr));
163}
164
165static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj)
166{
167 const struct btf_field *field;
168 void **uptr_addr;
169 int i;
170
171 for (i = 0, field = rec->fields; i < cnt; i++, field++) {
172 if (field->type != BPF_UPTR)
173 continue;
174
175 uptr_addr = obj + field->offset;
176 unpin_uptr_kaddr(*uptr_addr);
177 }
178}
179
180static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj)
181{
182 if (!btf_record_has_field(rec, BPF_UPTR))
183 return;
184
185 __bpf_obj_unpin_uptrs(rec, rec->cnt, obj);
186}
187
188static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj)
189{
190 const struct btf_field *field;
191 const struct btf_type *t;
192 unsigned long start, end;
193 struct page *page;
194 void **uptr_addr;
195 int i, err;
196
197 if (!btf_record_has_field(rec, BPF_UPTR))
198 return 0;
199
200 for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
201 if (field->type != BPF_UPTR)
202 continue;
203
204 uptr_addr = obj + field->offset;
205 start = *(unsigned long *)uptr_addr;
206 if (!start)
207 continue;
208
209 t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id);
210 /* t->size was checked for zero before */
211 if (check_add_overflow(start, t->size - 1, &end)) {
212 err = -EFAULT;
213 goto unpin_all;
214 }
215
216 /* The uptr's struct cannot span across two pages */
217 if ((start & PAGE_MASK) != (end & PAGE_MASK)) {
218 err = -EOPNOTSUPP;
219 goto unpin_all;
220 }
221
222 err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page);
223 if (err != 1)
224 goto unpin_all;
225
226 if (PageHighMem(page)) {
227 err = -EOPNOTSUPP;
228 unpin_user_page(page);
229 goto unpin_all;
230 }
231
232 *uptr_addr = page_address(page) + offset_in_page(start);
233 }
234
235 return 0;
236
237unpin_all:
238 __bpf_obj_unpin_uptrs(rec, i, obj);
239 return err;
240}
241
242static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
243 void *key, void *value, __u64 flags)
244{
245 int err;
246
247 /* Need to create a kthread, thus must support schedule */
248 if (bpf_map_is_offloaded(map)) {
249 return bpf_map_offload_update_elem(map, key, value, flags);
250 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
251 map->map_type == BPF_MAP_TYPE_ARENA ||
252 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
253 return map->ops->map_update_elem(map, key, value, flags);
254 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
255 map->map_type == BPF_MAP_TYPE_SOCKMAP) {
256 return sock_map_update_elem_sys(map, key, value, flags);
257 } else if (IS_FD_PROG_ARRAY(map)) {
258 return bpf_fd_array_map_update_elem(map, map_file, key, value,
259 flags);
260 }
261
262 bpf_disable_instrumentation();
263 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
264 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
265 err = bpf_percpu_hash_update(map, key, value, flags);
266 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
267 err = bpf_percpu_array_update(map, key, value, flags);
268 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
269 err = bpf_percpu_cgroup_storage_update(map, key, value,
270 flags);
271 } else if (IS_FD_ARRAY(map)) {
272 err = bpf_fd_array_map_update_elem(map, map_file, key, value,
273 flags);
274 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
275 err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
276 flags);
277 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
278 /* rcu_read_lock() is not needed */
279 err = bpf_fd_reuseport_array_update_elem(map, key, value,
280 flags);
281 } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
282 map->map_type == BPF_MAP_TYPE_STACK ||
283 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
284 err = map->ops->map_push_elem(map, value, flags);
285 } else {
286 err = bpf_obj_pin_uptrs(map->record, value);
287 if (!err) {
288 rcu_read_lock();
289 err = map->ops->map_update_elem(map, key, value, flags);
290 rcu_read_unlock();
291 if (err)
292 bpf_obj_unpin_uptrs(map->record, value);
293 }
294 }
295 bpf_enable_instrumentation();
296
297 return err;
298}
299
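/* Copy a single map element into a syscall-provided buffer, dispatching to
 * the map-type specific helper. The generic lookup path honours BPF_F_LOCK
 * and re-initializes special fields (spin lock, timer, etc.) in the copied
 * value, since the destination buffer was not zero-initialized.
 */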
300static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
301 __u64 flags)
302{
303 void *ptr;
304 int err;
305
306 if (bpf_map_is_offloaded(map))
307 return bpf_map_offload_lookup_elem(map, key, value);
308
309 bpf_disable_instrumentation();
310 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
311 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
312 err = bpf_percpu_hash_copy(map, key, value);
313 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
314 err = bpf_percpu_array_copy(map, key, value);
315 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
316 err = bpf_percpu_cgroup_storage_copy(map, key, value);
317 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
318 err = bpf_stackmap_copy(map, key, value);
319 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
320 err = bpf_fd_array_map_lookup_elem(map, key, value);
321 } else if (IS_FD_HASH(map)) {
322 err = bpf_fd_htab_map_lookup_elem(map, key, value);
323 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
324 err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
325 } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
326 map->map_type == BPF_MAP_TYPE_STACK ||
327 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
328 err = map->ops->map_peek_elem(map, value);
329 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
330 /* struct_ops map requires directly updating "value" */
331 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
332 } else {
333 rcu_read_lock();
334 if (map->ops->map_lookup_elem_sys_only)
335 ptr = map->ops->map_lookup_elem_sys_only(map, key);
336 else
337 ptr = map->ops->map_lookup_elem(map, key);
338 if (IS_ERR(ptr)) {
339 err = PTR_ERR(ptr);
340 } else if (!ptr) {
341 err = -ENOENT;
342 } else {
343 err = 0;
344 if (flags & BPF_F_LOCK)
345 /* lock 'ptr' and copy everything but lock */
346 copy_map_value_locked(map, value, ptr, true);
347 else
348 copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero-initialized */
350 check_and_init_map_value(map, value);
351 }
352 rcu_read_unlock();
353 }
354
355 bpf_enable_instrumentation();
356
357 return err;
358}
359
/* Please do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the
 * active memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
364static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
365{
	/* We really just want to fail instead of triggering the OOM killer
	 * under memory pressure, therefore we pass __GFP_NORETRY to
	 * kmalloc, which is used for lower-order allocation requests.
	 *
	 * It has been observed that higher-order allocation requests done
	 * by vmalloc with __GFP_NORETRY set might fail because they do not
	 * try to reclaim memory from the page cache, so we pass
	 * __GFP_RETRY_MAYFAIL for vmalloc to avoid such situations.
	 */
375
376 gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
377 unsigned int flags = 0;
378 unsigned long align = 1;
379 void *area;
380
381 if (size >= SIZE_MAX)
382 return NULL;
383
384 /* kmalloc()'ed memory can't be mmap()'ed */
385 if (mmapable) {
386 BUG_ON(!PAGE_ALIGNED(size));
387 align = SHMLBA;
388 flags = VM_USERMAP;
389 } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
390 area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
391 numa_node);
392 if (area != NULL)
393 return area;
394 }
395
396 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
397 gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
398 flags, numa_node, __builtin_return_address(0));
399}
400
401void *bpf_map_area_alloc(u64 size, int numa_node)
402{
403 return __bpf_map_area_alloc(size, numa_node, false);
404}
405
406void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
407{
408 return __bpf_map_area_alloc(size, numa_node, true);
409}
410
411void bpf_map_area_free(void *area)
412{
413 kvfree(area);
414}
415
416static u32 bpf_map_flags_retain_permanent(u32 flags)
417{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd, so they have no meaning upon map object
	 * inspection, since multiple file descriptors with different
	 * (access) properties can exist for the same map. Given that they
	 * carry zero meaning for the map itself, let's clear them here.
	 */
425 return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
426}
427
428void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
429{
430 map->map_type = attr->map_type;
431 map->key_size = attr->key_size;
432 map->value_size = attr->value_size;
433 map->max_entries = attr->max_entries;
434 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
435 map->numa_node = bpf_map_attr_numa_node(attr);
436 map->map_extra = attr->map_extra;
437}
438
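/* Allocate a user-visible map ID from the cyclic IDR; the ID is in [1, INT_MAX). */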
439static int bpf_map_alloc_id(struct bpf_map *map)
440{
441 int id;
442
443 idr_preload(GFP_KERNEL);
444 spin_lock_bh(&map_idr_lock);
445 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
446 if (id > 0)
447 map->id = id;
448 spin_unlock_bh(&map_idr_lock);
449 idr_preload_end();
450
451 if (WARN_ON_ONCE(!id))
452 return -ENOSPC;
453
454 return id > 0 ? 0 : id;
455}
456
457void bpf_map_free_id(struct bpf_map *map)
458{
459 unsigned long flags;
460
461 /* Offloaded maps are removed from the IDR store when their device
462 * disappears - even if someone holds an fd to them they are unusable,
463 * the memory is gone, all ops will fail; they are simply waiting for
464 * refcnt to drop to be freed.
465 */
466 if (!map->id)
467 return;
468
469 spin_lock_irqsave(&map_idr_lock, flags);
470
471 idr_remove(&map_idr, map->id);
472 map->id = 0;
473
474 spin_unlock_irqrestore(&map_idr_lock, flags);
475}
476
477#ifdef CONFIG_MEMCG
478static void bpf_map_save_memcg(struct bpf_map *map)
479{
480 /* Currently if a map is created by a process belonging to the root
481 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
482 * So we have to check map->objcg for being NULL each time it's
483 * being used.
484 */
485 if (memcg_bpf_enabled())
486 map->objcg = get_obj_cgroup_from_current();
487}
488
489static void bpf_map_release_memcg(struct bpf_map *map)
490{
491 if (map->objcg)
492 obj_cgroup_put(map->objcg);
493}
494
495static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
496{
497 if (map->objcg)
498 return get_mem_cgroup_from_objcg(map->objcg);
499
500 return root_mem_cgroup;
501}
502
503void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
504 int node)
505{
506 struct mem_cgroup *memcg, *old_memcg;
507 void *ptr;
508
509 memcg = bpf_map_get_memcg(map);
510 old_memcg = set_active_memcg(memcg);
511 ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
512 set_active_memcg(old_memcg);
513 mem_cgroup_put(memcg);
514
515 return ptr;
516}
517
518void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
519{
520 struct mem_cgroup *memcg, *old_memcg;
521 void *ptr;
522
523 memcg = bpf_map_get_memcg(map);
524 old_memcg = set_active_memcg(memcg);
525 ptr = kzalloc(size, flags | __GFP_ACCOUNT);
526 set_active_memcg(old_memcg);
527 mem_cgroup_put(memcg);
528
529 return ptr;
530}
531
532void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
533 gfp_t flags)
534{
535 struct mem_cgroup *memcg, *old_memcg;
536 void *ptr;
537
538 memcg = bpf_map_get_memcg(map);
539 old_memcg = set_active_memcg(memcg);
540 ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
541 set_active_memcg(old_memcg);
542 mem_cgroup_put(memcg);
543
544 return ptr;
545}
546
547void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
548 size_t align, gfp_t flags)
549{
550 struct mem_cgroup *memcg, *old_memcg;
551 void __percpu *ptr;
552
553 memcg = bpf_map_get_memcg(map);
554 old_memcg = set_active_memcg(memcg);
555 ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
556 set_active_memcg(old_memcg);
557 mem_cgroup_put(memcg);
558
559 return ptr;
560}
561
562#else
563static void bpf_map_save_memcg(struct bpf_map *map)
564{
565}
566
567static void bpf_map_release_memcg(struct bpf_map *map)
568{
569}
570#endif
571
572int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
573 unsigned long nr_pages, struct page **pages)
574{
575 unsigned long i, j;
576 struct page *pg;
577 int ret = 0;
578#ifdef CONFIG_MEMCG
579 struct mem_cgroup *memcg, *old_memcg;
580
581 memcg = bpf_map_get_memcg(map);
582 old_memcg = set_active_memcg(memcg);
583#endif
584 for (i = 0; i < nr_pages; i++) {
585 pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);
586
587 if (pg) {
588 pages[i] = pg;
589 continue;
590 }
591 for (j = 0; j < i; j++)
592 __free_page(pages[j]);
593 ret = -ENOMEM;
594 break;
595 }
596
597#ifdef CONFIG_MEMCG
598 set_active_memcg(old_memcg);
599 mem_cgroup_put(memcg);
600#endif
601 return ret;
602}
603
605static int btf_field_cmp(const void *a, const void *b)
606{
607 const struct btf_field *f1 = a, *f2 = b;
608
609 if (f1->offset < f2->offset)
610 return -1;
611 else if (f1->offset > f2->offset)
612 return 1;
613 return 0;
614}
615
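/* Find the special field at @offset in @rec, if its type is in @field_mask.
 * rec->fields is kept sorted by offset, so a binary search is sufficient.
 */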
616struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
617 u32 field_mask)
618{
619 struct btf_field *field;
620
621 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
622 return NULL;
623 field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
624 if (!field || !(field->type & field_mask))
625 return NULL;
626 return field;
627}
628
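/* Release the references held by a btf_record's fields (module and BTF
 * references taken for kptr/uptr fields) and free the record itself.
 */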
629void btf_record_free(struct btf_record *rec)
630{
631 int i;
632
633 if (IS_ERR_OR_NULL(rec))
634 return;
635 for (i = 0; i < rec->cnt; i++) {
636 switch (rec->fields[i].type) {
637 case BPF_KPTR_UNREF:
638 case BPF_KPTR_REF:
639 case BPF_KPTR_PERCPU:
640 case BPF_UPTR:
641 if (rec->fields[i].kptr.module)
642 module_put(rec->fields[i].kptr.module);
643 if (btf_is_kernel(rec->fields[i].kptr.btf))
644 btf_put(rec->fields[i].kptr.btf);
645 break;
646 case BPF_LIST_HEAD:
647 case BPF_LIST_NODE:
648 case BPF_RB_ROOT:
649 case BPF_RB_NODE:
650 case BPF_SPIN_LOCK:
651 case BPF_TIMER:
652 case BPF_REFCOUNT:
653 case BPF_WORKQUEUE:
654 /* Nothing to release */
655 break;
656 default:
657 WARN_ON_ONCE(1);
658 continue;
659 }
660 }
661 kfree(rec);
662}
663
664void bpf_map_free_record(struct bpf_map *map)
665{
666 btf_record_free(map->record);
667 map->record = NULL;
668}
669
670struct btf_record *btf_record_dup(const struct btf_record *rec)
671{
672 const struct btf_field *fields;
673 struct btf_record *new_rec;
674 int ret, size, i;
675
676 if (IS_ERR_OR_NULL(rec))
677 return NULL;
678 size = offsetof(struct btf_record, fields[rec->cnt]);
679 new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
680 if (!new_rec)
681 return ERR_PTR(-ENOMEM);
682 /* Do a deep copy of the btf_record */
683 fields = rec->fields;
684 new_rec->cnt = 0;
685 for (i = 0; i < rec->cnt; i++) {
686 switch (fields[i].type) {
687 case BPF_KPTR_UNREF:
688 case BPF_KPTR_REF:
689 case BPF_KPTR_PERCPU:
690 case BPF_UPTR:
691 if (btf_is_kernel(fields[i].kptr.btf))
692 btf_get(fields[i].kptr.btf);
693 if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
694 ret = -ENXIO;
695 goto free;
696 }
697 break;
698 case BPF_LIST_HEAD:
699 case BPF_LIST_NODE:
700 case BPF_RB_ROOT:
701 case BPF_RB_NODE:
702 case BPF_SPIN_LOCK:
703 case BPF_TIMER:
704 case BPF_REFCOUNT:
705 case BPF_WORKQUEUE:
706 /* Nothing to acquire */
707 break;
708 default:
709 ret = -EFAULT;
710 WARN_ON_ONCE(1);
711 goto free;
712 }
713 new_rec->cnt++;
714 }
715 return new_rec;
716free:
717 btf_record_free(new_rec);
718 return ERR_PTR(ret);
719}
720
721bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
722{
723 bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
724 int size;
725
726 if (!a_has_fields && !b_has_fields)
727 return true;
728 if (a_has_fields != b_has_fields)
729 return false;
730 if (rec_a->cnt != rec_b->cnt)
731 return false;
732 size = offsetof(struct btf_record, fields[rec_a->cnt]);
733 /* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
734 * members are zeroed out. So memcmp is safe to do without worrying
735 * about padding/unused fields.
736 *
	 * While spin_lock, timer, and kptr have no relation to the map BTF,
	 * list_head metadata is specific to the map BTF, the btf and
	 * value_rec members in particular: btf is the map BTF, while
	 * value_rec points to a btf_record in that map BTF.
	 *
	 * So while, by default, we don't rely on the map BTF (which the
	 * records were parsed from) matching for both records, which would
	 * not be backwards compatible, in case list_head is part of it, we
	 * implicitly rely on that by way of depending on memcmp succeeding
	 * on it.
746 */
747 return !memcmp(rec_a, rec_b, size);
748}
749
750void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
751{
752 if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
753 return;
754 bpf_timer_cancel_and_free(obj + rec->timer_off);
755}
756
757void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
758{
759 if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
760 return;
761 bpf_wq_cancel_and_free(obj + rec->wq_off);
762}
763
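/* Release all special fields embedded in a map value: cancel timers and
 * workqueues, drop or destruct kptrs, unpin uptrs, and drain list/rbtree
 * roots. The caller must ensure nothing can still be using @obj.
 */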
764void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
765{
766 const struct btf_field *fields;
767 int i;
768
769 if (IS_ERR_OR_NULL(rec))
770 return;
771 fields = rec->fields;
772 for (i = 0; i < rec->cnt; i++) {
773 struct btf_struct_meta *pointee_struct_meta;
774 const struct btf_field *field = &fields[i];
775 void *field_ptr = obj + field->offset;
776 void *xchgd_field;
777
778 switch (fields[i].type) {
779 case BPF_SPIN_LOCK:
780 break;
781 case BPF_TIMER:
782 bpf_timer_cancel_and_free(field_ptr);
783 break;
784 case BPF_WORKQUEUE:
785 bpf_wq_cancel_and_free(field_ptr);
786 break;
787 case BPF_KPTR_UNREF:
788 WRITE_ONCE(*(u64 *)field_ptr, 0);
789 break;
790 case BPF_KPTR_REF:
791 case BPF_KPTR_PERCPU:
792 xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
793 if (!xchgd_field)
794 break;
795
796 if (!btf_is_kernel(field->kptr.btf)) {
797 pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
798 field->kptr.btf_id);
799 migrate_disable();
800 __bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
801 pointee_struct_meta->record : NULL,
802 fields[i].type == BPF_KPTR_PERCPU);
803 migrate_enable();
804 } else {
805 field->kptr.dtor(xchgd_field);
806 }
807 break;
808 case BPF_UPTR:
809 /* The caller ensured that no one is using the uptr */
810 unpin_uptr_kaddr(*(void **)field_ptr);
811 break;
812 case BPF_LIST_HEAD:
813 if (WARN_ON_ONCE(rec->spin_lock_off < 0))
814 continue;
815 bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
816 break;
817 case BPF_RB_ROOT:
818 if (WARN_ON_ONCE(rec->spin_lock_off < 0))
819 continue;
820 bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
821 break;
822 case BPF_LIST_NODE:
823 case BPF_RB_NODE:
824 case BPF_REFCOUNT:
825 break;
826 default:
827 WARN_ON_ONCE(1);
828 continue;
829 }
830 }
831}
832
833static void bpf_map_free(struct bpf_map *map)
834{
835 struct btf_record *rec = map->record;
836 struct btf *btf = map->btf;
837
838 /* implementation dependent freeing */
839 map->ops->map_free(map);
	/* Delay freeing of the btf_record for maps, as the map_free
	 * callback usually needs access to it. It is better to do it here
	 * than to require each callback to do the freeing manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record
	 * was already freed by the map_free callback in the map-in-map
	 * case, which eventually calls bpf_map_free_meta(), since
	 * inner_map_meta is only a template bpf_map struct used during
	 * verification.
	 */
849 btf_record_free(rec);
850 /* Delay freeing of btf for maps, as map_free callback may need
851 * struct_meta info which will be freed with btf_put().
852 */
853 btf_put(btf);
854}
855
856/* called from workqueue */
857static void bpf_map_free_deferred(struct work_struct *work)
858{
859 struct bpf_map *map = container_of(work, struct bpf_map, work);
860
861 security_bpf_map_free(map);
862 bpf_map_release_memcg(map);
863 bpf_map_free(map);
864}
865
866static void bpf_map_put_uref(struct bpf_map *map)
867{
868 if (atomic64_dec_and_test(&map->usercnt)) {
869 if (map->ops->map_release_uref)
870 map->ops->map_release_uref(map);
871 }
872}
873
874static void bpf_map_free_in_work(struct bpf_map *map)
875{
876 INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex (e.g. slab_mutex).
	 */
880 queue_work(system_unbound_wq, &map->work);
881}
882
883static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
884{
885 bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
886}
887
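/* Called after an RCU tasks trace grace period. If that grace period does
 * not also imply a regular RCU grace period, chain an extra call_rcu()
 * before freeing the map.
 */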
888static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
889{
890 if (rcu_trace_implies_rcu_gp())
891 bpf_map_free_rcu_gp(rcu);
892 else
893 call_rcu(rcu, bpf_map_free_rcu_gp);
894}
895
896/* decrement map refcnt and schedule it for freeing via workqueue
897 * (underlying map implementation ops->map_free() might sleep)
898 */
899void bpf_map_put(struct bpf_map *map)
900{
901 if (atomic64_dec_and_test(&map->refcnt)) {
902 /* bpf_map_free_id() must be called first */
903 bpf_map_free_id(map);
904
905 WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
906 if (READ_ONCE(map->free_after_mult_rcu_gp))
907 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
908 else if (READ_ONCE(map->free_after_rcu_gp))
909 call_rcu(&map->rcu, bpf_map_free_rcu_gp);
910 else
911 bpf_map_free_in_work(map);
912 }
913}
914EXPORT_SYMBOL_GPL(bpf_map_put);
915
916void bpf_map_put_with_uref(struct bpf_map *map)
917{
918 bpf_map_put_uref(map);
919 bpf_map_put(map);
920}
921
922static int bpf_map_release(struct inode *inode, struct file *filp)
923{
924 struct bpf_map *map = filp->private_data;
925
926 if (map->ops->map_release)
927 map->ops->map_release(map, filp);
928
929 bpf_map_put_with_uref(map);
930 return 0;
931}
932
933static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
934{
935 fmode_t mode = fd_file(f)->f_mode;
936
	/* Our file permissions may have been overridden by the global
	 * map permissions on the syscall side.
	 */
940 if (READ_ONCE(map->frozen))
941 mode &= ~FMODE_CAN_WRITE;
942 return mode;
943}
944
945#ifdef CONFIG_PROC_FS
946/* Show the memory usage of a bpf map */
947static u64 bpf_map_memory_usage(const struct bpf_map *map)
948{
949 return map->ops->map_mem_usage(map);
950}
951
952static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
953{
954 struct bpf_map *map = filp->private_data;
955 u32 type = 0, jited = 0;
956
957 if (map_type_contains_progs(map)) {
958 spin_lock(&map->owner.lock);
959 type = map->owner.type;
960 jited = map->owner.jited;
961 spin_unlock(&map->owner.lock);
962 }
963
964 seq_printf(m,
965 "map_type:\t%u\n"
966 "key_size:\t%u\n"
967 "value_size:\t%u\n"
968 "max_entries:\t%u\n"
969 "map_flags:\t%#x\n"
970 "map_extra:\t%#llx\n"
971 "memlock:\t%llu\n"
972 "map_id:\t%u\n"
973 "frozen:\t%u\n",
974 map->map_type,
975 map->key_size,
976 map->value_size,
977 map->max_entries,
978 map->map_flags,
979 (unsigned long long)map->map_extra,
980 bpf_map_memory_usage(map),
981 map->id,
982 READ_ONCE(map->frozen));
983 if (type) {
984 seq_printf(m, "owner_prog_type:\t%u\n", type);
985 seq_printf(m, "owner_jited:\t%u\n", jited);
986 }
987}
988#endif
989
990static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
991 loff_t *ppos)
992{
993 /* We need this handler such that alloc_file() enables
994 * f_mode with FMODE_CAN_READ.
995 */
996 return -EINVAL;
997}
998
999static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
1000 size_t siz, loff_t *ppos)
1001{
1002 /* We need this handler such that alloc_file() enables
1003 * f_mode with FMODE_CAN_WRITE.
1004 */
1005 return -EINVAL;
1006}
1007
1008/* called for any extra memory-mapped regions (except initial) */
1009static void bpf_map_mmap_open(struct vm_area_struct *vma)
1010{
1011 struct bpf_map *map = vma->vm_file->private_data;
1012
1013 if (vma->vm_flags & VM_MAYWRITE)
1014 bpf_map_write_active_inc(map);
1015}
1016
/* called for all unmapped memory regions (including the initial one) */
1018static void bpf_map_mmap_close(struct vm_area_struct *vma)
1019{
1020 struct bpf_map *map = vma->vm_file->private_data;
1021
1022 if (vma->vm_flags & VM_MAYWRITE)
1023 bpf_map_write_active_dec(map);
1024}
1025
1026static const struct vm_operations_struct bpf_map_default_vmops = {
1027 .open = bpf_map_mmap_open,
1028 .close = bpf_map_mmap_close,
1029};
1030
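/* mmap() a map's memory into user space. Maps with special BTF-managed
 * fields cannot be mmap'ed. Writable mappings are refused for frozen or
 * BPF_F_RDONLY_PROG maps, and are otherwise accounted via
 * bpf_map_write_active_inc(), which makes a later BPF_MAP_FREEZE fail.
 */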
1031static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
1032{
1033 struct bpf_map *map = filp->private_data;
1034 int err = 0;
1035
1036 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
1037 return -ENOTSUPP;
1038
1039 if (!(vma->vm_flags & VM_SHARED))
1040 return -EINVAL;
1041
1042 mutex_lock(&map->freeze_mutex);
1043
1044 if (vma->vm_flags & VM_WRITE) {
1045 if (map->frozen) {
1046 err = -EPERM;
1047 goto out;
1048 }
		/* The map is meant to be read-only, so do not allow mapping
		 * it as writable: that could leak a writable page reference
		 * and allow user space to still modify the contents after
		 * freezing, while the verifier assumes they do not change.
		 */
1054 if (map->map_flags & BPF_F_RDONLY_PROG) {
1055 err = -EACCES;
1056 goto out;
1057 }
1058 bpf_map_write_active_inc(map);
1059 }
1060out:
1061 mutex_unlock(&map->freeze_mutex);
1062 if (err)
1063 return err;
1064
1065 /* set default open/close callbacks */
1066 vma->vm_ops = &bpf_map_default_vmops;
1067 vma->vm_private_data = map;
1068 vm_flags_clear(vma, VM_MAYEXEC);
	/* If the mapping is read-only, then disallow potentially re-mapping
	 * it with PROT_WRITE by dropping the VM_MAYWRITE flag. This
	 * VM_MAYWRITE clearing means that, as far as a BPF map's
	 * memory-mapped VMAs are concerned, VM_WRITE and VM_MAYWRITE are
	 * equivalent: if one of them is set, both should be set, so we can
	 * forget about VM_MAYWRITE and always check just VM_WRITE.
	 */
1076 if (!(vma->vm_flags & VM_WRITE))
1077 vm_flags_clear(vma, VM_MAYWRITE);
1078
1079 err = map->ops->map_mmap(map, vma);
1080 if (err) {
1081 if (vma->vm_flags & VM_WRITE)
1082 bpf_map_write_active_dec(map);
1083 }
1084
1085 return err;
1086}
1087
1088static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
1089{
1090 struct bpf_map *map = filp->private_data;
1091
1092 if (map->ops->map_poll)
1093 return map->ops->map_poll(map, filp, pts);
1094
1095 return EPOLLERR;
1096}
1097
1098static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
1099 unsigned long len, unsigned long pgoff,
1100 unsigned long flags)
1101{
1102 struct bpf_map *map = filp->private_data;
1103
1104 if (map->ops->map_get_unmapped_area)
1105 return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
1106#ifdef CONFIG_MMU
1107 return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
1108#else
1109 return addr;
1110#endif
1111}
1112
1113const struct file_operations bpf_map_fops = {
1114#ifdef CONFIG_PROC_FS
1115 .show_fdinfo = bpf_map_show_fdinfo,
1116#endif
1117 .release = bpf_map_release,
1118 .read = bpf_dummy_read,
1119 .write = bpf_dummy_write,
1120 .mmap = bpf_map_mmap,
1121 .poll = bpf_map_poll,
1122 .get_unmapped_area = bpf_get_unmapped_area,
1123};
1124
1125int bpf_map_new_fd(struct bpf_map *map, int flags)
1126{
1127 int ret;
1128
1129 ret = security_bpf_map(map, OPEN_FMODE(flags));
1130 if (ret < 0)
1131 return ret;
1132
1133 return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
1134 flags | O_CLOEXEC);
1135}
1136
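/* Translate the BPF_F_RDONLY/BPF_F_WRONLY object flags into O_* file flags. */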
1137int bpf_get_file_flag(int flags)
1138{
1139 if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
1140 return -EINVAL;
1141 if (flags & BPF_F_RDONLY)
1142 return O_RDONLY;
1143 if (flags & BPF_F_WRONLY)
1144 return O_WRONLY;
1145 return O_RDWR;
1146}
1147
1148/* helper macro to check that unused fields 'union bpf_attr' are zero */
1149#define CHECK_ATTR(CMD) \
1150 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
1151 sizeof(attr->CMD##_LAST_FIELD), 0, \
1152 sizeof(*attr) - \
1153 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
1154 sizeof(attr->CMD##_LAST_FIELD)) != NULL
1155
/* dst and src must each have at least "size" bytes.
 * Return the string length on success and < 0 on error.
 */
1159int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
1160{
1161 const char *end = src + size;
1162 const char *orig_src = src;
1163
1164 memset(dst, 0, size);
1165 /* Copy all isalnum(), '_' and '.' chars. */
1166 while (src < end && *src) {
1167 if (!isalnum(*src) &&
1168 *src != '_' && *src != '.')
1169 return -EINVAL;
1170 *dst++ = *src++;
1171 }
1172
1173 /* No '\0' found in "size" number of bytes */
1174 if (src == end)
1175 return -EINVAL;
1176
1177 return src - orig_src;
1178}
1179
1180int map_check_no_btf(const struct bpf_map *map,
1181 const struct btf *btf,
1182 const struct btf_type *key_type,
1183 const struct btf_type *value_type)
1184{
1185 return -ENOTSUPP;
1186}
1187
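/* Validate the map's key/value BTF types against key_size/value_size and
 * parse any special fields (spin lock, timer, kptr, list, rbtree, ...)
 * from the value type, checking that the map type supports each of them.
 */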
1188static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
1189 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
1190{
1191 const struct btf_type *key_type, *value_type;
1192 u32 key_size, value_size;
1193 int ret = 0;
1194
1195 /* Some maps allow key to be unspecified. */
1196 if (btf_key_id) {
1197 key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
1198 if (!key_type || key_size != map->key_size)
1199 return -EINVAL;
1200 } else {
1201 key_type = btf_type_by_id(btf, 0);
1202 if (!map->ops->map_check_btf)
1203 return -EINVAL;
1204 }
1205
1206 value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
1207 if (!value_type || value_size != map->value_size)
1208 return -EINVAL;
1209
1210 map->record = btf_parse_fields(btf, value_type,
1211 BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
1212 BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR,
1213 map->value_size);
1214 if (!IS_ERR_OR_NULL(map->record)) {
1215 int i;
1216
1217 if (!bpf_token_capable(token, CAP_BPF)) {
1218 ret = -EPERM;
1219 goto free_map_tab;
1220 }
1221 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
1222 ret = -EACCES;
1223 goto free_map_tab;
1224 }
1225 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
1226 switch (map->record->field_mask & (1 << i)) {
1227 case 0:
1228 continue;
1229 case BPF_SPIN_LOCK:
1230 if (map->map_type != BPF_MAP_TYPE_HASH &&
1231 map->map_type != BPF_MAP_TYPE_ARRAY &&
1232 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
1233 map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1234 map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1235 map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1236 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1237 ret = -EOPNOTSUPP;
1238 goto free_map_tab;
1239 }
1240 break;
1241 case BPF_TIMER:
1242 case BPF_WORKQUEUE:
1243 if (map->map_type != BPF_MAP_TYPE_HASH &&
1244 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1245 map->map_type != BPF_MAP_TYPE_ARRAY) {
1246 ret = -EOPNOTSUPP;
1247 goto free_map_tab;
1248 }
1249 break;
1250 case BPF_KPTR_UNREF:
1251 case BPF_KPTR_REF:
1252 case BPF_KPTR_PERCPU:
1253 case BPF_REFCOUNT:
1254 if (map->map_type != BPF_MAP_TYPE_HASH &&
1255 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
1256 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1257 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
1258 map->map_type != BPF_MAP_TYPE_ARRAY &&
1259 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
1260 map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1261 map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1262 map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1263 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1264 ret = -EOPNOTSUPP;
1265 goto free_map_tab;
1266 }
1267 break;
1268 case BPF_UPTR:
1269 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
1270 ret = -EOPNOTSUPP;
1271 goto free_map_tab;
1272 }
1273 break;
1274 case BPF_LIST_HEAD:
1275 case BPF_RB_ROOT:
1276 if (map->map_type != BPF_MAP_TYPE_HASH &&
1277 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1278 map->map_type != BPF_MAP_TYPE_ARRAY) {
1279 ret = -EOPNOTSUPP;
1280 goto free_map_tab;
1281 }
1282 break;
1283 default:
1284 /* Fail if map_type checks are missing for a field type */
1285 ret = -EOPNOTSUPP;
1286 goto free_map_tab;
1287 }
1288 }
1289 }
1290
1291 ret = btf_check_and_fixup_fields(btf, map->record);
1292 if (ret < 0)
1293 goto free_map_tab;
1294
1295 if (map->ops->map_check_btf) {
1296 ret = map->ops->map_check_btf(map, btf, key_type, value_type);
1297 if (ret < 0)
1298 goto free_map_tab;
1299 }
1300
1301 return ret;
1302free_map_tab:
1303 bpf_map_free_record(map);
1304 return ret;
1305}
1306
1307static bool bpf_net_capable(void)
1308{
1309 return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
1310}
1311
1312#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
1313/* called via syscall */
1314static int map_create(union bpf_attr *attr)
1315{
1316 const struct bpf_map_ops *ops;
1317 struct bpf_token *token = NULL;
1318 int numa_node = bpf_map_attr_numa_node(attr);
1319 u32 map_type = attr->map_type;
1320 struct bpf_map *map;
1321 bool token_flag;
1322 int f_flags;
1323 int err;
1324
1325 err = CHECK_ATTR(BPF_MAP_CREATE);
1326 if (err)
1327 return -EINVAL;
1328
	/* Check the BPF_F_TOKEN_FD flag, remember if it's set, and then
	 * clear it to avoid per-map-type checks tripping on an unknown flag.
	 */
1332 token_flag = attr->map_flags & BPF_F_TOKEN_FD;
1333 attr->map_flags &= ~BPF_F_TOKEN_FD;
1334
1335 if (attr->btf_vmlinux_value_type_id) {
1336 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
1337 attr->btf_key_type_id || attr->btf_value_type_id)
1338 return -EINVAL;
1339 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
1340 return -EINVAL;
1341 }
1342
1343 if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
1344 attr->map_type != BPF_MAP_TYPE_ARENA &&
1345 attr->map_extra != 0)
1346 return -EINVAL;
1347
1348 f_flags = bpf_get_file_flag(attr->map_flags);
1349 if (f_flags < 0)
1350 return f_flags;
1351
1352 if (numa_node != NUMA_NO_NODE &&
1353 ((unsigned int)numa_node >= nr_node_ids ||
1354 !node_online(numa_node)))
1355 return -EINVAL;
1356
1357 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1358 map_type = attr->map_type;
1359 if (map_type >= ARRAY_SIZE(bpf_map_types))
1360 return -EINVAL;
1361 map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
1362 ops = bpf_map_types[map_type];
1363 if (!ops)
1364 return -EINVAL;
1365
1366 if (ops->map_alloc_check) {
1367 err = ops->map_alloc_check(attr);
1368 if (err)
1369 return err;
1370 }
1371 if (attr->map_ifindex)
1372 ops = &bpf_map_offload_ops;
1373 if (!ops->map_mem_usage)
1374 return -EINVAL;
1375
1376 if (token_flag) {
1377 token = bpf_token_get_from_fd(attr->map_token_fd);
1378 if (IS_ERR(token))
1379 return PTR_ERR(token);
1380
1381 /* if current token doesn't grant map creation permissions,
1382 * then we can't use this token, so ignore it and rely on
1383 * system-wide capabilities checks
1384 */
1385 if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
1386 !bpf_token_allow_map_type(token, attr->map_type)) {
1387 bpf_token_put(token);
1388 token = NULL;
1389 }
1390 }
1391
1392 err = -EPERM;
1393
1394 /* Intent here is for unprivileged_bpf_disabled to block BPF map
1395 * creation for unprivileged users; other actions depend
1396 * on fd availability and access to bpffs, so are dependent on
1397 * object creation success. Even with unprivileged BPF disabled,
1398 * capability checks are still carried out.
1399 */
1400 if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
1401 goto put_token;
1402
1403 /* check privileged map type permissions */
1404 switch (map_type) {
1405 case BPF_MAP_TYPE_ARRAY:
1406 case BPF_MAP_TYPE_PERCPU_ARRAY:
1407 case BPF_MAP_TYPE_PROG_ARRAY:
1408 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1409 case BPF_MAP_TYPE_CGROUP_ARRAY:
1410 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1411 case BPF_MAP_TYPE_HASH:
1412 case BPF_MAP_TYPE_PERCPU_HASH:
1413 case BPF_MAP_TYPE_HASH_OF_MAPS:
1414 case BPF_MAP_TYPE_RINGBUF:
1415 case BPF_MAP_TYPE_USER_RINGBUF:
1416 case BPF_MAP_TYPE_CGROUP_STORAGE:
1417 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
1418 /* unprivileged */
1419 break;
1420 case BPF_MAP_TYPE_SK_STORAGE:
1421 case BPF_MAP_TYPE_INODE_STORAGE:
1422 case BPF_MAP_TYPE_TASK_STORAGE:
1423 case BPF_MAP_TYPE_CGRP_STORAGE:
1424 case BPF_MAP_TYPE_BLOOM_FILTER:
1425 case BPF_MAP_TYPE_LPM_TRIE:
1426 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
1427 case BPF_MAP_TYPE_STACK_TRACE:
1428 case BPF_MAP_TYPE_QUEUE:
1429 case BPF_MAP_TYPE_STACK:
1430 case BPF_MAP_TYPE_LRU_HASH:
1431 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
1432 case BPF_MAP_TYPE_STRUCT_OPS:
1433 case BPF_MAP_TYPE_CPUMAP:
1434 case BPF_MAP_TYPE_ARENA:
1435 if (!bpf_token_capable(token, CAP_BPF))
1436 goto put_token;
1437 break;
1438 case BPF_MAP_TYPE_SOCKMAP:
1439 case BPF_MAP_TYPE_SOCKHASH:
1440 case BPF_MAP_TYPE_DEVMAP:
1441 case BPF_MAP_TYPE_DEVMAP_HASH:
1442 case BPF_MAP_TYPE_XSKMAP:
1443 if (!bpf_token_capable(token, CAP_NET_ADMIN))
1444 goto put_token;
1445 break;
1446 default:
1447 WARN(1, "unsupported map type %d", map_type);
1448 goto put_token;
1449 }
1450
1451 map = ops->map_alloc(attr);
1452 if (IS_ERR(map)) {
1453 err = PTR_ERR(map);
1454 goto put_token;
1455 }
1456 map->ops = ops;
1457 map->map_type = map_type;
1458
1459 err = bpf_obj_name_cpy(map->name, attr->map_name,
1460 sizeof(attr->map_name));
1461 if (err < 0)
1462 goto free_map;
1463
1464 atomic64_set(&map->refcnt, 1);
1465 atomic64_set(&map->usercnt, 1);
1466 mutex_init(&map->freeze_mutex);
1467 spin_lock_init(&map->owner.lock);
1468
1469 if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel
	     * counterpart. Thus, attr->btf_fd has
	     * to be valid as well.
	     */
1476 attr->btf_vmlinux_value_type_id) {
1477 struct btf *btf;
1478
1479 btf = btf_get_by_fd(attr->btf_fd);
1480 if (IS_ERR(btf)) {
1481 err = PTR_ERR(btf);
1482 goto free_map;
1483 }
1484 if (btf_is_kernel(btf)) {
1485 btf_put(btf);
1486 err = -EACCES;
1487 goto free_map;
1488 }
1489 map->btf = btf;
1490
1491 if (attr->btf_value_type_id) {
1492 err = map_check_btf(map, token, btf, attr->btf_key_type_id,
1493 attr->btf_value_type_id);
1494 if (err)
1495 goto free_map;
1496 }
1497
1498 map->btf_key_type_id = attr->btf_key_type_id;
1499 map->btf_value_type_id = attr->btf_value_type_id;
1500 map->btf_vmlinux_value_type_id =
1501 attr->btf_vmlinux_value_type_id;
1502 }
1503
1504 err = security_bpf_map_create(map, attr, token);
1505 if (err)
1506 goto free_map_sec;
1507
1508 err = bpf_map_alloc_id(map);
1509 if (err)
1510 goto free_map_sec;
1511
1512 bpf_map_save_memcg(map);
1513 bpf_token_put(token);
1514
1515 err = bpf_map_new_fd(map, f_flags);
1516 if (err < 0) {
		/* Failed to allocate an fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map to userspace,
		 * and userspace may already have taken a reference to it
		 * through BPF_MAP_GET_FD_BY_ID.
		 */
1523 bpf_map_put_with_uref(map);
1524 return err;
1525 }
1526
1527 return err;
1528
1529free_map_sec:
1530 security_bpf_map_free(map);
1531free_map:
1532 bpf_map_free(map);
1533put_token:
1534 bpf_token_put(token);
1535 return err;
1536}
1537
1538void bpf_map_inc(struct bpf_map *map)
1539{
1540 atomic64_inc(&map->refcnt);
1541}
1542EXPORT_SYMBOL_GPL(bpf_map_inc);
1543
1544void bpf_map_inc_with_uref(struct bpf_map *map)
1545{
1546 atomic64_inc(&map->refcnt);
1547 atomic64_inc(&map->usercnt);
1548}
1549EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
1550
1551struct bpf_map *bpf_map_get(u32 ufd)
1552{
1553 CLASS(fd, f)(ufd);
1554 struct bpf_map *map = __bpf_map_get(f);
1555
1556 if (!IS_ERR(map))
1557 bpf_map_inc(map);
1558
1559 return map;
1560}
1561EXPORT_SYMBOL(bpf_map_get);
1562
1563struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1564{
1565 CLASS(fd, f)(ufd);
1566 struct bpf_map *map = __bpf_map_get(f);
1567
1568 if (!IS_ERR(map))
1569 bpf_map_inc_with_uref(map);
1570
1571 return map;
1572}
1573
1574/* map_idr_lock should have been held or the map should have been
1575 * protected by rcu read lock.
1576 */
1577struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
1578{
1579 int refold;
1580
1581 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
1582 if (!refold)
1583 return ERR_PTR(-ENOENT);
1584 if (uref)
1585 atomic64_inc(&map->usercnt);
1586
1587 return map;
1588}
1589
1590struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
1591{
1592 spin_lock_bh(&map_idr_lock);
1593 map = __bpf_map_inc_not_zero(map, false);
1594 spin_unlock_bh(&map_idr_lock);
1595
1596 return map;
1597}
1598EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
1599
1600int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1601{
1602 return -ENOTSUPP;
1603}
1604
1605static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1606{
1607 if (key_size)
1608 return vmemdup_user(ukey, key_size);
1609
1610 if (ukey)
1611 return ERR_PTR(-EINVAL);
1612
1613 return NULL;
1614}
1615
1616static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1617{
1618 if (key_size)
1619 return kvmemdup_bpfptr(ukey, key_size);
1620
1621 if (!bpfptr_is_null(ukey))
1622 return ERR_PTR(-EINVAL);
1623
1624 return NULL;
1625}
1626
1627/* last field in 'union bpf_attr' used by this command */
1628#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1629
1630static int map_lookup_elem(union bpf_attr *attr)
1631{
1632 void __user *ukey = u64_to_user_ptr(attr->key);
1633 void __user *uvalue = u64_to_user_ptr(attr->value);
1634 struct bpf_map *map;
1635 void *key, *value;
1636 u32 value_size;
1637 int err;
1638
1639 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1640 return -EINVAL;
1641
1642 if (attr->flags & ~BPF_F_LOCK)
1643 return -EINVAL;
1644
1645 CLASS(fd, f)(attr->map_fd);
1646 map = __bpf_map_get(f);
1647 if (IS_ERR(map))
1648 return PTR_ERR(map);
1649 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
1650 return -EPERM;
1651
1652 if ((attr->flags & BPF_F_LOCK) &&
1653 !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1654 return -EINVAL;
1655
1656 key = __bpf_copy_key(ukey, map->key_size);
1657 if (IS_ERR(key))
1658 return PTR_ERR(key);
1659
1660 value_size = bpf_map_value_size(map);
1661
1662 err = -ENOMEM;
1663 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1664 if (!value)
1665 goto free_key;
1666
1667 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1668 if (copy_from_user(value, uvalue, value_size))
1669 err = -EFAULT;
1670 else
1671 err = bpf_map_copy_value(map, key, value, attr->flags);
1672 goto free_value;
1673 }
1674
1675 err = bpf_map_copy_value(map, key, value, attr->flags);
1676 if (err)
1677 goto free_value;
1678
1679 err = -EFAULT;
1680 if (copy_to_user(uvalue, value, value_size) != 0)
1681 goto free_value;
1682
1683 err = 0;
1684
1685free_value:
1686 kvfree(value);
1687free_key:
1688 kvfree(key);
1689 return err;
1690}
1691
1692
1693#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1694
1695static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1696{
1697 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1698 bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1699 struct bpf_map *map;
1700 void *key, *value;
1701 u32 value_size;
1702 int err;
1703
1704 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1705 return -EINVAL;
1706
1707 CLASS(fd, f)(attr->map_fd);
1708 map = __bpf_map_get(f);
1709 if (IS_ERR(map))
1710 return PTR_ERR(map);
1711 bpf_map_write_active_inc(map);
1712 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1713 err = -EPERM;
1714 goto err_put;
1715 }
1716
1717 if ((attr->flags & BPF_F_LOCK) &&
1718 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1719 err = -EINVAL;
1720 goto err_put;
1721 }
1722
1723 key = ___bpf_copy_key(ukey, map->key_size);
1724 if (IS_ERR(key)) {
1725 err = PTR_ERR(key);
1726 goto err_put;
1727 }
1728
1729 value_size = bpf_map_value_size(map);
1730 value = kvmemdup_bpfptr(uvalue, value_size);
1731 if (IS_ERR(value)) {
1732 err = PTR_ERR(value);
1733 goto free_key;
1734 }
1735
1736 err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
1737 if (!err)
1738 maybe_wait_bpf_programs(map);
1739
1740 kvfree(value);
1741free_key:
1742 kvfree(key);
1743err_put:
1744 bpf_map_write_active_dec(map);
1745 return err;
1746}
1747
1748#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1749
1750static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
1751{
1752 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1753 struct bpf_map *map;
1754 void *key;
1755 int err;
1756
1757 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1758 return -EINVAL;
1759
1760 CLASS(fd, f)(attr->map_fd);
1761 map = __bpf_map_get(f);
1762 if (IS_ERR(map))
1763 return PTR_ERR(map);
1764 bpf_map_write_active_inc(map);
1765 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1766 err = -EPERM;
1767 goto err_put;
1768 }
1769
1770 key = ___bpf_copy_key(ukey, map->key_size);
1771 if (IS_ERR(key)) {
1772 err = PTR_ERR(key);
1773 goto err_put;
1774 }
1775
1776 if (bpf_map_is_offloaded(map)) {
1777 err = bpf_map_offload_delete_elem(map, key);
1778 goto out;
1779 } else if (IS_FD_PROG_ARRAY(map) ||
1780 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1781 /* These maps require sleepable context */
1782 err = map->ops->map_delete_elem(map, key);
1783 goto out;
1784 }
1785
1786 bpf_disable_instrumentation();
1787 rcu_read_lock();
1788 err = map->ops->map_delete_elem(map, key);
1789 rcu_read_unlock();
1790 bpf_enable_instrumentation();
1791 if (!err)
1792 maybe_wait_bpf_programs(map);
1793out:
1794 kvfree(key);
1795err_put:
1796 bpf_map_write_active_dec(map);
1797 return err;
1798}
1799
1800/* last field in 'union bpf_attr' used by this command */
1801#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1802
1803static int map_get_next_key(union bpf_attr *attr)
1804{
1805 void __user *ukey = u64_to_user_ptr(attr->key);
1806 void __user *unext_key = u64_to_user_ptr(attr->next_key);
1807 struct bpf_map *map;
1808 void *key, *next_key;
1809 int err;
1810
1811 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1812 return -EINVAL;
1813
1814 CLASS(fd, f)(attr->map_fd);
1815 map = __bpf_map_get(f);
1816 if (IS_ERR(map))
1817 return PTR_ERR(map);
1818 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
1819 return -EPERM;
1820
1821 if (ukey) {
1822 key = __bpf_copy_key(ukey, map->key_size);
1823 if (IS_ERR(key))
1824 return PTR_ERR(key);
1825 } else {
1826 key = NULL;
1827 }
1828
1829 err = -ENOMEM;
1830 next_key = kvmalloc(map->key_size, GFP_USER);
1831 if (!next_key)
1832 goto free_key;
1833
1834 if (bpf_map_is_offloaded(map)) {
1835 err = bpf_map_offload_get_next_key(map, key, next_key);
1836 goto out;
1837 }
1838
1839 rcu_read_lock();
1840 err = map->ops->map_get_next_key(map, key, next_key);
1841 rcu_read_unlock();
1842out:
1843 if (err)
1844 goto free_next_key;
1845
1846 err = -EFAULT;
1847 if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1848 goto free_next_key;
1849
1850 err = 0;
1851
1852free_next_key:
1853 kvfree(next_key);
1854free_key:
1855 kvfree(key);
1856 return err;
1857}
1858
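/* Generic implementation of the BPF_MAP_DELETE_BATCH command: delete up to
 * attr->batch.count elements whose keys are read from user space, writing
 * back how many were actually processed.
 */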
1859int generic_map_delete_batch(struct bpf_map *map,
1860 const union bpf_attr *attr,
1861 union bpf_attr __user *uattr)
1862{
1863 void __user *keys = u64_to_user_ptr(attr->batch.keys);
1864 u32 cp, max_count;
1865 int err = 0;
1866 void *key;
1867
1868 if (attr->batch.elem_flags & ~BPF_F_LOCK)
1869 return -EINVAL;
1870
1871 if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1872 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1873 return -EINVAL;
1874 }
1875
1876 max_count = attr->batch.count;
1877 if (!max_count)
1878 return 0;
1879
1880 if (put_user(0, &uattr->batch.count))
1881 return -EFAULT;
1882
1883 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1884 if (!key)
1885 return -ENOMEM;
1886
1887 for (cp = 0; cp < max_count; cp++) {
1888 err = -EFAULT;
1889 if (copy_from_user(key, keys + cp * map->key_size,
1890 map->key_size))
1891 break;
1892
1893 if (bpf_map_is_offloaded(map)) {
1894 err = bpf_map_offload_delete_elem(map, key);
1895 break;
1896 }
1897
1898 bpf_disable_instrumentation();
1899 rcu_read_lock();
1900 err = map->ops->map_delete_elem(map, key);
1901 rcu_read_unlock();
1902 bpf_enable_instrumentation();
1903 if (err)
1904 break;
1905 cond_resched();
1906 }
1907 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1908 err = -EFAULT;
1909
1910 kvfree(key);
1911
1912 return err;
1913}
1914
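/* Generic implementation of the BPF_MAP_UPDATE_BATCH command: update up to
 * attr->batch.count elements from the user-supplied key/value arrays,
 * writing back how many were actually processed.
 */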
1915int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
1916 const union bpf_attr *attr,
1917 union bpf_attr __user *uattr)
1918{
1919 void __user *values = u64_to_user_ptr(attr->batch.values);
1920 void __user *keys = u64_to_user_ptr(attr->batch.keys);
1921 u32 value_size, cp, max_count;
1922 void *key, *value;
1923 int err = 0;
1924
1925 if (attr->batch.elem_flags & ~BPF_F_LOCK)
1926 return -EINVAL;
1927
1928 if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1929 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1930 return -EINVAL;
1931 }
1932
1933 value_size = bpf_map_value_size(map);
1934
1935 max_count = attr->batch.count;
1936 if (!max_count)
1937 return 0;
1938
1939 if (put_user(0, &uattr->batch.count))
1940 return -EFAULT;
1941
1942 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1943 if (!key)
1944 return -ENOMEM;
1945
1946 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1947 if (!value) {
1948 kvfree(key);
1949 return -ENOMEM;
1950 }
1951
1952 for (cp = 0; cp < max_count; cp++) {
1953 err = -EFAULT;
1954 if (copy_from_user(key, keys + cp * map->key_size,
1955 map->key_size) ||
1956 copy_from_user(value, values + cp * value_size, value_size))
1957 break;
1958
1959 err = bpf_map_update_value(map, map_file, key, value,
1960 attr->batch.elem_flags);
1961
1962 if (err)
1963 break;
1964 cond_resched();
1965 }
1966
1967 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1968 err = -EFAULT;
1969
1970 kvfree(value);
1971 kvfree(key);
1972
1973 return err;
1974}
1975
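/* Generic implementation of the BPF_MAP_LOOKUP_BATCH command: walk the map
 * with map_get_next_key() starting from the optional in_batch key, copy up
 * to attr->batch.count key/value pairs to user space, and write the last
 * visited key to out_batch so the next call can resume from there.
 */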
1976int generic_map_lookup_batch(struct bpf_map *map,
1977 const union bpf_attr *attr,
1978 union bpf_attr __user *uattr)
1979{
1980 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1981 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1982 void __user *values = u64_to_user_ptr(attr->batch.values);
1983 void __user *keys = u64_to_user_ptr(attr->batch.keys);
1984 void *buf, *buf_prevkey, *prev_key, *key, *value;
1985 u32 value_size, cp, max_count;
1986 int err;
1987
1988 if (attr->batch.elem_flags & ~BPF_F_LOCK)
1989 return -EINVAL;
1990
1991 if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1992 !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1993 return -EINVAL;
1994
1995 value_size = bpf_map_value_size(map);
1996
1997 max_count = attr->batch.count;
1998 if (!max_count)
1999 return 0;
2000
2001 if (put_user(0, &uattr->batch.count))
2002 return -EFAULT;
2003
2004 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
2005 if (!buf_prevkey)
2006 return -ENOMEM;
2007
2008 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
2009 if (!buf) {
2010 kvfree(buf_prevkey);
2011 return -ENOMEM;
2012 }
2013
2014 err = -EFAULT;
2015 prev_key = NULL;
2016 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
2017 goto free_buf;
2018 key = buf;
2019 value = key + map->key_size;
2020 if (ubatch)
2021 prev_key = buf_prevkey;
2022
2023 for (cp = 0; cp < max_count;) {
2024 rcu_read_lock();
2025 err = map->ops->map_get_next_key(map, prev_key, key);
2026 rcu_read_unlock();
2027 if (err)
2028 break;
2029 err = bpf_map_copy_value(map, key, value,
2030 attr->batch.elem_flags);
2031
2032 if (err == -ENOENT)
2033 goto next_key;
2034
2035 if (err)
2036 goto free_buf;
2037
2038 if (copy_to_user(keys + cp * map->key_size, key,
2039 map->key_size)) {
2040 err = -EFAULT;
2041 goto free_buf;
2042 }
2043 if (copy_to_user(values + cp * value_size, value, value_size)) {
2044 err = -EFAULT;
2045 goto free_buf;
2046 }
2047
2048 cp++;
2049next_key:
2050 if (!prev_key)
2051 prev_key = buf_prevkey;
2052
2053 swap(prev_key, key);
2054 cond_resched();
2055 }
2056
2057 if (err == -EFAULT)
2058 goto free_buf;
2059
2060 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
2061 (cp && copy_to_user(uobatch, prev_key, map->key_size))))
2062 err = -EFAULT;
2063
2064free_buf:
2065 kvfree(buf_prevkey);
2066 kvfree(buf);
2067 return err;
2068}
2069
2070#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
2071
2072static int map_lookup_and_delete_elem(union bpf_attr *attr)
2073{
2074 void __user *ukey = u64_to_user_ptr(attr->key);
2075 void __user *uvalue = u64_to_user_ptr(attr->value);
2076 struct bpf_map *map;
2077 void *key, *value;
2078 u32 value_size;
2079 int err;
2080
2081 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
2082 return -EINVAL;
2083
2084 if (attr->flags & ~BPF_F_LOCK)
2085 return -EINVAL;
2086
2087 CLASS(fd, f)(attr->map_fd);
2088 map = __bpf_map_get(f);
2089 if (IS_ERR(map))
2090 return PTR_ERR(map);
2091 bpf_map_write_active_inc(map);
2092 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
2093 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
2094 err = -EPERM;
2095 goto err_put;
2096 }
2097
2098 if (attr->flags &&
2099 (map->map_type == BPF_MAP_TYPE_QUEUE ||
2100 map->map_type == BPF_MAP_TYPE_STACK)) {
2101 err = -EINVAL;
2102 goto err_put;
2103 }
2104
2105 if ((attr->flags & BPF_F_LOCK) &&
2106 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
2107 err = -EINVAL;
2108 goto err_put;
2109 }
2110
2111 key = __bpf_copy_key(ukey, map->key_size);
2112 if (IS_ERR(key)) {
2113 err = PTR_ERR(key);
2114 goto err_put;
2115 }
2116
2117 value_size = bpf_map_value_size(map);
2118
2119 err = -ENOMEM;
2120 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
2121 if (!value)
2122 goto free_key;
2123
2124 err = -ENOTSUPP;
2125 if (map->map_type == BPF_MAP_TYPE_QUEUE ||
2126 map->map_type == BPF_MAP_TYPE_STACK) {
2127 err = map->ops->map_pop_elem(map, value);
2128 } else if (map->map_type == BPF_MAP_TYPE_HASH ||
2129 map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2130 map->map_type == BPF_MAP_TYPE_LRU_HASH ||
2131 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2132 if (!bpf_map_is_offloaded(map)) {
2133 bpf_disable_instrumentation();
2134 rcu_read_lock();
2135 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
2136 rcu_read_unlock();
2137 bpf_enable_instrumentation();
2138 }
2139 }
2140
2141 if (err)
2142 goto free_value;
2143
2144 if (copy_to_user(uvalue, value, value_size) != 0) {
2145 err = -EFAULT;
2146 goto free_value;
2147 }
2148
2149 err = 0;
2150
2151free_value:
2152 kvfree(value);
2153free_key:
2154 kvfree(key);
2155err_put:
2156 bpf_map_write_active_dec(map);
2157 return err;
2158}
2159
2160#define BPF_MAP_FREEZE_LAST_FIELD map_fd
2161
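/* BPF_MAP_FREEZE: make the map read-only from the syscall side. Freezing
 * fails with -EBUSY while syscall-side writers are active (see
 * bpf_map_write_active()), and struct_ops maps or maps with special fields
 * cannot be frozen at all.
 */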
2162static int map_freeze(const union bpf_attr *attr)
2163{
2164 int err = 0;
2165 struct bpf_map *map;
2166
2167 if (CHECK_ATTR(BPF_MAP_FREEZE))
2168 return -EINVAL;
2169
2170 CLASS(fd, f)(attr->map_fd);
2171 map = __bpf_map_get(f);
2172 if (IS_ERR(map))
2173 return PTR_ERR(map);
2174
2175 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record))
2176 return -ENOTSUPP;
2177
2178 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE))
2179 return -EPERM;
2180
2181 mutex_lock(&map->freeze_mutex);
2182 if (bpf_map_write_active(map)) {
2183 err = -EBUSY;
2184 goto err_put;
2185 }
2186 if (READ_ONCE(map->frozen)) {
2187 err = -EBUSY;
2188 goto err_put;
2189 }
2190
2191 WRITE_ONCE(map->frozen, true);
2192err_put:
2193 mutex_unlock(&map->freeze_mutex);
2194 return err;
2195}
2196
2197static const struct bpf_prog_ops * const bpf_prog_types[] = {
2198#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2199 [_id] = & _name ## _prog_ops,
2200#define BPF_MAP_TYPE(_id, _ops)
2201#define BPF_LINK_TYPE(_id, _name)
2202#include <linux/bpf_types.h>
2203#undef BPF_PROG_TYPE
2204#undef BPF_MAP_TYPE
2205#undef BPF_LINK_TYPE
2206};
2207
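/* Resolve the bpf_prog_ops for the given program type and attach them to
 * the program; offloaded programs get bpf_offload_prog_ops instead.
 */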
2208static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
2209{
2210 const struct bpf_prog_ops *ops;
2211
2212 if (type >= ARRAY_SIZE(bpf_prog_types))
2213 return -EINVAL;
2214 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
2215 ops = bpf_prog_types[type];
2216 if (!ops)
2217 return -EINVAL;
2218
2219 if (!bpf_prog_is_offloaded(prog->aux))
2220 prog->aux->ops = ops;
2221 else
2222 prog->aux->ops = &bpf_offload_prog_ops;
2223 prog->type = type;
2224 return 0;
2225}
2226
2227enum bpf_audit {
2228 BPF_AUDIT_LOAD,
2229 BPF_AUDIT_UNLOAD,
2230 BPF_AUDIT_MAX,
2231};
2232
2233static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
2234 [BPF_AUDIT_LOAD] = "LOAD",
2235 [BPF_AUDIT_UNLOAD] = "UNLOAD",
2236};
2237
2238static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
2239{
2240 struct audit_context *ctx = NULL;
2241 struct audit_buffer *ab;
2242
2243 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
2244 return;
2245 if (audit_enabled == AUDIT_OFF)
2246 return;
2247 if (!in_irq() && !irqs_disabled())
2248 ctx = audit_context();
2249 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
2250 if (unlikely(!ab))
2251 return;
2252 audit_log_format(ab, "prog-id=%u op=%s",
2253 prog->aux->id, bpf_audit_str[op]);
2254 audit_log_end(ab);
2255}
2256
2257static int bpf_prog_alloc_id(struct bpf_prog *prog)
2258{
2259 int id;
2260
2261 idr_preload(GFP_KERNEL);
2262 spin_lock_bh(&prog_idr_lock);
2263 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
2264 if (id > 0)
2265 prog->aux->id = id;
2266 spin_unlock_bh(&prog_idr_lock);
2267 idr_preload_end();
2268
2269 /* id is in [1, INT_MAX) */
2270 if (WARN_ON_ONCE(!id))
2271 return -ENOSPC;
2272
2273 return id > 0 ? 0 : id;
2274}
2275
2276void bpf_prog_free_id(struct bpf_prog *prog)
2277{
2278 unsigned long flags;
2279
2280 /* cBPF to eBPF migrations are currently not in the idr store.
2281 * Offloaded programs are removed from the store when their device
2282 * disappears - even if someone grabs an fd to them they are unusable,
2283 * simply waiting for refcnt to drop to be freed.
2284 */
2285 if (!prog->aux->id)
2286 return;
2287
2288 spin_lock_irqsave(&prog_idr_lock, flags);
2289 idr_remove(&prog_idr, prog->aux->id);
2290 prog->aux->id = 0;
2291 spin_unlock_irqrestore(&prog_idr_lock, flags);
2292}
2293
2294static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2295{
2296 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2297
2298 kvfree(aux->func_info);
2299 kfree(aux->func_info_aux);
2300 free_uid(aux->user);
2301 security_bpf_prog_free(aux->prog);
2302 bpf_prog_free(aux->prog);
2303}
2304
2305static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2306{
2307 bpf_prog_kallsyms_del_all(prog);
2308 btf_put(prog->aux->btf);
2309 module_put(prog->aux->mod);
2310 kvfree(prog->aux->jited_linfo);
2311 kvfree(prog->aux->linfo);
2312 kfree(prog->aux->kfunc_tab);
2313 if (prog->aux->attach_btf)
2314 btf_put(prog->aux->attach_btf);
2315
2316 if (deferred) {
2317 if (prog->sleepable)
2318 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2319 else
2320 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2321 } else {
2322 __bpf_prog_put_rcu(&prog->aux->rcu);
2323 }
2324}
2325
2326static void bpf_prog_put_deferred(struct work_struct *work)
2327{
2328 struct bpf_prog_aux *aux;
2329 struct bpf_prog *prog;
2330
2331 aux = container_of(work, struct bpf_prog_aux, work);
2332 prog = aux->prog;
2333 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2334 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2335 bpf_prog_free_id(prog);
2336 __bpf_prog_put_noref(prog, true);
2337}
2338
2339static void __bpf_prog_put(struct bpf_prog *prog)
2340{
2341 struct bpf_prog_aux *aux = prog->aux;
2342
2343 if (atomic64_dec_and_test(&aux->refcnt)) {
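		/* Last reference is gone. The teardown path below is not safe
		 * to run from hardirq context or with IRQs disabled, so defer
		 * it to a workqueue in that case; otherwise run it directly.
		 */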
2344 if (in_irq() || irqs_disabled()) {
2345 INIT_WORK(&aux->work, bpf_prog_put_deferred);
2346 schedule_work(&aux->work);
2347 } else {
2348 bpf_prog_put_deferred(&aux->work);
2349 }
2350 }
2351}
2352
2353void bpf_prog_put(struct bpf_prog *prog)
2354{
2355 __bpf_prog_put(prog);
2356}
2357EXPORT_SYMBOL_GPL(bpf_prog_put);
2358
2359static int bpf_prog_release(struct inode *inode, struct file *filp)
2360{
2361 struct bpf_prog *prog = filp->private_data;
2362
2363 bpf_prog_put(prog);
2364 return 0;
2365}
2366
2367struct bpf_prog_kstats {
2368 u64 nsecs;
2369 u64 cnt;
2370 u64 misses;
2371};
2372
2373void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2374{
2375 struct bpf_prog_stats *stats;
2376 unsigned int flags;
2377
2378 stats = this_cpu_ptr(prog->stats);
2379 flags = u64_stats_update_begin_irqsave(&stats->syncp);
2380 u64_stats_inc(&stats->misses);
2381 u64_stats_update_end_irqrestore(&stats->syncp, flags);
2382}
2383
2384static void bpf_prog_get_stats(const struct bpf_prog *prog,
2385 struct bpf_prog_kstats *stats)
2386{
2387 u64 nsecs = 0, cnt = 0, misses = 0;
2388 int cpu;
2389
2390 for_each_possible_cpu(cpu) {
2391 const struct bpf_prog_stats *st;
2392 unsigned int start;
2393 u64 tnsecs, tcnt, tmisses;
2394
2395 st = per_cpu_ptr(prog->stats, cpu);
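		/* u64_stats seqcount read loop: retry if a writer updated the
		 * per-CPU counters concurrently (this matters on 32-bit, where
		 * 64-bit reads are not atomic).
		 */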
2396 do {
2397 start = u64_stats_fetch_begin(&st->syncp);
2398 tnsecs = u64_stats_read(&st->nsecs);
2399 tcnt = u64_stats_read(&st->cnt);
2400 tmisses = u64_stats_read(&st->misses);
2401 } while (u64_stats_fetch_retry(&st->syncp, start));
2402 nsecs += tnsecs;
2403 cnt += tcnt;
2404 misses += tmisses;
2405 }
2406 stats->nsecs = nsecs;
2407 stats->cnt = cnt;
2408 stats->misses = misses;
2409}
2410
2411#ifdef CONFIG_PROC_FS
2412static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2413{
2414 const struct bpf_prog *prog = filp->private_data;
2415 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2416 struct bpf_prog_kstats stats;
2417
2418 bpf_prog_get_stats(prog, &stats);
2419 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2420 seq_printf(m,
2421 "prog_type:\t%u\n"
2422 "prog_jited:\t%u\n"
2423 "prog_tag:\t%s\n"
2424 "memlock:\t%llu\n"
2425 "prog_id:\t%u\n"
2426 "run_time_ns:\t%llu\n"
2427 "run_cnt:\t%llu\n"
2428 "recursion_misses:\t%llu\n"
2429 "verified_insns:\t%u\n",
2430 prog->type,
2431 prog->jited,
2432 prog_tag,
2433 prog->pages * 1ULL << PAGE_SHIFT,
2434 prog->aux->id,
2435 stats.nsecs,
2436 stats.cnt,
2437 stats.misses,
2438 prog->aux->verified_insns);
2439}
2440#endif
2441
2442const struct file_operations bpf_prog_fops = {
2443#ifdef CONFIG_PROC_FS
2444 .show_fdinfo = bpf_prog_show_fdinfo,
2445#endif
2446 .release = bpf_prog_release,
2447 .read = bpf_dummy_read,
2448 .write = bpf_dummy_write,
2449};
2450
2451int bpf_prog_new_fd(struct bpf_prog *prog)
2452{
2453 int ret;
2454
2455 ret = security_bpf_prog(prog);
2456 if (ret < 0)
2457 return ret;
2458
2459 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2460 O_RDWR | O_CLOEXEC);
2461}
2462
2463void bpf_prog_add(struct bpf_prog *prog, int i)
2464{
2465 atomic64_add(i, &prog->aux->refcnt);
2466}
2467EXPORT_SYMBOL_GPL(bpf_prog_add);
2468
2469void bpf_prog_sub(struct bpf_prog *prog, int i)
2470{
2471 /* Only to be used for undoing previous bpf_prog_add() in some
2472 * error path. We still know that another entity in our call
2473 * path holds a reference to the program, thus atomic_sub() can
2474 * be safely used in such cases!
2475 */
2476 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2477}
2478EXPORT_SYMBOL_GPL(bpf_prog_sub);
2479
2480void bpf_prog_inc(struct bpf_prog *prog)
2481{
2482 atomic64_inc(&prog->aux->refcnt);
2483}
2484EXPORT_SYMBOL_GPL(bpf_prog_inc);
2485
2486/* prog_idr_lock should have been held */
2487struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2488{
2489 int refold;
2490
2491 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2492
2493 if (!refold)
2494 return ERR_PTR(-ENOENT);
2495
2496 return prog;
2497}
2498EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2499
2500bool bpf_prog_get_ok(struct bpf_prog *prog,
2501 enum bpf_prog_type *attach_type, bool attach_drv)
2502{
2503 /* not an attachment, just a refcount inc, always allow */
2504 if (!attach_type)
2505 return true;
2506
2507 if (prog->type != *attach_type)
2508 return false;
2509 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2510 return false;
2511
2512 return true;
2513}
2514
2515static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2516 bool attach_drv)
2517{
2518 CLASS(fd, f)(ufd);
2519 struct bpf_prog *prog;
2520
2521 if (fd_empty(f))
2522 return ERR_PTR(-EBADF);
2523 if (fd_file(f)->f_op != &bpf_prog_fops)
2524 return ERR_PTR(-EINVAL);
2525
2526 prog = fd_file(f)->private_data;
2527 if (!bpf_prog_get_ok(prog, attach_type, attach_drv))
2528 return ERR_PTR(-EINVAL);
2529
2530 bpf_prog_inc(prog);
2531 return prog;
2532}
2533
2534struct bpf_prog *bpf_prog_get(u32 ufd)
2535{
2536 return __bpf_prog_get(ufd, NULL, false);
2537}
2538
2539struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2540 bool attach_drv)
2541{
2542 return __bpf_prog_get(ufd, &type, attach_drv);
2543}
2544EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2545
/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later, for some of them, specifying expected_attach_type
 * at load time became required so that the program could be validated properly.
 * Programs of types that may be loaded both w/ and w/o (for backward
 * compatibility) expected_attach_type should have the default attach type
 * assigned to expected_attach_type for the latter case, so that it can be
 * validated later at attach time.
2553 *
2554 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
2555 * prog type requires it but has some attach types that have to be backward
2556 * compatible.
2557 */
2558static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2559{
2560 switch (attr->prog_type) {
2561 case BPF_PROG_TYPE_CGROUP_SOCK:
2562 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2563 * exist so checking for non-zero is the way to go here.
2564 */
2565 if (!attr->expected_attach_type)
2566 attr->expected_attach_type =
2567 BPF_CGROUP_INET_SOCK_CREATE;
2568 break;
2569 case BPF_PROG_TYPE_SK_REUSEPORT:
2570 if (!attr->expected_attach_type)
2571 attr->expected_attach_type =
2572 BPF_SK_REUSEPORT_SELECT;
2573 break;
2574 }
2575}
2576
2577static int
2578bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2579 enum bpf_attach_type expected_attach_type,
2580 struct btf *attach_btf, u32 btf_id,
2581 struct bpf_prog *dst_prog)
2582{
2583 if (btf_id) {
2584 if (btf_id > BTF_MAX_TYPE)
2585 return -EINVAL;
2586
2587 if (!attach_btf && !dst_prog)
2588 return -EINVAL;
2589
2590 switch (prog_type) {
2591 case BPF_PROG_TYPE_TRACING:
2592 case BPF_PROG_TYPE_LSM:
2593 case BPF_PROG_TYPE_STRUCT_OPS:
2594 case BPF_PROG_TYPE_EXT:
2595 break;
2596 default:
2597 return -EINVAL;
2598 }
2599 }
2600
2601 if (attach_btf && (!btf_id || dst_prog))
2602 return -EINVAL;
2603
2604 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2605 prog_type != BPF_PROG_TYPE_EXT)
2606 return -EINVAL;
2607
2608 switch (prog_type) {
2609 case BPF_PROG_TYPE_CGROUP_SOCK:
2610 switch (expected_attach_type) {
2611 case BPF_CGROUP_INET_SOCK_CREATE:
2612 case BPF_CGROUP_INET_SOCK_RELEASE:
2613 case BPF_CGROUP_INET4_POST_BIND:
2614 case BPF_CGROUP_INET6_POST_BIND:
2615 return 0;
2616 default:
2617 return -EINVAL;
2618 }
2619 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2620 switch (expected_attach_type) {
2621 case BPF_CGROUP_INET4_BIND:
2622 case BPF_CGROUP_INET6_BIND:
2623 case BPF_CGROUP_INET4_CONNECT:
2624 case BPF_CGROUP_INET6_CONNECT:
2625 case BPF_CGROUP_UNIX_CONNECT:
2626 case BPF_CGROUP_INET4_GETPEERNAME:
2627 case BPF_CGROUP_INET6_GETPEERNAME:
2628 case BPF_CGROUP_UNIX_GETPEERNAME:
2629 case BPF_CGROUP_INET4_GETSOCKNAME:
2630 case BPF_CGROUP_INET6_GETSOCKNAME:
2631 case BPF_CGROUP_UNIX_GETSOCKNAME:
2632 case BPF_CGROUP_UDP4_SENDMSG:
2633 case BPF_CGROUP_UDP6_SENDMSG:
2634 case BPF_CGROUP_UNIX_SENDMSG:
2635 case BPF_CGROUP_UDP4_RECVMSG:
2636 case BPF_CGROUP_UDP6_RECVMSG:
2637 case BPF_CGROUP_UNIX_RECVMSG:
2638 return 0;
2639 default:
2640 return -EINVAL;
2641 }
2642 case BPF_PROG_TYPE_CGROUP_SKB:
2643 switch (expected_attach_type) {
2644 case BPF_CGROUP_INET_INGRESS:
2645 case BPF_CGROUP_INET_EGRESS:
2646 return 0;
2647 default:
2648 return -EINVAL;
2649 }
2650 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2651 switch (expected_attach_type) {
2652 case BPF_CGROUP_SETSOCKOPT:
2653 case BPF_CGROUP_GETSOCKOPT:
2654 return 0;
2655 default:
2656 return -EINVAL;
2657 }
2658 case BPF_PROG_TYPE_SK_LOOKUP:
2659 if (expected_attach_type == BPF_SK_LOOKUP)
2660 return 0;
2661 return -EINVAL;
2662 case BPF_PROG_TYPE_SK_REUSEPORT:
2663 switch (expected_attach_type) {
2664 case BPF_SK_REUSEPORT_SELECT:
2665 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2666 return 0;
2667 default:
2668 return -EINVAL;
2669 }
2670 case BPF_PROG_TYPE_NETFILTER:
2671 if (expected_attach_type == BPF_NETFILTER)
2672 return 0;
2673 return -EINVAL;
2674 case BPF_PROG_TYPE_SYSCALL:
2675 case BPF_PROG_TYPE_EXT:
2676 if (expected_attach_type)
2677 return -EINVAL;
2678 fallthrough;
2679 default:
2680 return 0;
2681 }
2682}
2683
2684static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2685{
2686 switch (prog_type) {
2687 case BPF_PROG_TYPE_SCHED_CLS:
2688 case BPF_PROG_TYPE_SCHED_ACT:
2689 case BPF_PROG_TYPE_XDP:
2690 case BPF_PROG_TYPE_LWT_IN:
2691 case BPF_PROG_TYPE_LWT_OUT:
2692 case BPF_PROG_TYPE_LWT_XMIT:
2693 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2694 case BPF_PROG_TYPE_SK_SKB:
2695 case BPF_PROG_TYPE_SK_MSG:
2696 case BPF_PROG_TYPE_FLOW_DISSECTOR:
2697 case BPF_PROG_TYPE_CGROUP_DEVICE:
2698 case BPF_PROG_TYPE_CGROUP_SOCK:
2699 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2700 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2701 case BPF_PROG_TYPE_CGROUP_SYSCTL:
2702 case BPF_PROG_TYPE_SOCK_OPS:
2703 case BPF_PROG_TYPE_EXT: /* extends any prog */
2704 case BPF_PROG_TYPE_NETFILTER:
2705 return true;
2706 case BPF_PROG_TYPE_CGROUP_SKB:
2707 /* always unpriv */
2708 case BPF_PROG_TYPE_SK_REUSEPORT:
2709 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2710 default:
2711 return false;
2712 }
2713}
2714
2715static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2716{
2717 switch (prog_type) {
2718 case BPF_PROG_TYPE_KPROBE:
2719 case BPF_PROG_TYPE_TRACEPOINT:
2720 case BPF_PROG_TYPE_PERF_EVENT:
2721 case BPF_PROG_TYPE_RAW_TRACEPOINT:
2722 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2723 case BPF_PROG_TYPE_TRACING:
2724 case BPF_PROG_TYPE_LSM:
2725 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2726 case BPF_PROG_TYPE_EXT: /* extends any prog */
2727 return true;
2728 default:
2729 return false;
2730 }
2731}
2732
2733/* last field in 'union bpf_attr' used by this command */
2734#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
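
/* Sketch of how user space typically reaches this path (illustrative only, not
 * part of this file): it fills in the bpf_attr fields consumed below and issues
 * the bpf(2) syscall with the BPF_PROG_LOAD command, e.g.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = ptr_to_u64(insns);	// instructions to be verified
 *	attr.insn_cnt  = insn_cnt;
 *	attr.license   = ptr_to_u64("GPL");	// see license_is_gpl_compatible()
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * where ptr_to_u64() is a hypothetical helper that casts a pointer to __u64.
 * On success the returned fd refers to the loaded program, see
 * bpf_prog_new_fd() at the end of this function.
 */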
2735
2736static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2737{
2738 enum bpf_prog_type type = attr->prog_type;
2739 struct bpf_prog *prog, *dst_prog = NULL;
2740 struct btf *attach_btf = NULL;
2741 struct bpf_token *token = NULL;
2742 bool bpf_cap;
2743 int err;
2744 char license[128];
2745
2746 if (CHECK_ATTR(BPF_PROG_LOAD))
2747 return -EINVAL;
2748
2749 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2750 BPF_F_ANY_ALIGNMENT |
2751 BPF_F_TEST_STATE_FREQ |
2752 BPF_F_SLEEPABLE |
2753 BPF_F_TEST_RND_HI32 |
2754 BPF_F_XDP_HAS_FRAGS |
2755 BPF_F_XDP_DEV_BOUND_ONLY |
2756 BPF_F_TEST_REG_INVARIANTS |
2757 BPF_F_TOKEN_FD))
2758 return -EINVAL;
2759
2760 bpf_prog_load_fixup_attach_type(attr);
2761
2762 if (attr->prog_flags & BPF_F_TOKEN_FD) {
2763 token = bpf_token_get_from_fd(attr->prog_token_fd);
2764 if (IS_ERR(token))
2765 return PTR_ERR(token);
2766 /* if current token doesn't grant prog loading permissions,
2767 * then we can't use this token, so ignore it and rely on
2768 * system-wide capabilities checks
2769 */
2770 if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
2771 !bpf_token_allow_prog_type(token, attr->prog_type,
2772 attr->expected_attach_type)) {
2773 bpf_token_put(token);
2774 token = NULL;
2775 }
2776 }
2777
2778 bpf_cap = bpf_token_capable(token, CAP_BPF);
2779 err = -EPERM;
2780
2781 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2782 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2783 !bpf_cap)
2784 goto put_token;
2785
2786 /* Intent here is for unprivileged_bpf_disabled to block BPF program
2787 * creation for unprivileged users; other actions depend
2788 * on fd availability and access to bpffs, so are dependent on
2789 * object creation success. Even with unprivileged BPF disabled,
2790 * capability checks are still carried out for these
2791 * and other operations.
2792 */
2793 if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
2794 goto put_token;
2795
2796 if (attr->insn_cnt == 0 ||
2797 attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
2798 err = -E2BIG;
2799 goto put_token;
2800 }
2801 if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2802 type != BPF_PROG_TYPE_CGROUP_SKB &&
2803 !bpf_cap)
2804 goto put_token;
2805
2806 if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
2807 goto put_token;
2808 if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
2809 goto put_token;
2810
	/* attach_prog_fd/attach_btf_obj_fd can specify the fd of either a bpf_prog
	 * or a btf, so we need to check which one it is
2813 */
2814 if (attr->attach_prog_fd) {
2815 dst_prog = bpf_prog_get(attr->attach_prog_fd);
2816 if (IS_ERR(dst_prog)) {
2817 dst_prog = NULL;
2818 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2819 if (IS_ERR(attach_btf)) {
2820 err = -EINVAL;
2821 goto put_token;
2822 }
2823 if (!btf_is_kernel(attach_btf)) {
2824 /* attaching through specifying bpf_prog's BTF
2825 * objects directly might be supported eventually
2826 */
2827 btf_put(attach_btf);
2828 err = -ENOTSUPP;
2829 goto put_token;
2830 }
2831 }
2832 } else if (attr->attach_btf_id) {
2833 /* fall back to vmlinux BTF, if BTF type ID is specified */
2834 attach_btf = bpf_get_btf_vmlinux();
2835 if (IS_ERR(attach_btf)) {
2836 err = PTR_ERR(attach_btf);
2837 goto put_token;
2838 }
2839 if (!attach_btf) {
2840 err = -EINVAL;
2841 goto put_token;
2842 }
2843 btf_get(attach_btf);
2844 }
2845
2846 if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2847 attach_btf, attr->attach_btf_id,
2848 dst_prog)) {
2849 if (dst_prog)
2850 bpf_prog_put(dst_prog);
2851 if (attach_btf)
2852 btf_put(attach_btf);
2853 err = -EINVAL;
2854 goto put_token;
2855 }
2856
2857 /* plain bpf_prog allocation */
2858 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2859 if (!prog) {
2860 if (dst_prog)
2861 bpf_prog_put(dst_prog);
2862 if (attach_btf)
2863 btf_put(attach_btf);
		err = -ENOMEM;
2865 goto put_token;
2866 }
2867
2868 prog->expected_attach_type = attr->expected_attach_type;
2869 prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
2870 prog->aux->attach_btf = attach_btf;
2871 prog->aux->attach_btf_id = attr->attach_btf_id;
2872 prog->aux->dst_prog = dst_prog;
2873 prog->aux->dev_bound = !!attr->prog_ifindex;
2874 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2875
2876 /* move token into prog->aux, reuse taken refcnt */
2877 prog->aux->token = token;
2878 token = NULL;
2879
2880 prog->aux->user = get_current_user();
2881 prog->len = attr->insn_cnt;
2882
2883 err = -EFAULT;
2884 if (copy_from_bpfptr(prog->insns,
2885 make_bpfptr(attr->insns, uattr.is_kernel),
2886 bpf_prog_insn_size(prog)) != 0)
2887 goto free_prog;
2888 /* copy eBPF program license from user space */
2889 if (strncpy_from_bpfptr(license,
2890 make_bpfptr(attr->license, uattr.is_kernel),
2891 sizeof(license) - 1) < 0)
2892 goto free_prog;
2893 license[sizeof(license) - 1] = 0;
2894
2895 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2896 prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2897
2898 prog->orig_prog = NULL;
2899 prog->jited = 0;
2900
2901 atomic64_set(&prog->aux->refcnt, 1);
2902
2903 if (bpf_prog_is_dev_bound(prog->aux)) {
2904 err = bpf_prog_dev_bound_init(prog, attr);
2905 if (err)
2906 goto free_prog;
2907 }
2908
2909 if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2910 bpf_prog_is_dev_bound(dst_prog->aux)) {
2911 err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2912 if (err)
2913 goto free_prog;
2914 }
2915
2916 /*
2917 * Bookkeeping for managing the program attachment chain.
2918 *
	 * It might be tempting to set the attach_tracing_prog flag at attachment
	 * time, but that would not prevent loading a bunch of tracing programs
	 * first and then attaching them to one another.
2922 *
2923 * The flag attach_tracing_prog is set for the whole program lifecycle, and
2924 * doesn't have to be cleared in bpf_tracing_link_release, since tracing
2925 * programs cannot change attachment target.
2926 */
2927 if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
2928 dst_prog->type == BPF_PROG_TYPE_TRACING) {
2929 prog->aux->attach_tracing_prog = true;
2930 }
2931
2932 /* find program type: socket_filter vs tracing_filter */
2933 err = find_prog_type(type, prog);
2934 if (err < 0)
2935 goto free_prog;
2936
2937 prog->aux->load_time = ktime_get_boottime_ns();
2938 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2939 sizeof(attr->prog_name));
2940 if (err < 0)
2941 goto free_prog;
2942
2943 err = security_bpf_prog_load(prog, attr, token);
2944 if (err)
2945 goto free_prog_sec;
2946
2947 /* run eBPF verifier */
2948 err = bpf_check(&prog, attr, uattr, uattr_size);
2949 if (err < 0)
2950 goto free_used_maps;
2951
2952 prog = bpf_prog_select_runtime(prog, &err);
2953 if (err < 0)
2954 goto free_used_maps;
2955
2956 err = bpf_prog_alloc_id(prog);
2957 if (err)
2958 goto free_used_maps;
2959
2960 /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2961 * effectively publicly exposed. However, retrieving via
2962 * bpf_prog_get_fd_by_id() will take another reference,
2963 * therefore it cannot be gone underneath us.
2964 *
2965 * Only for the time /after/ successful bpf_prog_new_fd()
2966 * and before returning to userspace, we might just hold
2967 * one reference and any parallel close on that fd could
2968 * rip everything out. Hence, below notifications must
2969 * happen before bpf_prog_new_fd().
2970 *
2971 * Also, any failure handling from this point onwards must
2972 * be using bpf_prog_put() given the program is exposed.
2973 */
2974 bpf_prog_kallsyms_add(prog);
2975 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2976 bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2977
2978 err = bpf_prog_new_fd(prog);
2979 if (err < 0)
2980 bpf_prog_put(prog);
2981 return err;
2982
2983free_used_maps:
2984 /* In case we have subprogs, we need to wait for a grace
2985 * period before we can tear down JIT memory since symbols
2986 * are already exposed under kallsyms.
2987 */
2988 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
2989 return err;
2990
2991free_prog_sec:
2992 security_bpf_prog_free(prog);
2993free_prog:
2994 free_uid(prog->aux->user);
2995 if (prog->aux->attach_btf)
2996 btf_put(prog->aux->attach_btf);
2997 bpf_prog_free(prog);
2998put_token:
2999 bpf_token_put(token);
3000 return err;
3001}
3002
3003#define BPF_OBJ_LAST_FIELD path_fd
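
/* BPF_OBJ_PIN and BPF_OBJ_GET pin an object fd (prog, map, or link) at a path
 * on a bpffs mount and re-open it later by that path. Illustrative user-space
 * usage, assuming bpffs is mounted at the conventional /sys/fs/bpf (the path
 * below is hypothetical):
 *
 *	attr.bpf_fd   = prog_fd;
 *	attr.pathname = ptr_to_u64("/sys/fs/bpf/my_prog");
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 * With BPF_F_PATH_FD set in file_flags, pathname is resolved relative to the
 * directory fd in path_fd instead of AT_FDCWD.
 */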
3004
3005static int bpf_obj_pin(const union bpf_attr *attr)
3006{
3007 int path_fd;
3008
3009 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
3010 return -EINVAL;
3011
3012 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
3013 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
3014 return -EINVAL;
3015
3016 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
3017 return bpf_obj_pin_user(attr->bpf_fd, path_fd,
3018 u64_to_user_ptr(attr->pathname));
3019}
3020
3021static int bpf_obj_get(const union bpf_attr *attr)
3022{
3023 int path_fd;
3024
3025 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
3026 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
3027 return -EINVAL;
3028
3029 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
3030 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
3031 return -EINVAL;
3032
3033 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
3034 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
3035 attr->file_flags);
3036}
3037
/* bpf_link_init_sleepable() lets the caller specify whether the BPF link itself
 * has "sleepable" semantics, which normally means that the link's attach hook
 * can dereference the link or its underlying program for some time after
 * detachment, due to the RCU Tasks Trace-based lifetime protection scheme.
 * The BPF program itself can be non-sleepable, yet because it is transitively
 * reachable through the BPF link, its freeing has to be delayed until after an
 * RCU Tasks Trace grace period.
 */
3046void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
3047 const struct bpf_link_ops *ops, struct bpf_prog *prog,
3048 bool sleepable)
3049{
3050 WARN_ON(ops->dealloc && ops->dealloc_deferred);
3051 atomic64_set(&link->refcnt, 1);
3052 link->type = type;
3053 link->sleepable = sleepable;
3054 link->id = 0;
3055 link->ops = ops;
3056 link->prog = prog;
3057}
3058
3059void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
3060 const struct bpf_link_ops *ops, struct bpf_prog *prog)
3061{
3062 bpf_link_init_sleepable(link, type, ops, prog, false);
3063}
3064
3065static void bpf_link_free_id(int id)
3066{
3067 if (!id)
3068 return;
3069
3070 spin_lock_bh(&link_idr_lock);
3071 idr_remove(&link_idr, id);
3072 spin_unlock_bh(&link_idr_lock);
3073}
3074
3075/* Clean up bpf_link and corresponding anon_inode file and FD. After
3076 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
3077 * anon_inode's release() call. This helper marks bpf_link as
 * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
 * refcnt is not decremented; that is the responsibility of the calling code
 * that failed to complete bpf_link initialization.
3081 * This helper eventually calls link's dealloc callback, but does not call
3082 * link's release callback.
3083 */
3084void bpf_link_cleanup(struct bpf_link_primer *primer)
3085{
3086 primer->link->prog = NULL;
3087 bpf_link_free_id(primer->id);
3088 fput(primer->file);
3089 put_unused_fd(primer->fd);
3090}
3091
3092void bpf_link_inc(struct bpf_link *link)
3093{
3094 atomic64_inc(&link->refcnt);
3095}
3096
3097static void bpf_link_dealloc(struct bpf_link *link)
3098{
3099 /* now that we know that bpf_link itself can't be reached, put underlying BPF program */
3100 if (link->prog)
3101 bpf_prog_put(link->prog);
3102
3103 /* free bpf_link and its containing memory */
3104 if (link->ops->dealloc_deferred)
3105 link->ops->dealloc_deferred(link);
3106 else
3107 link->ops->dealloc(link);
3108}
3109
3110static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
3111{
3112 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
3113
3114 bpf_link_dealloc(link);
3115}
3116
3117static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
3118{
3119 if (rcu_trace_implies_rcu_gp())
3120 bpf_link_defer_dealloc_rcu_gp(rcu);
3121 else
3122 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
3123}
3124
3125/* bpf_link_free is guaranteed to be called from process context */
3126static void bpf_link_free(struct bpf_link *link)
3127{
3128 const struct bpf_link_ops *ops = link->ops;
3129
3130 bpf_link_free_id(link->id);
3131 /* detach BPF program, clean up used resources */
3132 if (link->prog)
3133 ops->release(link);
3134 if (ops->dealloc_deferred) {
3135 /* Schedule BPF link deallocation, which will only then
3136 * trigger putting BPF program refcount.
3137 * If underlying BPF program is sleepable or BPF link's target
3138 * attach hookpoint is sleepable or otherwise requires RCU GPs
3139 * to ensure link and its underlying BPF program is not
3140 * reachable anymore, we need to first wait for RCU tasks
3141 * trace sync, and then go through "classic" RCU grace period
3142 */
3143 if (link->sleepable || (link->prog && link->prog->sleepable))
3144 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
3145 else
3146 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
3147 } else if (ops->dealloc) {
3148 bpf_link_dealloc(link);
3149 }
3150}
3151
3152static void bpf_link_put_deferred(struct work_struct *work)
3153{
3154 struct bpf_link *link = container_of(work, struct bpf_link, work);
3155
3156 bpf_link_free(link);
3157}
3158
/* bpf_link_put() might be called from atomic context, but freeing the link may
 * need to acquire sleeping locks, so the actual cleanup is deferred to a
 * workqueue and runs in process context.
 */
3162void bpf_link_put(struct bpf_link *link)
3163{
3164 if (!atomic64_dec_and_test(&link->refcnt))
3165 return;
3166
3167 INIT_WORK(&link->work, bpf_link_put_deferred);
3168 schedule_work(&link->work);
3169}
3170EXPORT_SYMBOL(bpf_link_put);
3171
3172static void bpf_link_put_direct(struct bpf_link *link)
3173{
3174 if (!atomic64_dec_and_test(&link->refcnt))
3175 return;
3176 bpf_link_free(link);
3177}
3178
3179static int bpf_link_release(struct inode *inode, struct file *filp)
3180{
3181 struct bpf_link *link = filp->private_data;
3182
3183 bpf_link_put_direct(link);
3184 return 0;
3185}
3186
3187#ifdef CONFIG_PROC_FS
3188#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
3189#define BPF_MAP_TYPE(_id, _ops)
3190#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
3191static const char *bpf_link_type_strs[] = {
3192 [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
3193#include <linux/bpf_types.h>
3194};
3195#undef BPF_PROG_TYPE
3196#undef BPF_MAP_TYPE
3197#undef BPF_LINK_TYPE
3198
3199static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
3200{
3201 const struct bpf_link *link = filp->private_data;
3202 const struct bpf_prog *prog = link->prog;
3203 enum bpf_link_type type = link->type;
3204 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
3205
3206 if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) {
3207 seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]);
3208 } else {
3209 WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type);
3210 seq_printf(m, "link_type:\t<%u>\n", type);
3211 }
3212 seq_printf(m, "link_id:\t%u\n", link->id);
3213
3214 if (prog) {
3215 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
3216 seq_printf(m,
3217 "prog_tag:\t%s\n"
3218 "prog_id:\t%u\n",
3219 prog_tag,
3220 prog->aux->id);
3221 }
3222 if (link->ops->show_fdinfo)
3223 link->ops->show_fdinfo(link, m);
3224}
3225#endif
3226
3227static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
3228{
3229 struct bpf_link *link = file->private_data;
3230
3231 return link->ops->poll(file, pts);
3232}
3233
3234static const struct file_operations bpf_link_fops = {
3235#ifdef CONFIG_PROC_FS
3236 .show_fdinfo = bpf_link_show_fdinfo,
3237#endif
3238 .release = bpf_link_release,
3239 .read = bpf_dummy_read,
3240 .write = bpf_dummy_write,
3241};
3242
3243static const struct file_operations bpf_link_fops_poll = {
3244#ifdef CONFIG_PROC_FS
3245 .show_fdinfo = bpf_link_show_fdinfo,
3246#endif
3247 .release = bpf_link_release,
3248 .read = bpf_dummy_read,
3249 .write = bpf_dummy_write,
3250 .poll = bpf_link_poll,
3251};
3252
3253static int bpf_link_alloc_id(struct bpf_link *link)
3254{
3255 int id;
3256
3257 idr_preload(GFP_KERNEL);
3258 spin_lock_bh(&link_idr_lock);
3259 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
3260 spin_unlock_bh(&link_idr_lock);
3261 idr_preload_end();
3262
3263 return id;
3264}
3265
3266/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
3267 * reserving unused FD and allocating ID from link_idr. This is to be paired
3268 * with bpf_link_settle() to install FD and ID and expose bpf_link to
3269 * user-space, if bpf_link is successfully attached. If not, bpf_link and
 * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
 * the transient state is passed around in struct bpf_link_primer.
 * This is the preferred way to create and initialize bpf_link, especially when
 * there are complicated and expensive operations in between creating bpf_link
 * itself and attaching it to a BPF hook. By using bpf_link_prime() and
 * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
 * expensive (and potentially failing) roll back operations in the rare case
 * that the file, FD, or ID can't be allocated.
3278 */
3279int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
3280{
3281 struct file *file;
3282 int fd, id;
3283
3284 fd = get_unused_fd_flags(O_CLOEXEC);
3285 if (fd < 0)
		return fd;

3289 id = bpf_link_alloc_id(link);
3290 if (id < 0) {
3291 put_unused_fd(fd);
3292 return id;
3293 }
3294
3295 file = anon_inode_getfile("bpf_link",
3296 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
3297 link, O_CLOEXEC);
3298 if (IS_ERR(file)) {
3299 bpf_link_free_id(id);
3300 put_unused_fd(fd);
3301 return PTR_ERR(file);
3302 }
3303
3304 primer->link = link;
3305 primer->file = file;
3306 primer->fd = fd;
3307 primer->id = id;
3308 return 0;
3309}
3310
3311int bpf_link_settle(struct bpf_link_primer *primer)
3312{
3313 /* make bpf_link fetchable by ID */
3314 spin_lock_bh(&link_idr_lock);
3315 primer->link->id = primer->id;
3316 spin_unlock_bh(&link_idr_lock);
3317 /* make bpf_link fetchable by FD */
3318 fd_install(primer->fd, primer->file);
3319 /* pass through installed FD */
3320 return primer->fd;
3321}
3322
3323int bpf_link_new_fd(struct bpf_link *link)
3324{
3325 return anon_inode_getfd("bpf-link",
3326 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
3327 link, O_CLOEXEC);
3328}
3329
3330struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3331{
3332 CLASS(fd, f)(ufd);
3333 struct bpf_link *link;
3334
3335 if (fd_empty(f))
3336 return ERR_PTR(-EBADF);
3337 if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll)
3338 return ERR_PTR(-EINVAL);
3339
3340 link = fd_file(f)->private_data;
3341 bpf_link_inc(link);
3342 return link;
3343}
3344EXPORT_SYMBOL(bpf_link_get_from_fd);
3345
3346static void bpf_tracing_link_release(struct bpf_link *link)
3347{
3348 struct bpf_tracing_link *tr_link =
3349 container_of(link, struct bpf_tracing_link, link.link);
3350
3351 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3352 tr_link->trampoline,
3353 tr_link->tgt_prog));
3354
3355 bpf_trampoline_put(tr_link->trampoline);
3356
3357 /* tgt_prog is NULL if target is a kernel function */
3358 if (tr_link->tgt_prog)
3359 bpf_prog_put(tr_link->tgt_prog);
3360}
3361
3362static void bpf_tracing_link_dealloc(struct bpf_link *link)
3363{
3364 struct bpf_tracing_link *tr_link =
3365 container_of(link, struct bpf_tracing_link, link.link);
3366
3367 kfree(tr_link);
3368}
3369
3370static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3371 struct seq_file *seq)
3372{
3373 struct bpf_tracing_link *tr_link =
3374 container_of(link, struct bpf_tracing_link, link.link);
3375 u32 target_btf_id, target_obj_id;
3376
3377 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3378 &target_obj_id, &target_btf_id);
3379 seq_printf(seq,
3380 "attach_type:\t%d\n"
3381 "target_obj_id:\t%u\n"
3382 "target_btf_id:\t%u\n",
3383 tr_link->attach_type,
3384 target_obj_id,
3385 target_btf_id);
3386}
3387
3388static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3389 struct bpf_link_info *info)
3390{
3391 struct bpf_tracing_link *tr_link =
3392 container_of(link, struct bpf_tracing_link, link.link);
3393
3394 info->tracing.attach_type = tr_link->attach_type;
3395 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3396 &info->tracing.target_obj_id,
3397 &info->tracing.target_btf_id);
3398
3399 return 0;
3400}
3401
3402static const struct bpf_link_ops bpf_tracing_link_lops = {
3403 .release = bpf_tracing_link_release,
3404 .dealloc = bpf_tracing_link_dealloc,
3405 .show_fdinfo = bpf_tracing_link_show_fdinfo,
3406 .fill_link_info = bpf_tracing_link_fill_link_info,
3407};
3408
3409static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3410 int tgt_prog_fd,
3411 u32 btf_id,
3412 u64 bpf_cookie)
3413{
3414 struct bpf_link_primer link_primer;
3415 struct bpf_prog *tgt_prog = NULL;
3416 struct bpf_trampoline *tr = NULL;
3417 struct bpf_tracing_link *link;
3418 u64 key = 0;
3419 int err;
3420
3421 switch (prog->type) {
3422 case BPF_PROG_TYPE_TRACING:
3423 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3424 prog->expected_attach_type != BPF_TRACE_FEXIT &&
3425 prog->expected_attach_type != BPF_MODIFY_RETURN) {
3426 err = -EINVAL;
3427 goto out_put_prog;
3428 }
3429 break;
3430 case BPF_PROG_TYPE_EXT:
3431 if (prog->expected_attach_type != 0) {
3432 err = -EINVAL;
3433 goto out_put_prog;
3434 }
3435 break;
3436 case BPF_PROG_TYPE_LSM:
3437 if (prog->expected_attach_type != BPF_LSM_MAC) {
3438 err = -EINVAL;
3439 goto out_put_prog;
3440 }
3441 break;
3442 default:
3443 err = -EINVAL;
3444 goto out_put_prog;
3445 }
3446
3447 if (!!tgt_prog_fd != !!btf_id) {
3448 err = -EINVAL;
3449 goto out_put_prog;
3450 }
3451
3452 if (tgt_prog_fd) {
3453 /*
		 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this
		 * is ever extended to cover BPF_PROG_TYPE_TRACING as well, do not
		 * forget to update how the attach_tracing_prog flag is set.
3458 */
3459 if (prog->type != BPF_PROG_TYPE_EXT) {
3460 err = -EINVAL;
3461 goto out_put_prog;
3462 }
3463
3464 tgt_prog = bpf_prog_get(tgt_prog_fd);
3465 if (IS_ERR(tgt_prog)) {
3466 err = PTR_ERR(tgt_prog);
3467 tgt_prog = NULL;
3468 goto out_put_prog;
3469 }
3470
3471 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3472 }
3473
3474 link = kzalloc(sizeof(*link), GFP_USER);
3475 if (!link) {
3476 err = -ENOMEM;
3477 goto out_put_prog;
3478 }
3479 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3480 &bpf_tracing_link_lops, prog);
3481 link->attach_type = prog->expected_attach_type;
3482 link->link.cookie = bpf_cookie;
3483
3484 mutex_lock(&prog->aux->dst_mutex);
3485
3486 /* There are a few possible cases here:
3487 *
3488 * - if prog->aux->dst_trampoline is set, the program was just loaded
3489 * and not yet attached to anything, so we can use the values stored
3490 * in prog->aux
3491 *
3492 * - if prog->aux->dst_trampoline is NULL, the program has already been
3493 * attached to a target and its initial target was cleared (below)
3494 *
3495 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3496 * target_btf_id using the link_create API.
3497 *
	 * - if tgt_prog == NULL, this function was called using the old
	 *   raw_tracepoint_open API, and we need a target from prog->aux
3500 *
3501 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
3502 * was detached and is going for re-attachment.
3503 *
3504 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
3505 * are NULL, then program was already attached and user did not provide
3506 * tgt_prog_fd so we have no way to find out or create trampoline
3507 */
3508 if (!prog->aux->dst_trampoline && !tgt_prog) {
3509 /*
3510 * Allow re-attach for TRACING and LSM programs. If it's
3511 * currently linked, bpf_trampoline_link_prog will fail.
		 * EXT programs need to specify tgt_prog_fd, so they
		 * re-attach in a separate code path.
3514 */
3515 if (prog->type != BPF_PROG_TYPE_TRACING &&
3516 prog->type != BPF_PROG_TYPE_LSM) {
3517 err = -EINVAL;
3518 goto out_unlock;
3519 }
3520 /* We can allow re-attach only if we have valid attach_btf. */
3521 if (!prog->aux->attach_btf) {
3522 err = -EINVAL;
3523 goto out_unlock;
3524 }
3525 btf_id = prog->aux->attach_btf_id;
3526 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3527 }
3528
3529 if (!prog->aux->dst_trampoline ||
3530 (key && key != prog->aux->dst_trampoline->key)) {
3531 /* If there is no saved target, or the specified target is
3532 * different from the destination specified at load time, we
3533 * need a new trampoline and a check for compatibility
3534 */
3535 struct bpf_attach_target_info tgt_info = {};
3536
3537 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3538 &tgt_info);
3539 if (err)
3540 goto out_unlock;
3541
3542 if (tgt_info.tgt_mod) {
3543 module_put(prog->aux->mod);
3544 prog->aux->mod = tgt_info.tgt_mod;
3545 }
3546
3547 tr = bpf_trampoline_get(key, &tgt_info);
3548 if (!tr) {
3549 err = -ENOMEM;
3550 goto out_unlock;
3551 }
3552 } else {
3553 /* The caller didn't specify a target, or the target was the
3554 * same as the destination supplied during program load. This
3555 * means we can reuse the trampoline and reference from program
3556 * load time, and there is no need to allocate a new one. This
3557 * can only happen once for any program, as the saved values in
3558 * prog->aux are cleared below.
3559 */
3560 tr = prog->aux->dst_trampoline;
3561 tgt_prog = prog->aux->dst_prog;
3562 }
3563
3564 err = bpf_link_prime(&link->link.link, &link_primer);
3565 if (err)
3566 goto out_unlock;
3567
3568 err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog);
3569 if (err) {
3570 bpf_link_cleanup(&link_primer);
3571 link = NULL;
3572 goto out_unlock;
3573 }
3574
3575 link->tgt_prog = tgt_prog;
3576 link->trampoline = tr;
3577
3578 /* Always clear the trampoline and target prog from prog->aux to make
3579 * sure the original attach destination is not kept alive after a
3580 * program is (re-)attached to another target.
3581 */
3582 if (prog->aux->dst_prog &&
3583 (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3584 /* got extra prog ref from syscall, or attaching to different prog */
3585 bpf_prog_put(prog->aux->dst_prog);
3586 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3587 /* we allocated a new trampoline, so free the old one */
3588 bpf_trampoline_put(prog->aux->dst_trampoline);
3589
3590 prog->aux->dst_prog = NULL;
3591 prog->aux->dst_trampoline = NULL;
3592 mutex_unlock(&prog->aux->dst_mutex);
3593
3594 return bpf_link_settle(&link_primer);
3595out_unlock:
3596 if (tr && tr != prog->aux->dst_trampoline)
3597 bpf_trampoline_put(tr);
3598 mutex_unlock(&prog->aux->dst_mutex);
3599 kfree(link);
3600out_put_prog:
3601 if (tgt_prog_fd && tgt_prog)
3602 bpf_prog_put(tgt_prog);
3603 return err;
3604}
3605
3606static void bpf_raw_tp_link_release(struct bpf_link *link)
3607{
3608 struct bpf_raw_tp_link *raw_tp =
3609 container_of(link, struct bpf_raw_tp_link, link);
3610
3611 bpf_probe_unregister(raw_tp->btp, raw_tp);
3612 bpf_put_raw_tracepoint(raw_tp->btp);
3613}
3614
3615static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3616{
3617 struct bpf_raw_tp_link *raw_tp =
3618 container_of(link, struct bpf_raw_tp_link, link);
3619
3620 kfree(raw_tp);
3621}
3622
3623static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3624 struct seq_file *seq)
3625{
3626 struct bpf_raw_tp_link *raw_tp_link =
3627 container_of(link, struct bpf_raw_tp_link, link);
3628
3629 seq_printf(seq,
3630 "tp_name:\t%s\n",
3631 raw_tp_link->btp->tp->name);
3632}
3633
3634static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3635 u32 len)
3636{
3637 if (ulen >= len + 1) {
3638 if (copy_to_user(ubuf, buf, len + 1))
3639 return -EFAULT;
3640 } else {
3641 char zero = '\0';
3642
3643 if (copy_to_user(ubuf, buf, ulen - 1))
3644 return -EFAULT;
3645 if (put_user(zero, ubuf + ulen - 1))
3646 return -EFAULT;
3647 return -ENOSPC;
3648 }
3649
3650 return 0;
3651}
3652
3653static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3654 struct bpf_link_info *info)
3655{
3656 struct bpf_raw_tp_link *raw_tp_link =
3657 container_of(link, struct bpf_raw_tp_link, link);
3658 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3659 const char *tp_name = raw_tp_link->btp->tp->name;
3660 u32 ulen = info->raw_tracepoint.tp_name_len;
3661 size_t tp_len = strlen(tp_name);
3662
3663 if (!ulen ^ !ubuf)
3664 return -EINVAL;
3665
3666 info->raw_tracepoint.tp_name_len = tp_len + 1;
3667
3668 if (!ubuf)
3669 return 0;
3670
3671 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3672}
3673
3674static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3675 .release = bpf_raw_tp_link_release,
3676 .dealloc_deferred = bpf_raw_tp_link_dealloc,
3677 .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3678 .fill_link_info = bpf_raw_tp_link_fill_link_info,
3679};
3680
3681#ifdef CONFIG_PERF_EVENTS
3682struct bpf_perf_link {
3683 struct bpf_link link;
3684 struct file *perf_file;
3685};
3686
3687static void bpf_perf_link_release(struct bpf_link *link)
3688{
3689 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3690 struct perf_event *event = perf_link->perf_file->private_data;
3691
3692 perf_event_free_bpf_prog(event);
3693 fput(perf_link->perf_file);
3694}
3695
3696static void bpf_perf_link_dealloc(struct bpf_link *link)
3697{
3698 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3699
3700 kfree(perf_link);
3701}
3702
3703static int bpf_perf_link_fill_common(const struct perf_event *event,
3704 char __user *uname, u32 *ulenp,
3705 u64 *probe_offset, u64 *probe_addr,
3706 u32 *fd_type, unsigned long *missed)
3707{
3708 const char *buf;
3709 u32 prog_id, ulen;
3710 size_t len;
3711 int err;
3712
3713 ulen = *ulenp;
3714 if (!ulen ^ !uname)
3715 return -EINVAL;
3716
3717 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3718 probe_offset, probe_addr, missed);
3719 if (err)
3720 return err;
3721
3722 if (buf) {
3723 len = strlen(buf);
3724 *ulenp = len + 1;
3725 } else {
3726 *ulenp = 1;
3727 }
3728 if (!uname)
3729 return 0;
3730
3731 if (buf) {
3732 err = bpf_copy_to_user(uname, buf, ulen, len);
3733 if (err)
3734 return err;
3735 } else {
3736 char zero = '\0';
3737
3738 if (put_user(zero, uname))
3739 return -EFAULT;
3740 }
3741 return 0;
3742}
3743
3744#ifdef CONFIG_KPROBE_EVENTS
3745static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3746 struct bpf_link_info *info)
3747{
3748 unsigned long missed;
3749 char __user *uname;
3750 u64 addr, offset;
3751 u32 ulen, type;
3752 int err;
3753
3754 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3755 ulen = info->perf_event.kprobe.name_len;
3756 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
3757 &type, &missed);
3758 if (err)
3759 return err;
3760 if (type == BPF_FD_TYPE_KRETPROBE)
3761 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3762 else
3763 info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3764 info->perf_event.kprobe.name_len = ulen;
3765 info->perf_event.kprobe.offset = offset;
3766 info->perf_event.kprobe.missed = missed;
3767 if (!kallsyms_show_value(current_cred()))
3768 addr = 0;
3769 info->perf_event.kprobe.addr = addr;
3770 info->perf_event.kprobe.cookie = event->bpf_cookie;
3771 return 0;
3772}
3773#endif
3774
3775#ifdef CONFIG_UPROBE_EVENTS
3776static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3777 struct bpf_link_info *info)
3778{
3779 char __user *uname;
3780 u64 addr, offset;
3781 u32 ulen, type;
3782 int err;
3783
3784 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3785 ulen = info->perf_event.uprobe.name_len;
3786 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
3787 &type, NULL);
3788 if (err)
3789 return err;
3790
3791 if (type == BPF_FD_TYPE_URETPROBE)
3792 info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3793 else
3794 info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3795 info->perf_event.uprobe.name_len = ulen;
3796 info->perf_event.uprobe.offset = offset;
3797 info->perf_event.uprobe.cookie = event->bpf_cookie;
3798 return 0;
3799}
3800#endif
3801
3802static int bpf_perf_link_fill_probe(const struct perf_event *event,
3803 struct bpf_link_info *info)
3804{
3805#ifdef CONFIG_KPROBE_EVENTS
3806 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3807 return bpf_perf_link_fill_kprobe(event, info);
3808#endif
3809#ifdef CONFIG_UPROBE_EVENTS
3810 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3811 return bpf_perf_link_fill_uprobe(event, info);
3812#endif
3813 return -EOPNOTSUPP;
3814}
3815
3816static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3817 struct bpf_link_info *info)
3818{
3819 char __user *uname;
3820 u32 ulen;
3821 int err;
3822
3823 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3824 ulen = info->perf_event.tracepoint.name_len;
3825 err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL);
3826 if (err)
3827 return err;
3828
3829 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3830 info->perf_event.tracepoint.name_len = ulen;
3831 info->perf_event.tracepoint.cookie = event->bpf_cookie;
3832 return 0;
3833}
3834
3835static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3836 struct bpf_link_info *info)
3837{
3838 info->perf_event.event.type = event->attr.type;
3839 info->perf_event.event.config = event->attr.config;
3840 info->perf_event.event.cookie = event->bpf_cookie;
3841 info->perf_event.type = BPF_PERF_EVENT_EVENT;
3842 return 0;
3843}
3844
3845static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3846 struct bpf_link_info *info)
3847{
3848 struct bpf_perf_link *perf_link;
3849 const struct perf_event *event;
3850
3851 perf_link = container_of(link, struct bpf_perf_link, link);
3852 event = perf_get_event(perf_link->perf_file);
3853 if (IS_ERR(event))
3854 return PTR_ERR(event);
3855
3856 switch (event->prog->type) {
3857 case BPF_PROG_TYPE_PERF_EVENT:
3858 return bpf_perf_link_fill_perf_event(event, info);
3859 case BPF_PROG_TYPE_TRACEPOINT:
3860 return bpf_perf_link_fill_tracepoint(event, info);
3861 case BPF_PROG_TYPE_KPROBE:
3862 return bpf_perf_link_fill_probe(event, info);
3863 default:
3864 return -EOPNOTSUPP;
3865 }
3866}
3867
3868static const struct bpf_link_ops bpf_perf_link_lops = {
3869 .release = bpf_perf_link_release,
3870 .dealloc = bpf_perf_link_dealloc,
3871 .fill_link_info = bpf_perf_link_fill_link_info,
3872};
3873
3874static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3875{
3876 struct bpf_link_primer link_primer;
3877 struct bpf_perf_link *link;
3878 struct perf_event *event;
3879 struct file *perf_file;
3880 int err;
3881
3882 if (attr->link_create.flags)
3883 return -EINVAL;
3884
3885 perf_file = perf_event_get(attr->link_create.target_fd);
3886 if (IS_ERR(perf_file))
3887 return PTR_ERR(perf_file);
3888
3889 link = kzalloc(sizeof(*link), GFP_USER);
3890 if (!link) {
3891 err = -ENOMEM;
3892 goto out_put_file;
3893 }
3894 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3895 link->perf_file = perf_file;
3896
3897 err = bpf_link_prime(&link->link, &link_primer);
3898 if (err) {
3899 kfree(link);
3900 goto out_put_file;
3901 }
3902
3903 event = perf_file->private_data;
3904 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3905 if (err) {
3906 bpf_link_cleanup(&link_primer);
3907 goto out_put_file;
3908 }
3909 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3910 bpf_prog_inc(prog);
3911
3912 return bpf_link_settle(&link_primer);
3913
3914out_put_file:
3915 fput(perf_file);
3916 return err;
3917}
3918#else
3919static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3920{
3921 return -EOPNOTSUPP;
3922}
3923#endif /* CONFIG_PERF_EVENTS */
3924
3925static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3926 const char __user *user_tp_name, u64 cookie)
3927{
3928 struct bpf_link_primer link_primer;
3929 struct bpf_raw_tp_link *link;
3930 struct bpf_raw_event_map *btp;
3931 const char *tp_name;
3932 char buf[128];
3933 int err;
3934
3935 switch (prog->type) {
3936 case BPF_PROG_TYPE_TRACING:
3937 case BPF_PROG_TYPE_EXT:
3938 case BPF_PROG_TYPE_LSM:
3939 if (user_tp_name)
3940 /* The attach point for this category of programs
3941 * should be specified via btf_id during program load.
3942 */
3943 return -EINVAL;
3944 if (prog->type == BPF_PROG_TYPE_TRACING &&
3945 prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3946 tp_name = prog->aux->attach_func_name;
3947 break;
3948 }
3949 return bpf_tracing_prog_attach(prog, 0, 0, 0);
3950 case BPF_PROG_TYPE_RAW_TRACEPOINT:
3951 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3952 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3953 return -EFAULT;
3954 buf[sizeof(buf) - 1] = 0;
3955 tp_name = buf;
3956 break;
3957 default:
3958 return -EINVAL;
3959 }
3960
3961 btp = bpf_get_raw_tracepoint(tp_name);
3962 if (!btp)
3963 return -ENOENT;
3964
3965 link = kzalloc(sizeof(*link), GFP_USER);
3966 if (!link) {
3967 err = -ENOMEM;
3968 goto out_put_btp;
3969 }
3970 bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3971 &bpf_raw_tp_link_lops, prog,
3972 tracepoint_is_faultable(btp->tp));
3973 link->btp = btp;
3974 link->cookie = cookie;
3975
3976 err = bpf_link_prime(&link->link, &link_primer);
3977 if (err) {
3978 kfree(link);
3979 goto out_put_btp;
3980 }
3981
3982 err = bpf_probe_register(link->btp, link);
3983 if (err) {
3984 bpf_link_cleanup(&link_primer);
3985 goto out_put_btp;
3986 }
3987
3988 return bpf_link_settle(&link_primer);
3989
3990out_put_btp:
3991 bpf_put_raw_tracepoint(btp);
3992 return err;
3993}
3994
3995#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie
3996
3997static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3998{
3999 struct bpf_prog *prog;
4000 void __user *tp_name;
4001 __u64 cookie;
4002 int fd;
4003
4004 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
4005 return -EINVAL;
4006
4007 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
4008 if (IS_ERR(prog))
4009 return PTR_ERR(prog);
4010
4011 tp_name = u64_to_user_ptr(attr->raw_tracepoint.name);
4012 cookie = attr->raw_tracepoint.cookie;
4013 fd = bpf_raw_tp_link_attach(prog, tp_name, cookie);
4014 if (fd < 0)
4015 bpf_prog_put(prog);
4016 return fd;
4017}
4018
4019static enum bpf_prog_type
4020attach_type_to_prog_type(enum bpf_attach_type attach_type)
4021{
4022 switch (attach_type) {
4023 case BPF_CGROUP_INET_INGRESS:
4024 case BPF_CGROUP_INET_EGRESS:
4025 return BPF_PROG_TYPE_CGROUP_SKB;
4026 case BPF_CGROUP_INET_SOCK_CREATE:
4027 case BPF_CGROUP_INET_SOCK_RELEASE:
4028 case BPF_CGROUP_INET4_POST_BIND:
4029 case BPF_CGROUP_INET6_POST_BIND:
4030 return BPF_PROG_TYPE_CGROUP_SOCK;
4031 case BPF_CGROUP_INET4_BIND:
4032 case BPF_CGROUP_INET6_BIND:
4033 case BPF_CGROUP_INET4_CONNECT:
4034 case BPF_CGROUP_INET6_CONNECT:
4035 case BPF_CGROUP_UNIX_CONNECT:
4036 case BPF_CGROUP_INET4_GETPEERNAME:
4037 case BPF_CGROUP_INET6_GETPEERNAME:
4038 case BPF_CGROUP_UNIX_GETPEERNAME:
4039 case BPF_CGROUP_INET4_GETSOCKNAME:
4040 case BPF_CGROUP_INET6_GETSOCKNAME:
4041 case BPF_CGROUP_UNIX_GETSOCKNAME:
4042 case BPF_CGROUP_UDP4_SENDMSG:
4043 case BPF_CGROUP_UDP6_SENDMSG:
4044 case BPF_CGROUP_UNIX_SENDMSG:
4045 case BPF_CGROUP_UDP4_RECVMSG:
4046 case BPF_CGROUP_UDP6_RECVMSG:
4047 case BPF_CGROUP_UNIX_RECVMSG:
4048 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
4049 case BPF_CGROUP_SOCK_OPS:
4050 return BPF_PROG_TYPE_SOCK_OPS;
4051 case BPF_CGROUP_DEVICE:
4052 return BPF_PROG_TYPE_CGROUP_DEVICE;
4053 case BPF_SK_MSG_VERDICT:
4054 return BPF_PROG_TYPE_SK_MSG;
4055 case BPF_SK_SKB_STREAM_PARSER:
4056 case BPF_SK_SKB_STREAM_VERDICT:
4057 case BPF_SK_SKB_VERDICT:
4058 return BPF_PROG_TYPE_SK_SKB;
4059 case BPF_LIRC_MODE2:
4060 return BPF_PROG_TYPE_LIRC_MODE2;
4061 case BPF_FLOW_DISSECTOR:
4062 return BPF_PROG_TYPE_FLOW_DISSECTOR;
4063 case BPF_CGROUP_SYSCTL:
4064 return BPF_PROG_TYPE_CGROUP_SYSCTL;
4065 case BPF_CGROUP_GETSOCKOPT:
4066 case BPF_CGROUP_SETSOCKOPT:
4067 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
4068 case BPF_TRACE_ITER:
4069 case BPF_TRACE_RAW_TP:
4070 case BPF_TRACE_FENTRY:
4071 case BPF_TRACE_FEXIT:
4072 case BPF_MODIFY_RETURN:
4073 return BPF_PROG_TYPE_TRACING;
4074 case BPF_LSM_MAC:
4075 return BPF_PROG_TYPE_LSM;
4076 case BPF_SK_LOOKUP:
4077 return BPF_PROG_TYPE_SK_LOOKUP;
4078 case BPF_XDP:
4079 return BPF_PROG_TYPE_XDP;
4080 case BPF_LSM_CGROUP:
4081 return BPF_PROG_TYPE_LSM;
4082 case BPF_TCX_INGRESS:
4083 case BPF_TCX_EGRESS:
4084 case BPF_NETKIT_PRIMARY:
4085 case BPF_NETKIT_PEER:
4086 return BPF_PROG_TYPE_SCHED_CLS;
4087 default:
4088 return BPF_PROG_TYPE_UNSPEC;
4089 }
4090}
4091
4092static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
4093 enum bpf_attach_type attach_type)
4094{
4095 enum bpf_prog_type ptype;
4096
4097 switch (prog->type) {
4098 case BPF_PROG_TYPE_CGROUP_SOCK:
4099 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4100 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4101 case BPF_PROG_TYPE_SK_LOOKUP:
4102 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
4103 case BPF_PROG_TYPE_CGROUP_SKB:
4104 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
4105 /* cg-skb progs can be loaded by unpriv user.
4106 * check permissions at attach time.
4107 */
4108 return -EPERM;
4109
4110 ptype = attach_type_to_prog_type(attach_type);
4111 if (prog->type != ptype)
4112 return -EINVAL;
4113
4114 return prog->enforce_expected_attach_type &&
4115 prog->expected_attach_type != attach_type ?
4116 -EINVAL : 0;
4117 case BPF_PROG_TYPE_EXT:
4118 return 0;
4119 case BPF_PROG_TYPE_NETFILTER:
4120 if (attach_type != BPF_NETFILTER)
4121 return -EINVAL;
4122 return 0;
4123 case BPF_PROG_TYPE_PERF_EVENT:
4124 case BPF_PROG_TYPE_TRACEPOINT:
4125 if (attach_type != BPF_PERF_EVENT)
4126 return -EINVAL;
4127 return 0;
4128 case BPF_PROG_TYPE_KPROBE:
4129 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
4130 attach_type != BPF_TRACE_KPROBE_MULTI)
4131 return -EINVAL;
4132 if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION &&
4133 attach_type != BPF_TRACE_KPROBE_SESSION)
4134 return -EINVAL;
4135 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
4136 attach_type != BPF_TRACE_UPROBE_MULTI)
4137 return -EINVAL;
4138 if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION &&
4139 attach_type != BPF_TRACE_UPROBE_SESSION)
4140 return -EINVAL;
4141 if (attach_type != BPF_PERF_EVENT &&
4142 attach_type != BPF_TRACE_KPROBE_MULTI &&
4143 attach_type != BPF_TRACE_KPROBE_SESSION &&
4144 attach_type != BPF_TRACE_UPROBE_MULTI &&
4145 attach_type != BPF_TRACE_UPROBE_SESSION)
4146 return -EINVAL;
4147 return 0;
4148 case BPF_PROG_TYPE_SCHED_CLS:
4149 if (attach_type != BPF_TCX_INGRESS &&
4150 attach_type != BPF_TCX_EGRESS &&
4151 attach_type != BPF_NETKIT_PRIMARY &&
4152 attach_type != BPF_NETKIT_PEER)
4153 return -EINVAL;
4154 return 0;
4155 default:
4156 ptype = attach_type_to_prog_type(attach_type);
4157 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
4158 return -EINVAL;
4159 return 0;
4160 }
4161}
4162
4163#define BPF_PROG_ATTACH_LAST_FIELD expected_revision
4164
4165#define BPF_F_ATTACH_MASK_BASE \
4166 (BPF_F_ALLOW_OVERRIDE | \
4167 BPF_F_ALLOW_MULTI | \
4168 BPF_F_REPLACE)
4169
4170#define BPF_F_ATTACH_MASK_MPROG \
4171 (BPF_F_REPLACE | \
4172 BPF_F_BEFORE | \
4173 BPF_F_AFTER | \
4174 BPF_F_ID | \
4175 BPF_F_LINK)
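
/* Legacy attach points (e.g. cgroup-based ones) accept only the BASE flags
 * above, while multi-prog (mprog) capable attach points such as tcx and netkit
 * accept the MPROG set; bpf_prog_attach()/bpf_prog_detach() below pick the
 * right mask based on bpf_mprog_supported().
 */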
4176
4177static int bpf_prog_attach(const union bpf_attr *attr)
4178{
4179 enum bpf_prog_type ptype;
4180 struct bpf_prog *prog;
4181 int ret;
4182
4183 if (CHECK_ATTR(BPF_PROG_ATTACH))
4184 return -EINVAL;
4185
4186 ptype = attach_type_to_prog_type(attr->attach_type);
4187 if (ptype == BPF_PROG_TYPE_UNSPEC)
4188 return -EINVAL;
4189 if (bpf_mprog_supported(ptype)) {
4190 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4191 return -EINVAL;
4192 } else {
4193 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
4194 return -EINVAL;
4195 if (attr->relative_fd ||
4196 attr->expected_revision)
4197 return -EINVAL;
4198 }
4199
4200 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4201 if (IS_ERR(prog))
4202 return PTR_ERR(prog);
4203
4204 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
4205 bpf_prog_put(prog);
4206 return -EINVAL;
4207 }
4208
4209 switch (ptype) {
4210 case BPF_PROG_TYPE_SK_SKB:
4211 case BPF_PROG_TYPE_SK_MSG:
4212 ret = sock_map_get_from_fd(attr, prog);
4213 break;
4214 case BPF_PROG_TYPE_LIRC_MODE2:
4215 ret = lirc_prog_attach(attr, prog);
4216 break;
4217 case BPF_PROG_TYPE_FLOW_DISSECTOR:
4218 ret = netns_bpf_prog_attach(attr, prog);
4219 break;
4220 case BPF_PROG_TYPE_CGROUP_DEVICE:
4221 case BPF_PROG_TYPE_CGROUP_SKB:
4222 case BPF_PROG_TYPE_CGROUP_SOCK:
4223 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4224 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4225 case BPF_PROG_TYPE_CGROUP_SYSCTL:
4226 case BPF_PROG_TYPE_SOCK_OPS:
4227 case BPF_PROG_TYPE_LSM:
4228 if (ptype == BPF_PROG_TYPE_LSM &&
4229 prog->expected_attach_type != BPF_LSM_CGROUP)
4230 ret = -EINVAL;
4231 else
4232 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
4233 break;
4234 case BPF_PROG_TYPE_SCHED_CLS:
4235 if (attr->attach_type == BPF_TCX_INGRESS ||
4236 attr->attach_type == BPF_TCX_EGRESS)
4237 ret = tcx_prog_attach(attr, prog);
4238 else
4239 ret = netkit_prog_attach(attr, prog);
4240 break;
4241 default:
4242 ret = -EINVAL;
4243 }
4244
4245 if (ret)
4246 bpf_prog_put(prog);
4247 return ret;
4248}
4249
4250#define BPF_PROG_DETACH_LAST_FIELD expected_revision
4251
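/* BPF_PROG_DETACH: detach a program from a hook. For mprog-aware targets
 * an optional attach_bpf_fd selects the exact program to detach; legacy
 * targets must not pass attach_flags, relative_fd or expected_revision.
 */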
4252static int bpf_prog_detach(const union bpf_attr *attr)
4253{
4254 struct bpf_prog *prog = NULL;
4255 enum bpf_prog_type ptype;
4256 int ret;
4257
4258 if (CHECK_ATTR(BPF_PROG_DETACH))
4259 return -EINVAL;
4260
4261 ptype = attach_type_to_prog_type(attr->attach_type);
4262 if (bpf_mprog_supported(ptype)) {
4263 if (ptype == BPF_PROG_TYPE_UNSPEC)
4264 return -EINVAL;
4265 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4266 return -EINVAL;
4267 if (attr->attach_bpf_fd) {
4268 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4269 if (IS_ERR(prog))
4270 return PTR_ERR(prog);
4271 }
4272 } else if (attr->attach_flags ||
4273 attr->relative_fd ||
4274 attr->expected_revision) {
4275 return -EINVAL;
4276 }
4277
4278 switch (ptype) {
4279 case BPF_PROG_TYPE_SK_MSG:
4280 case BPF_PROG_TYPE_SK_SKB:
4281 ret = sock_map_prog_detach(attr, ptype);
4282 break;
4283 case BPF_PROG_TYPE_LIRC_MODE2:
4284 ret = lirc_prog_detach(attr);
4285 break;
4286 case BPF_PROG_TYPE_FLOW_DISSECTOR:
4287 ret = netns_bpf_prog_detach(attr, ptype);
4288 break;
4289 case BPF_PROG_TYPE_CGROUP_DEVICE:
4290 case BPF_PROG_TYPE_CGROUP_SKB:
4291 case BPF_PROG_TYPE_CGROUP_SOCK:
4292 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4293 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4294 case BPF_PROG_TYPE_CGROUP_SYSCTL:
4295 case BPF_PROG_TYPE_SOCK_OPS:
4296 case BPF_PROG_TYPE_LSM:
4297 ret = cgroup_bpf_prog_detach(attr, ptype);
4298 break;
4299 case BPF_PROG_TYPE_SCHED_CLS:
4300 if (attr->attach_type == BPF_TCX_INGRESS ||
4301 attr->attach_type == BPF_TCX_EGRESS)
4302 ret = tcx_prog_detach(attr, prog);
4303 else
4304 ret = netkit_prog_detach(attr, prog);
4305 break;
4306 default:
4307 ret = -EINVAL;
4308 }
4309
4310 if (prog)
4311 bpf_prog_put(prog);
4312 return ret;
4313}
4314
4315#define BPF_PROG_QUERY_LAST_FIELD query.revision
4316
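/* BPF_PROG_QUERY: report the programs attached to the hook selected by
 * attr->query.attach_type via the corresponding subsystem's query
 * callback. Requires a net-admin capable caller (see bpf_net_capable()).
 */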
4317static int bpf_prog_query(const union bpf_attr *attr,
4318 union bpf_attr __user *uattr)
4319{
4320 if (!bpf_net_capable())
4321 return -EPERM;
4322 if (CHECK_ATTR(BPF_PROG_QUERY))
4323 return -EINVAL;
4324 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
4325 return -EINVAL;
4326
4327 switch (attr->query.attach_type) {
4328 case BPF_CGROUP_INET_INGRESS:
4329 case BPF_CGROUP_INET_EGRESS:
4330 case BPF_CGROUP_INET_SOCK_CREATE:
4331 case BPF_CGROUP_INET_SOCK_RELEASE:
4332 case BPF_CGROUP_INET4_BIND:
4333 case BPF_CGROUP_INET6_BIND:
4334 case BPF_CGROUP_INET4_POST_BIND:
4335 case BPF_CGROUP_INET6_POST_BIND:
4336 case BPF_CGROUP_INET4_CONNECT:
4337 case BPF_CGROUP_INET6_CONNECT:
4338 case BPF_CGROUP_UNIX_CONNECT:
4339 case BPF_CGROUP_INET4_GETPEERNAME:
4340 case BPF_CGROUP_INET6_GETPEERNAME:
4341 case BPF_CGROUP_UNIX_GETPEERNAME:
4342 case BPF_CGROUP_INET4_GETSOCKNAME:
4343 case BPF_CGROUP_INET6_GETSOCKNAME:
4344 case BPF_CGROUP_UNIX_GETSOCKNAME:
4345 case BPF_CGROUP_UDP4_SENDMSG:
4346 case BPF_CGROUP_UDP6_SENDMSG:
4347 case BPF_CGROUP_UNIX_SENDMSG:
4348 case BPF_CGROUP_UDP4_RECVMSG:
4349 case BPF_CGROUP_UDP6_RECVMSG:
4350 case BPF_CGROUP_UNIX_RECVMSG:
4351 case BPF_CGROUP_SOCK_OPS:
4352 case BPF_CGROUP_DEVICE:
4353 case BPF_CGROUP_SYSCTL:
4354 case BPF_CGROUP_GETSOCKOPT:
4355 case BPF_CGROUP_SETSOCKOPT:
4356 case BPF_LSM_CGROUP:
4357 return cgroup_bpf_prog_query(attr, uattr);
4358 case BPF_LIRC_MODE2:
4359 return lirc_prog_query(attr, uattr);
4360 case BPF_FLOW_DISSECTOR:
4361 case BPF_SK_LOOKUP:
4362 return netns_bpf_prog_query(attr, uattr);
4363 case BPF_SK_SKB_STREAM_PARSER:
4364 case BPF_SK_SKB_STREAM_VERDICT:
4365 case BPF_SK_MSG_VERDICT:
4366 case BPF_SK_SKB_VERDICT:
4367 return sock_map_bpf_prog_query(attr, uattr);
4368 case BPF_TCX_INGRESS:
4369 case BPF_TCX_EGRESS:
4370 return tcx_prog_query(attr, uattr);
4371 case BPF_NETKIT_PRIMARY:
4372 case BPF_NETKIT_PEER:
4373 return netkit_prog_query(attr, uattr);
4374 default:
4375 return -EINVAL;
4376 }
4377}
4378
4379#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
4380
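/* BPF_PROG_TEST_RUN: run the program through its type-specific test_run
 * callback, if one exists, and write the results back through uattr.
 * ctx_in/ctx_out pointers and their sizes must be consistent.
 */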
4381static int bpf_prog_test_run(const union bpf_attr *attr,
4382 union bpf_attr __user *uattr)
4383{
4384 struct bpf_prog *prog;
4385 int ret = -ENOTSUPP;
4386
4387 if (CHECK_ATTR(BPF_PROG_TEST_RUN))
4388 return -EINVAL;
4389
4390 if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
4391 (!attr->test.ctx_size_in && attr->test.ctx_in))
4392 return -EINVAL;
4393
4394 if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
4395 (!attr->test.ctx_size_out && attr->test.ctx_out))
4396 return -EINVAL;
4397
4398 prog = bpf_prog_get(attr->test.prog_fd);
4399 if (IS_ERR(prog))
4400 return PTR_ERR(prog);
4401
4402 if (prog->aux->ops->test_run)
4403 ret = prog->aux->ops->test_run(prog, attr, uattr);
4404
4405 bpf_prog_put(prog);
4406 return ret;
4407}
4408
4409#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
4410
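/* Shared implementation of the *_GET_NEXT_ID commands: return, via
 * uattr->next_id, the smallest object ID strictly greater than
 * attr->start_id in the given IDR. CAP_SYS_ADMIN only.
 */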
4411static int bpf_obj_get_next_id(const union bpf_attr *attr,
4412 union bpf_attr __user *uattr,
4413 struct idr *idr,
4414 spinlock_t *lock)
4415{
4416 u32 next_id = attr->start_id;
4417 int err = 0;
4418
4419 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
4420 return -EINVAL;
4421
4422 if (!capable(CAP_SYS_ADMIN))
4423 return -EPERM;
4424
4425 next_id++;
4426 spin_lock_bh(lock);
4427 if (!idr_get_next(idr, &next_id))
4428 err = -ENOENT;
4429 spin_unlock_bh(lock);
4430
4431 if (!err)
4432 err = put_user(next_id, &uattr->next_id);
4433
4434 return err;
4435}
4436
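/* Return the map with the smallest ID >= *id with a reference taken,
 * updating *id to the ID that was found; maps whose refcount has already
 * dropped to zero are skipped.
 */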
4437struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
4438{
4439 struct bpf_map *map;
4440
4441 spin_lock_bh(&map_idr_lock);
4442again:
4443 map = idr_get_next(&map_idr, id);
4444 if (map) {
4445 map = __bpf_map_inc_not_zero(map, false);
4446 if (IS_ERR(map)) {
4447 (*id)++;
4448 goto again;
4449 }
4450 }
4451 spin_unlock_bh(&map_idr_lock);
4452
4453 return map;
4454}
4455
4456struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
4457{
4458 struct bpf_prog *prog;
4459
4460 spin_lock_bh(&prog_idr_lock);
4461again:
4462 prog = idr_get_next(&prog_idr, id);
4463 if (prog) {
4464 prog = bpf_prog_inc_not_zero(prog);
4465 if (IS_ERR(prog)) {
4466 (*id)++;
4467 goto again;
4468 }
4469 }
4470 spin_unlock_bh(&prog_idr_lock);
4471
4472 return prog;
4473}
4474
4475#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
4476
4477struct bpf_prog *bpf_prog_by_id(u32 id)
4478{
4479 struct bpf_prog *prog;
4480
4481 if (!id)
4482 return ERR_PTR(-ENOENT);
4483
4484 spin_lock_bh(&prog_idr_lock);
4485 prog = idr_find(&prog_idr, id);
4486 if (prog)
4487 prog = bpf_prog_inc_not_zero(prog);
4488 else
4489 prog = ERR_PTR(-ENOENT);
4490 spin_unlock_bh(&prog_idr_lock);
4491 return prog;
4492}
4493
4494static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
4495{
4496 struct bpf_prog *prog;
4497 u32 id = attr->prog_id;
4498 int fd;
4499
4500 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
4501 return -EINVAL;
4502
4503 if (!capable(CAP_SYS_ADMIN))
4504 return -EPERM;
4505
4506 prog = bpf_prog_by_id(id);
4507 if (IS_ERR(prog))
4508 return PTR_ERR(prog);
4509
4510 fd = bpf_prog_new_fd(prog);
4511 if (fd < 0)
4512 bpf_prog_put(prog);
4513
4514 return fd;
4515}
4516
4517#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
4518
4519static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
4520{
4521 struct bpf_map *map;
4522 u32 id = attr->map_id;
4523 int f_flags;
4524 int fd;
4525
4526 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
4527 attr->open_flags & ~BPF_OBJ_FLAG_MASK)
4528 return -EINVAL;
4529
4530 if (!capable(CAP_SYS_ADMIN))
4531 return -EPERM;
4532
4533 f_flags = bpf_get_file_flag(attr->open_flags);
4534 if (f_flags < 0)
4535 return f_flags;
4536
4537 spin_lock_bh(&map_idr_lock);
4538 map = idr_find(&map_idr, id);
4539 if (map)
4540 map = __bpf_map_inc_not_zero(map, true);
4541 else
4542 map = ERR_PTR(-ENOENT);
4543 spin_unlock_bh(&map_idr_lock);
4544
4545 if (IS_ERR(map))
4546 return PTR_ERR(map);
4547
4548 fd = bpf_map_new_fd(map, f_flags);
4549 if (fd < 0)
4550 bpf_map_put_with_uref(map);
4551
4552 return fd;
4553}
4554
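/* Map a pointer embedded in a ld_imm64 instruction back to the map it
 * belongs to: either the map itself (BPF_PSEUDO_MAP_FD) or an address
 * inside its direct value area (BPF_PSEUDO_MAP_VALUE, with *off set).
 * Returns NULL when no map used by the program matches.
 */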
4555static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4556 unsigned long addr, u32 *off,
4557 u32 *type)
4558{
4559 const struct bpf_map *map;
4560 int i;
4561
4562 mutex_lock(&prog->aux->used_maps_mutex);
4563 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
4564 map = prog->aux->used_maps[i];
4565 if (map == (void *)addr) {
4566 *type = BPF_PSEUDO_MAP_FD;
4567 goto out;
4568 }
4569 if (!map->ops->map_direct_value_meta)
4570 continue;
4571 if (!map->ops->map_direct_value_meta(map, addr, off)) {
4572 *type = BPF_PSEUDO_MAP_VALUE;
4573 goto out;
4574 }
4575 }
4576 map = NULL;
4577
4578out:
4579 mutex_unlock(&prog->aux->used_maps_mutex);
4580 return map;
4581}
4582
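/* Create a sanitized copy of the program's xlated instructions for
 * dumping to user space: convert internal opcodes back to their public
 * forms, zero helper call immediates unless the caller may see raw
 * kernel addresses, and rewrite map pointers into map IDs and offsets.
 */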
4583static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4584 const struct cred *f_cred)
4585{
4586 const struct bpf_map *map;
4587 struct bpf_insn *insns;
4588 u32 off, type;
4589 u64 imm;
4590 u8 code;
4591 int i;
4592
4593 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
4594 GFP_USER);
4595 if (!insns)
4596 return insns;
4597
4598 for (i = 0; i < prog->len; i++) {
4599 code = insns[i].code;
4600
4601 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
4602 insns[i].code = BPF_JMP | BPF_CALL;
4603 insns[i].imm = BPF_FUNC_tail_call;
4604 /* fall-through */
4605 }
4606 if (code == (BPF_JMP | BPF_CALL) ||
4607 code == (BPF_JMP | BPF_CALL_ARGS)) {
4608 if (code == (BPF_JMP | BPF_CALL_ARGS))
4609 insns[i].code = BPF_JMP | BPF_CALL;
4610 if (!bpf_dump_raw_ok(f_cred))
4611 insns[i].imm = 0;
4612 continue;
4613 }
4614 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
4615 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
4616 continue;
4617 }
4618
4619 if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||
4620 BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {
4621 insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM;
4622 continue;
4623 }
4624
4625 if (code != (BPF_LD | BPF_IMM | BPF_DW))
4626 continue;
4627
4628 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
4629 map = bpf_map_from_imm(prog, imm, &off, &type);
4630 if (map) {
4631 insns[i].src_reg = type;
4632 insns[i].imm = map->id;
4633 insns[i + 1].imm = off;
4634 continue;
4635 }
4636 }
4637
4638 return insns;
4639}
4640
4641static int set_info_rec_size(struct bpf_prog_info *info)
4642{
4643 /*
4644 * Ensure info.*_rec_size matches the record size the kernel expects,
4645 * or allow a zero *_rec_size only when the corresponding _cnt is also
4646 * zero. In that case, the kernel will write the expected _rec_size
4647 * back into the info.
4648 */
4652
4653 if ((info->nr_func_info || info->func_info_rec_size) &&
4654 info->func_info_rec_size != sizeof(struct bpf_func_info))
4655 return -EINVAL;
4656
4657 if ((info->nr_line_info || info->line_info_rec_size) &&
4658 info->line_info_rec_size != sizeof(struct bpf_line_info))
4659 return -EINVAL;
4660
4661 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
4662 info->jited_line_info_rec_size != sizeof(__u64))
4663 return -EINVAL;
4664
4665 info->func_info_rec_size = sizeof(struct bpf_func_info);
4666 info->line_info_rec_size = sizeof(struct bpf_line_info);
4667 info->jited_line_info_rec_size = sizeof(__u64);
4668
4669 return 0;
4670}
4671
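/* BPF_OBJ_GET_INFO_BY_FD for programs: fill struct bpf_prog_info with
 * metadata and run-time stats, and, for bpf-capable callers, copy out the
 * xlated/JITed instructions, JIT ksyms and func/line info, truncated to
 * the buffer sizes provided by user space.
 */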
4672static int bpf_prog_get_info_by_fd(struct file *file,
4673 struct bpf_prog *prog,
4674 const union bpf_attr *attr,
4675 union bpf_attr __user *uattr)
4676{
4677 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4678 struct btf *attach_btf = bpf_prog_get_target_btf(prog);
4679 struct bpf_prog_info info;
4680 u32 info_len = attr->info.info_len;
4681 struct bpf_prog_kstats stats;
4682 char __user *uinsns;
4683 u32 ulen;
4684 int err;
4685
4686 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4687 if (err)
4688 return err;
4689 info_len = min_t(u32, sizeof(info), info_len);
4690
4691 memset(&info, 0, sizeof(info));
4692 if (copy_from_user(&info, uinfo, info_len))
4693 return -EFAULT;
4694
4695 info.type = prog->type;
4696 info.id = prog->aux->id;
4697 info.load_time = prog->aux->load_time;
4698 info.created_by_uid = from_kuid_munged(current_user_ns(),
4699 prog->aux->user->uid);
4700 info.gpl_compatible = prog->gpl_compatible;
4701
4702 memcpy(info.tag, prog->tag, sizeof(prog->tag));
4703 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
4704
4705 mutex_lock(&prog->aux->used_maps_mutex);
4706 ulen = info.nr_map_ids;
4707 info.nr_map_ids = prog->aux->used_map_cnt;
4708 ulen = min_t(u32, info.nr_map_ids, ulen);
4709 if (ulen) {
4710 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
4711 u32 i;
4712
4713 for (i = 0; i < ulen; i++)
4714 if (put_user(prog->aux->used_maps[i]->id,
4715 &user_map_ids[i])) {
4716 mutex_unlock(&prog->aux->used_maps_mutex);
4717 return -EFAULT;
4718 }
4719 }
4720 mutex_unlock(&prog->aux->used_maps_mutex);
4721
4722 err = set_info_rec_size(&info);
4723 if (err)
4724 return err;
4725
4726 bpf_prog_get_stats(prog, &stats);
4727 info.run_time_ns = stats.nsecs;
4728 info.run_cnt = stats.cnt;
4729 info.recursion_misses = stats.misses;
4730
4731 info.verified_insns = prog->aux->verified_insns;
4732
4733 if (!bpf_capable()) {
4734 info.jited_prog_len = 0;
4735 info.xlated_prog_len = 0;
4736 info.nr_jited_ksyms = 0;
4737 info.nr_jited_func_lens = 0;
4738 info.nr_func_info = 0;
4739 info.nr_line_info = 0;
4740 info.nr_jited_line_info = 0;
4741 goto done;
4742 }
4743
4744 ulen = info.xlated_prog_len;
4745 info.xlated_prog_len = bpf_prog_insn_size(prog);
4746 if (info.xlated_prog_len && ulen) {
4747 struct bpf_insn *insns_sanitized;
4748 bool fault;
4749
4750 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4751 info.xlated_prog_insns = 0;
4752 goto done;
4753 }
4754 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4755 if (!insns_sanitized)
4756 return -ENOMEM;
4757 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4758 ulen = min_t(u32, info.xlated_prog_len, ulen);
4759 fault = copy_to_user(uinsns, insns_sanitized, ulen);
4760 kfree(insns_sanitized);
4761 if (fault)
4762 return -EFAULT;
4763 }
4764
4765 if (bpf_prog_is_offloaded(prog->aux)) {
4766 err = bpf_prog_offload_info_fill(&info, prog);
4767 if (err)
4768 return err;
4769 goto done;
4770 }
4771
4772 /* NOTE: the following code is supposed to be skipped for offload.
4773 * bpf_prog_offload_info_fill() is the place to fill similar fields
4774 * for offload.
4775 */
4776 ulen = info.jited_prog_len;
4777 if (prog->aux->func_cnt) {
4778 u32 i;
4779
4780 info.jited_prog_len = 0;
4781 for (i = 0; i < prog->aux->func_cnt; i++)
4782 info.jited_prog_len += prog->aux->func[i]->jited_len;
4783 } else {
4784 info.jited_prog_len = prog->jited_len;
4785 }
4786
4787 if (info.jited_prog_len && ulen) {
4788 if (bpf_dump_raw_ok(file->f_cred)) {
4789 uinsns = u64_to_user_ptr(info.jited_prog_insns);
4790 ulen = min_t(u32, info.jited_prog_len, ulen);
4791
4792 /* for multi-function programs, copy the JITed
4793 * instructions for all the functions
4794 */
4795 if (prog->aux->func_cnt) {
4796 u32 len, free, i;
4797 u8 *img;
4798
4799 free = ulen;
4800 for (i = 0; i < prog->aux->func_cnt; i++) {
4801 len = prog->aux->func[i]->jited_len;
4802 len = min_t(u32, len, free);
4803 img = (u8 *) prog->aux->func[i]->bpf_func;
4804 if (copy_to_user(uinsns, img, len))
4805 return -EFAULT;
4806 uinsns += len;
4807 free -= len;
4808 if (!free)
4809 break;
4810 }
4811 } else {
4812 if (copy_to_user(uinsns, prog->bpf_func, ulen))
4813 return -EFAULT;
4814 }
4815 } else {
4816 info.jited_prog_insns = 0;
4817 }
4818 }
4819
4820 ulen = info.nr_jited_ksyms;
4821 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4822 if (ulen) {
4823 if (bpf_dump_raw_ok(file->f_cred)) {
4824 unsigned long ksym_addr;
4825 u64 __user *user_ksyms;
4826 u32 i;
4827
4828 /* copy the address of the kernel symbol
4829 * corresponding to each function
4830 */
4831 ulen = min_t(u32, info.nr_jited_ksyms, ulen);
4832 user_ksyms = u64_to_user_ptr(info.jited_ksyms);
4833 if (prog->aux->func_cnt) {
4834 for (i = 0; i < ulen; i++) {
4835 ksym_addr = (unsigned long)
4836 prog->aux->func[i]->bpf_func;
4837 if (put_user((u64) ksym_addr,
4838 &user_ksyms[i]))
4839 return -EFAULT;
4840 }
4841 } else {
4842 ksym_addr = (unsigned long) prog->bpf_func;
4843 if (put_user((u64) ksym_addr, &user_ksyms[0]))
4844 return -EFAULT;
4845 }
4846 } else {
4847 info.jited_ksyms = 0;
4848 }
4849 }
4850
4851 ulen = info.nr_jited_func_lens;
4852 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4853 if (ulen) {
4854 if (bpf_dump_raw_ok(file->f_cred)) {
4855 u32 __user *user_lens;
4856 u32 func_len, i;
4857
4858 /* copy the JITed image lengths for each function */
4859 ulen = min_t(u32, info.nr_jited_func_lens, ulen);
4860 user_lens = u64_to_user_ptr(info.jited_func_lens);
4861 if (prog->aux->func_cnt) {
4862 for (i = 0; i < ulen; i++) {
4863 func_len =
4864 prog->aux->func[i]->jited_len;
4865 if (put_user(func_len, &user_lens[i]))
4866 return -EFAULT;
4867 }
4868 } else {
4869 func_len = prog->jited_len;
4870 if (put_user(func_len, &user_lens[0]))
4871 return -EFAULT;
4872 }
4873 } else {
4874 info.jited_func_lens = 0;
4875 }
4876 }
4877
4878 if (prog->aux->btf)
4879 info.btf_id = btf_obj_id(prog->aux->btf);
4880 info.attach_btf_id = prog->aux->attach_btf_id;
4881 if (attach_btf)
4882 info.attach_btf_obj_id = btf_obj_id(attach_btf);
4883
4884 ulen = info.nr_func_info;
4885 info.nr_func_info = prog->aux->func_info_cnt;
4886 if (info.nr_func_info && ulen) {
4887 char __user *user_finfo;
4888
4889 user_finfo = u64_to_user_ptr(info.func_info);
4890 ulen = min_t(u32, info.nr_func_info, ulen);
4891 if (copy_to_user(user_finfo, prog->aux->func_info,
4892 info.func_info_rec_size * ulen))
4893 return -EFAULT;
4894 }
4895
4896 ulen = info.nr_line_info;
4897 info.nr_line_info = prog->aux->nr_linfo;
4898 if (info.nr_line_info && ulen) {
4899 __u8 __user *user_linfo;
4900
4901 user_linfo = u64_to_user_ptr(info.line_info);
4902 ulen = min_t(u32, info.nr_line_info, ulen);
4903 if (copy_to_user(user_linfo, prog->aux->linfo,
4904 info.line_info_rec_size * ulen))
4905 return -EFAULT;
4906 }
4907
4908 ulen = info.nr_jited_line_info;
4909 if (prog->aux->jited_linfo)
4910 info.nr_jited_line_info = prog->aux->nr_linfo;
4911 else
4912 info.nr_jited_line_info = 0;
4913 if (info.nr_jited_line_info && ulen) {
4914 if (bpf_dump_raw_ok(file->f_cred)) {
4915 unsigned long line_addr;
4916 __u64 __user *user_linfo;
4917 u32 i;
4918
4919 user_linfo = u64_to_user_ptr(info.jited_line_info);
4920 ulen = min_t(u32, info.nr_jited_line_info, ulen);
4921 for (i = 0; i < ulen; i++) {
4922 line_addr = (unsigned long)prog->aux->jited_linfo[i];
4923 if (put_user((__u64)line_addr, &user_linfo[i]))
4924 return -EFAULT;
4925 }
4926 } else {
4927 info.jited_line_info = 0;
4928 }
4929 }
4930
4931 ulen = info.nr_prog_tags;
4932 info.nr_prog_tags = prog->aux->func_cnt ? : 1;
4933 if (ulen) {
4934 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
4935 u32 i;
4936
4937 user_prog_tags = u64_to_user_ptr(info.prog_tags);
4938 ulen = min_t(u32, info.nr_prog_tags, ulen);
4939 if (prog->aux->func_cnt) {
4940 for (i = 0; i < ulen; i++) {
4941 if (copy_to_user(user_prog_tags[i],
4942 prog->aux->func[i]->tag,
4943 BPF_TAG_SIZE))
4944 return -EFAULT;
4945 }
4946 } else {
4947 if (copy_to_user(user_prog_tags[0],
4948 prog->tag, BPF_TAG_SIZE))
4949 return -EFAULT;
4950 }
4951 }
4952
4953done:
4954 if (copy_to_user(uinfo, &info, info_len) ||
4955 put_user(info_len, &uattr->info.info_len))
4956 return -EFAULT;
4957
4958 return 0;
4959}
4960
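/* BPF_OBJ_GET_INFO_BY_FD for maps: report the map type, key/value sizes,
 * max_entries, flags, name and BTF type IDs, plus struct_ops or offload
 * specific details where applicable.
 */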
4961static int bpf_map_get_info_by_fd(struct file *file,
4962 struct bpf_map *map,
4963 const union bpf_attr *attr,
4964 union bpf_attr __user *uattr)
4965{
4966 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4967 struct bpf_map_info info;
4968 u32 info_len = attr->info.info_len;
4969 int err;
4970
4971 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4972 if (err)
4973 return err;
4974 info_len = min_t(u32, sizeof(info), info_len);
4975
4976 memset(&info, 0, sizeof(info));
4977 info.type = map->map_type;
4978 info.id = map->id;
4979 info.key_size = map->key_size;
4980 info.value_size = map->value_size;
4981 info.max_entries = map->max_entries;
4982 info.map_flags = map->map_flags;
4983 info.map_extra = map->map_extra;
4984 memcpy(info.name, map->name, sizeof(map->name));
4985
4986 if (map->btf) {
4987 info.btf_id = btf_obj_id(map->btf);
4988 info.btf_key_type_id = map->btf_key_type_id;
4989 info.btf_value_type_id = map->btf_value_type_id;
4990 }
4991 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4992 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
4993 bpf_map_struct_ops_info_fill(&info, map);
4994
4995 if (bpf_map_is_offloaded(map)) {
4996 err = bpf_map_offload_info_fill(&info, map);
4997 if (err)
4998 return err;
4999 }
5000
5001 if (copy_to_user(uinfo, &info, info_len) ||
5002 put_user(info_len, &uattr->info.info_len))
5003 return -EFAULT;
5004
5005 return 0;
5006}
5007
5008static int bpf_btf_get_info_by_fd(struct file *file,
5009 struct btf *btf,
5010 const union bpf_attr *attr,
5011 union bpf_attr __user *uattr)
5012{
5013 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
5014 u32 info_len = attr->info.info_len;
5015 int err;
5016
5017 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
5018 if (err)
5019 return err;
5020
5021 return btf_get_info_by_fd(btf, attr, uattr);
5022}
5023
5024static int bpf_link_get_info_by_fd(struct file *file,
5025 struct bpf_link *link,
5026 const union bpf_attr *attr,
5027 union bpf_attr __user *uattr)
5028{
5029 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
5030 struct bpf_link_info info;
5031 u32 info_len = attr->info.info_len;
5032 int err;
5033
5034 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
5035 if (err)
5036 return err;
5037 info_len = min_t(u32, sizeof(info), info_len);
5038
5039 memset(&info, 0, sizeof(info));
5040 if (copy_from_user(&info, uinfo, info_len))
5041 return -EFAULT;
5042
5043 info.type = link->type;
5044 info.id = link->id;
5045 if (link->prog)
5046 info.prog_id = link->prog->aux->id;
5047
5048 if (link->ops->fill_link_info) {
5049 err = link->ops->fill_link_info(link, &info);
5050 if (err)
5051 return err;
5052 }
5053
5054 if (copy_to_user(uinfo, &info, info_len) ||
5055 put_user(info_len, &uattr->info.info_len))
5056 return -EFAULT;
5057
5058 return 0;
5059}
5060
5061
5062#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
5063
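/* Dispatch BPF_OBJ_GET_INFO_BY_FD according to which kind of BPF object
 * (prog, map, BTF or link) the given file descriptor refers to.
 */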
5064static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
5065 union bpf_attr __user *uattr)
5066{
5067 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
5068 return -EINVAL;
5069
5070 CLASS(fd, f)(attr->info.bpf_fd);
5071 if (fd_empty(f))
5072 return -EBADFD;
5073
5074 if (fd_file(f)->f_op == &bpf_prog_fops)
5075 return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
5076 uattr);
5077 else if (fd_file(f)->f_op == &bpf_map_fops)
5078 return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
5079 uattr);
5080 else if (fd_file(f)->f_op == &btf_fops)
5081 return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
5082 else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
5083 return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
5084 attr, uattr);
5085 return -EINVAL;
5086}
5087
5088#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
5089
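/* BPF_BTF_LOAD: optionally resolve a BPF token from btf_token_fd, check
 * that the caller (or the token) may load BTF, and create a new BTF
 * object file descriptor.
 */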
5090static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
5091{
5092 struct bpf_token *token = NULL;
5093
5094 if (CHECK_ATTR(BPF_BTF_LOAD))
5095 return -EINVAL;
5096
5097 if (attr->btf_flags & ~BPF_F_TOKEN_FD)
5098 return -EINVAL;
5099
5100 if (attr->btf_flags & BPF_F_TOKEN_FD) {
5101 token = bpf_token_get_from_fd(attr->btf_token_fd);
5102 if (IS_ERR(token))
5103 return PTR_ERR(token);
5104 if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
5105 bpf_token_put(token);
5106 token = NULL;
5107 }
5108 }
5109
5110 if (!bpf_token_capable(token, CAP_BPF)) {
5111 bpf_token_put(token);
5112 return -EPERM;
5113 }
5114
5115 bpf_token_put(token);
5116
5117 return btf_new_fd(attr, uattr, uattr_size);
5118}
5119
5120#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
5121
5122static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
5123{
5124 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
5125 return -EINVAL;
5126
5127 if (!capable(CAP_SYS_ADMIN))
5128 return -EPERM;
5129
5130 return btf_get_fd_by_id(attr->btf_id);
5131}
5132
5133static int bpf_task_fd_query_copy(const union bpf_attr *attr,
5134 union bpf_attr __user *uattr,
5135 u32 prog_id, u32 fd_type,
5136 const char *buf, u64 probe_offset,
5137 u64 probe_addr)
5138{
5139 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
5140 u32 len = buf ? strlen(buf) : 0, input_len;
5141 int err = 0;
5142
5143 if (put_user(len, &uattr->task_fd_query.buf_len))
5144 return -EFAULT;
5145 input_len = attr->task_fd_query.buf_len;
5146 if (input_len && ubuf) {
5147 if (!len) {
5148 /* nothing to copy, just make ubuf NULL terminated */
5149 char zero = '\0';
5150
5151 if (put_user(zero, ubuf))
5152 return -EFAULT;
5153 } else if (input_len >= len + 1) {
5154 /* ubuf can hold the string with NULL terminator */
5155 if (copy_to_user(ubuf, buf, len + 1))
5156 return -EFAULT;
5157 } else {
5158 /* ubuf cannot hold the string with NULL terminator,
5159 * do a partial copy with NULL terminator.
5160 */
5161 char zero = '\0';
5162
5163 err = -ENOSPC;
5164 if (copy_to_user(ubuf, buf, input_len - 1))
5165 return -EFAULT;
5166 if (put_user(zero, ubuf + input_len - 1))
5167 return -EFAULT;
5168 }
5169 }
5170
5171 if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
5172 put_user(fd_type, &uattr->task_fd_query.fd_type) ||
5173 put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
5174 put_user(probe_addr, &uattr->task_fd_query.probe_addr))
5175 return -EFAULT;
5176
5177 return err;
5178}
5179
5180#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
5181
5182static int bpf_task_fd_query(const union bpf_attr *attr,
5183 union bpf_attr __user *uattr)
5184{
5185 pid_t pid = attr->task_fd_query.pid;
5186 u32 fd = attr->task_fd_query.fd;
5187 const struct perf_event *event;
5188 struct task_struct *task;
5189 struct file *file;
5190 int err;
5191
5192 if (CHECK_ATTR(BPF_TASK_FD_QUERY))
5193 return -EINVAL;
5194
5195 if (!capable(CAP_SYS_ADMIN))
5196 return -EPERM;
5197
5198 if (attr->task_fd_query.flags != 0)
5199 return -EINVAL;
5200
5201 rcu_read_lock();
5202 task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
5203 rcu_read_unlock();
5204 if (!task)
5205 return -ENOENT;
5206
5207 err = 0;
5208 file = fget_task(task, fd);
5209 put_task_struct(task);
5210 if (!file)
5211 return -EBADF;
5212
5213 if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) {
5214 struct bpf_link *link = file->private_data;
5215
5216 if (link->ops == &bpf_raw_tp_link_lops) {
5217 struct bpf_raw_tp_link *raw_tp =
5218 container_of(link, struct bpf_raw_tp_link, link);
5219 struct bpf_raw_event_map *btp = raw_tp->btp;
5220
5221 err = bpf_task_fd_query_copy(attr, uattr,
5222 raw_tp->link.prog->aux->id,
5223 BPF_FD_TYPE_RAW_TRACEPOINT,
5224 btp->tp->name, 0, 0);
5225 goto put_file;
5226 }
5227 goto out_not_supp;
5228 }
5229
5230 event = perf_get_event(file);
5231 if (!IS_ERR(event)) {
5232 u64 probe_offset, probe_addr;
5233 u32 prog_id, fd_type;
5234 const char *buf;
5235
5236 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
5237 &buf, &probe_offset,
5238 &probe_addr, NULL);
5239 if (!err)
5240 err = bpf_task_fd_query_copy(attr, uattr, prog_id,
5241 fd_type, buf,
5242 probe_offset,
5243 probe_addr);
5244 goto put_file;
5245 }
5246
5247out_not_supp:
5248 err = -ENOTSUPP;
5249put_file:
5250 fput(file);
5251 return err;
5252}
5253
5254#define BPF_MAP_BATCH_LAST_FIELD batch.flags
5255
5256#define BPF_DO_BATCH(fn, ...) \
5257 do { \
5258 if (!fn) { \
5259 err = -ENOTSUPP; \
5260 goto err_put; \
5261 } \
5262 err = fn(__VA_ARGS__); \
5263 } while (0)
5264
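/* Common handler for the map batch commands: look up the map, check that
 * the fd grants the read and/or write access the command needs, and call
 * the map's corresponding batch callback if it is implemented.
 */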
5265static int bpf_map_do_batch(const union bpf_attr *attr,
5266 union bpf_attr __user *uattr,
5267 int cmd)
5268{
5269 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
5270 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
5271 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
5272 struct bpf_map *map;
5273 int err;
5274
5275 if (CHECK_ATTR(BPF_MAP_BATCH))
5276 return -EINVAL;
5277
5278 CLASS(fd, f)(attr->batch.map_fd);
5279
5280 map = __bpf_map_get(f);
5281 if (IS_ERR(map))
5282 return PTR_ERR(map);
5283 if (has_write)
5284 bpf_map_write_active_inc(map);
5285 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
5286 err = -EPERM;
5287 goto err_put;
5288 }
5289 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
5290 err = -EPERM;
5291 goto err_put;
5292 }
5293
5294 if (cmd == BPF_MAP_LOOKUP_BATCH)
5295 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
5296 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
5297 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
5298 else if (cmd == BPF_MAP_UPDATE_BATCH)
5299 BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr);
5300 else
5301 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
5302err_put:
5303 if (has_write) {
5304 maybe_wait_bpf_programs(map);
5305 bpf_map_write_active_dec(map);
5306 }
5307 return err;
5308}
5309
5310#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
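/* BPF_LINK_CREATE: verify that the program may be attached with the
 * requested attach type and dispatch to the per-type link creation
 * routine. On error the prog reference taken here is dropped.
 */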
5311static int link_create(union bpf_attr *attr, bpfptr_t uattr)
5312{
5313 struct bpf_prog *prog;
5314 int ret;
5315
5316 if (CHECK_ATTR(BPF_LINK_CREATE))
5317 return -EINVAL;
5318
5319 if (attr->link_create.attach_type == BPF_STRUCT_OPS)
5320 return bpf_struct_ops_link_create(attr);
5321
5322 prog = bpf_prog_get(attr->link_create.prog_fd);
5323 if (IS_ERR(prog))
5324 return PTR_ERR(prog);
5325
5326 ret = bpf_prog_attach_check_attach_type(prog,
5327 attr->link_create.attach_type);
5328 if (ret)
5329 goto out;
5330
5331 switch (prog->type) {
5332 case BPF_PROG_TYPE_CGROUP_SKB:
5333 case BPF_PROG_TYPE_CGROUP_SOCK:
5334 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
5335 case BPF_PROG_TYPE_SOCK_OPS:
5336 case BPF_PROG_TYPE_CGROUP_DEVICE:
5337 case BPF_PROG_TYPE_CGROUP_SYSCTL:
5338 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5339 ret = cgroup_bpf_link_attach(attr, prog);
5340 break;
5341 case BPF_PROG_TYPE_EXT:
5342 ret = bpf_tracing_prog_attach(prog,
5343 attr->link_create.target_fd,
5344 attr->link_create.target_btf_id,
5345 attr->link_create.tracing.cookie);
5346 break;
5347 case BPF_PROG_TYPE_LSM:
5348 case BPF_PROG_TYPE_TRACING:
5349 if (attr->link_create.attach_type != prog->expected_attach_type) {
5350 ret = -EINVAL;
5351 goto out;
5352 }
5353 if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
5354 ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie);
5355 else if (prog->expected_attach_type == BPF_TRACE_ITER)
5356 ret = bpf_iter_link_attach(attr, uattr, prog);
5357 else if (prog->expected_attach_type == BPF_LSM_CGROUP)
5358 ret = cgroup_bpf_link_attach(attr, prog);
5359 else
5360 ret = bpf_tracing_prog_attach(prog,
5361 attr->link_create.target_fd,
5362 attr->link_create.target_btf_id,
5363 attr->link_create.tracing.cookie);
5364 break;
5365 case BPF_PROG_TYPE_FLOW_DISSECTOR:
5366 case BPF_PROG_TYPE_SK_LOOKUP:
5367 ret = netns_bpf_link_create(attr, prog);
5368 break;
5369 case BPF_PROG_TYPE_SK_MSG:
5370 case BPF_PROG_TYPE_SK_SKB:
5371 ret = sock_map_link_create(attr, prog);
5372 break;
5373#ifdef CONFIG_NET
5374 case BPF_PROG_TYPE_XDP:
5375 ret = bpf_xdp_link_attach(attr, prog);
5376 break;
5377 case BPF_PROG_TYPE_SCHED_CLS:
5378 if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
5379 attr->link_create.attach_type == BPF_TCX_EGRESS)
5380 ret = tcx_link_attach(attr, prog);
5381 else
5382 ret = netkit_link_attach(attr, prog);
5383 break;
5384 case BPF_PROG_TYPE_NETFILTER:
5385 ret = bpf_nf_link_attach(attr, prog);
5386 break;
5387#endif
5388 case BPF_PROG_TYPE_PERF_EVENT:
5389 case BPF_PROG_TYPE_TRACEPOINT:
5390 ret = bpf_perf_link_attach(attr, prog);
5391 break;
5392 case BPF_PROG_TYPE_KPROBE:
5393 if (attr->link_create.attach_type == BPF_PERF_EVENT)
5394 ret = bpf_perf_link_attach(attr, prog);
5395 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
5396 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
5397 ret = bpf_kprobe_multi_link_attach(attr, prog);
5398 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI ||
5399 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION)
5400 ret = bpf_uprobe_multi_link_attach(attr, prog);
5401 break;
5402 default:
5403 ret = -EINVAL;
5404 }
5405
5406out:
5407 if (ret < 0)
5408 bpf_prog_put(prog);
5409 return ret;
5410}
5411
5412static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
5413{
5414 struct bpf_map *new_map, *old_map = NULL;
5415 int ret;
5416
5417 new_map = bpf_map_get(attr->link_update.new_map_fd);
5418 if (IS_ERR(new_map))
5419 return PTR_ERR(new_map);
5420
5421 if (attr->link_update.flags & BPF_F_REPLACE) {
5422 old_map = bpf_map_get(attr->link_update.old_map_fd);
5423 if (IS_ERR(old_map)) {
5424 ret = PTR_ERR(old_map);
5425 goto out_put;
5426 }
5427 } else if (attr->link_update.old_map_fd) {
5428 ret = -EINVAL;
5429 goto out_put;
5430 }
5431
5432 ret = link->ops->update_map(link, new_map, old_map);
5433
5434 if (old_map)
5435 bpf_map_put(old_map);
5436out_put:
5437 bpf_map_put(new_map);
5438 return ret;
5439}
5440
5441#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
5442
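/* BPF_LINK_UPDATE: replace the program (or, for links that implement
 * update_map, the map) backing an existing link, optionally verifying
 * the currently attached object when BPF_F_REPLACE is set.
 */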
5443static int link_update(union bpf_attr *attr)
5444{
5445 struct bpf_prog *old_prog = NULL, *new_prog;
5446 struct bpf_link *link;
5447 u32 flags;
5448 int ret;
5449
5450 if (CHECK_ATTR(BPF_LINK_UPDATE))
5451 return -EINVAL;
5452
5453 flags = attr->link_update.flags;
5454 if (flags & ~BPF_F_REPLACE)
5455 return -EINVAL;
5456
5457 link = bpf_link_get_from_fd(attr->link_update.link_fd);
5458 if (IS_ERR(link))
5459 return PTR_ERR(link);
5460
5461 if (link->ops->update_map) {
5462 ret = link_update_map(link, attr);
5463 goto out_put_link;
5464 }
5465
5466 new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
5467 if (IS_ERR(new_prog)) {
5468 ret = PTR_ERR(new_prog);
5469 goto out_put_link;
5470 }
5471
5472 if (flags & BPF_F_REPLACE) {
5473 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
5474 if (IS_ERR(old_prog)) {
5475 ret = PTR_ERR(old_prog);
5476 old_prog = NULL;
5477 goto out_put_progs;
5478 }
5479 } else if (attr->link_update.old_prog_fd) {
5480 ret = -EINVAL;
5481 goto out_put_progs;
5482 }
5483
5484 if (link->ops->update_prog)
5485 ret = link->ops->update_prog(link, new_prog, old_prog);
5486 else
5487 ret = -EINVAL;
5488
5489out_put_progs:
5490 if (old_prog)
5491 bpf_prog_put(old_prog);
5492 if (ret)
5493 bpf_prog_put(new_prog);
5494out_put_link:
5495 bpf_link_put_direct(link);
5496 return ret;
5497}
5498
5499#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5500
5501static int link_detach(union bpf_attr *attr)
5502{
5503 struct bpf_link *link;
5504 int ret;
5505
5506 if (CHECK_ATTR(BPF_LINK_DETACH))
5507 return -EINVAL;
5508
5509 link = bpf_link_get_from_fd(attr->link_detach.link_fd);
5510 if (IS_ERR(link))
5511 return PTR_ERR(link);
5512
5513 if (link->ops->detach)
5514 ret = link->ops->detach(link);
5515 else
5516 ret = -EOPNOTSUPP;
5517
5518 bpf_link_put_direct(link);
5519 return ret;
5520}
5521
5522struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5523{
5524 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5525}
5526EXPORT_SYMBOL(bpf_link_inc_not_zero);
5527
5528struct bpf_link *bpf_link_by_id(u32 id)
5529{
5530 struct bpf_link *link;
5531
5532 if (!id)
5533 return ERR_PTR(-ENOENT);
5534
5535 spin_lock_bh(&link_idr_lock);
5536 /* before link is "settled", ID is 0, pretend it doesn't exist yet */
5537 link = idr_find(&link_idr, id);
5538 if (link) {
5539 if (link->id)
5540 link = bpf_link_inc_not_zero(link);
5541 else
5542 link = ERR_PTR(-EAGAIN);
5543 } else {
5544 link = ERR_PTR(-ENOENT);
5545 }
5546 spin_unlock_bh(&link_idr_lock);
5547 return link;
5548}
5549
5550struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
5551{
5552 struct bpf_link *link;
5553
5554 spin_lock_bh(&link_idr_lock);
5555again:
5556 link = idr_get_next(&link_idr, id);
5557 if (link) {
5558 link = bpf_link_inc_not_zero(link);
5559 if (IS_ERR(link)) {
5560 (*id)++;
5561 goto again;
5562 }
5563 }
5564 spin_unlock_bh(&link_idr_lock);
5565
5566 return link;
5567}
5568
5569#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
5570
5571static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
5572{
5573 struct bpf_link *link;
5574 u32 id = attr->link_id;
5575 int fd;
5576
5577 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
5578 return -EINVAL;
5579
5580 if (!capable(CAP_SYS_ADMIN))
5581 return -EPERM;
5582
5583 link = bpf_link_by_id(id);
5584 if (IS_ERR(link))
5585 return PTR_ERR(link);
5586
5587 fd = bpf_link_new_fd(link);
5588 if (fd < 0)
5589 bpf_link_put_direct(link);
5590
5591 return fd;
5592}
5593
5594DEFINE_MUTEX(bpf_stats_enabled_mutex);
5595
5596static int bpf_stats_release(struct inode *inode, struct file *file)
5597{
5598 mutex_lock(&bpf_stats_enabled_mutex);
5599 static_key_slow_dec(&bpf_stats_enabled_key.key);
5600 mutex_unlock(&bpf_stats_enabled_mutex);
5601 return 0;
5602}
5603
5604static const struct file_operations bpf_stats_fops = {
5605 .release = bpf_stats_release,
5606};
5607
5608static int bpf_enable_runtime_stats(void)
5609{
5610 int fd;
5611
5612 mutex_lock(&bpf_stats_enabled_mutex);
5613
5614 /* Set a very high limit to avoid overflow */
5615 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
5616 mutex_unlock(&bpf_stats_enabled_mutex);
5617 return -EBUSY;
5618 }
5619
5620 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
5621 if (fd >= 0)
5622 static_key_slow_inc(&bpf_stats_enabled_key.key);
5623
5624 mutex_unlock(&bpf_stats_enabled_mutex);
5625 return fd;
5626}
5627
5628#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
5629
5630static int bpf_enable_stats(union bpf_attr *attr)
5631{
5632
5633 if (CHECK_ATTR(BPF_ENABLE_STATS))
5634 return -EINVAL;
5635
5636 if (!capable(CAP_SYS_ADMIN))
5637 return -EPERM;
5638
5639 switch (attr->enable_stats.type) {
5640 case BPF_STATS_RUN_TIME:
5641 return bpf_enable_runtime_stats();
5642 default:
5643 break;
5644 }
5645 return -EINVAL;
5646}
5647
5648#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
5649
5650static int bpf_iter_create(union bpf_attr *attr)
5651{
5652 struct bpf_link *link;
5653 int err;
5654
5655 if (CHECK_ATTR(BPF_ITER_CREATE))
5656 return -EINVAL;
5657
5658 if (attr->iter_create.flags)
5659 return -EINVAL;
5660
5661 link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5662 if (IS_ERR(link))
5663 return PTR_ERR(link);
5664
5665 err = bpf_iter_new_fd(link);
5666 bpf_link_put_direct(link);
5667
5668 return err;
5669}
5670
5671#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5672
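/* BPF_PROG_BIND_MAP: record an explicit dependency of a program on a map
 * by appending the map to the program's used_maps array so the map stays
 * alive at least as long as the program. Binding an already bound map is
 * a successful no-op.
 */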
5673static int bpf_prog_bind_map(union bpf_attr *attr)
5674{
5675 struct bpf_prog *prog;
5676 struct bpf_map *map;
5677 struct bpf_map **used_maps_old, **used_maps_new;
5678 int i, ret = 0;
5679
5680 if (CHECK_ATTR(BPF_PROG_BIND_MAP))
5681 return -EINVAL;
5682
5683 if (attr->prog_bind_map.flags)
5684 return -EINVAL;
5685
5686 prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
5687 if (IS_ERR(prog))
5688 return PTR_ERR(prog);
5689
5690 map = bpf_map_get(attr->prog_bind_map.map_fd);
5691 if (IS_ERR(map)) {
5692 ret = PTR_ERR(map);
5693 goto out_prog_put;
5694 }
5695
5696 mutex_lock(&prog->aux->used_maps_mutex);
5697
5698 used_maps_old = prog->aux->used_maps;
5699
5700 for (i = 0; i < prog->aux->used_map_cnt; i++)
5701 if (used_maps_old[i] == map) {
5702 bpf_map_put(map);
5703 goto out_unlock;
5704 }
5705
5706 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
5707 sizeof(used_maps_new[0]),
5708 GFP_KERNEL);
5709 if (!used_maps_new) {
5710 ret = -ENOMEM;
5711 goto out_unlock;
5712 }
5713
5714 /* The bpf program will not access the bpf map, but for the sake of
5715 * simplicity, increase sleepable_refcnt for a sleepable program as well.
5716 */
5717 if (prog->sleepable)
5718 atomic64_inc(&map->sleepable_refcnt);
5719 memcpy(used_maps_new, used_maps_old,
5720 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
5721 used_maps_new[prog->aux->used_map_cnt] = map;
5722
5723 prog->aux->used_map_cnt++;
5724 prog->aux->used_maps = used_maps_new;
5725
5726 kfree(used_maps_old);
5727
5728out_unlock:
5729 mutex_unlock(&prog->aux->used_maps_mutex);
5730
5731 if (ret)
5732 bpf_map_put(map);
5733out_prog_put:
5734 bpf_prog_put(prog);
5735 return ret;
5736}
5737
5738#define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd
5739
5740static int token_create(union bpf_attr *attr)
5741{
5742 if (CHECK_ATTR(BPF_TOKEN_CREATE))
5743 return -EINVAL;
5744
5745 /* no flags are supported yet */
5746 if (attr->token_create.flags)
5747 return -EINVAL;
5748
5749 return bpf_token_create(attr);
5750}
5751
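/* Main bpf(2) dispatcher, shared by the syscall entry point and
 * BPF_PROG_TYPE_SYSCALL programs: validate and copy in the attribute
 * union, consult the LSM via security_bpf() and hand off to the
 * per-command handler.
 */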
5752static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
5753{
5754 union bpf_attr attr;
5755 int err;
5756
5757 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5758 if (err)
5759 return err;
5760 size = min_t(u32, size, sizeof(attr));
5761
5762 /* copy attributes from user space, may be less than sizeof(bpf_attr) */
5763 memset(&attr, 0, sizeof(attr));
5764 if (copy_from_bpfptr(&attr, uattr, size) != 0)
5765 return -EFAULT;
5766
5767 err = security_bpf(cmd, &attr, size);
5768 if (err < 0)
5769 return err;
5770
5771 switch (cmd) {
5772 case BPF_MAP_CREATE:
5773 err = map_create(&attr);
5774 break;
5775 case BPF_MAP_LOOKUP_ELEM:
5776 err = map_lookup_elem(&attr);
5777 break;
5778 case BPF_MAP_UPDATE_ELEM:
5779 err = map_update_elem(&attr, uattr);
5780 break;
5781 case BPF_MAP_DELETE_ELEM:
5782 err = map_delete_elem(&attr, uattr);
5783 break;
5784 case BPF_MAP_GET_NEXT_KEY:
5785 err = map_get_next_key(&attr);
5786 break;
5787 case BPF_MAP_FREEZE:
5788 err = map_freeze(&attr);
5789 break;
5790 case BPF_PROG_LOAD:
5791 err = bpf_prog_load(&attr, uattr, size);
5792 break;
5793 case BPF_OBJ_PIN:
5794 err = bpf_obj_pin(&attr);
5795 break;
5796 case BPF_OBJ_GET:
5797 err = bpf_obj_get(&attr);
5798 break;
5799 case BPF_PROG_ATTACH:
5800 err = bpf_prog_attach(&attr);
5801 break;
5802 case BPF_PROG_DETACH:
5803 err = bpf_prog_detach(&attr);
5804 break;
5805 case BPF_PROG_QUERY:
5806 err = bpf_prog_query(&attr, uattr.user);
5807 break;
5808 case BPF_PROG_TEST_RUN:
5809 err = bpf_prog_test_run(&attr, uattr.user);
5810 break;
5811 case BPF_PROG_GET_NEXT_ID:
5812 err = bpf_obj_get_next_id(&attr, uattr.user,
5813 &prog_idr, &prog_idr_lock);
5814 break;
5815 case BPF_MAP_GET_NEXT_ID:
5816 err = bpf_obj_get_next_id(&attr, uattr.user,
5817 &map_idr, &map_idr_lock);
5818 break;
5819 case BPF_BTF_GET_NEXT_ID:
5820 err = bpf_obj_get_next_id(&attr, uattr.user,
5821 &btf_idr, &btf_idr_lock);
5822 break;
5823 case BPF_PROG_GET_FD_BY_ID:
5824 err = bpf_prog_get_fd_by_id(&attr);
5825 break;
5826 case BPF_MAP_GET_FD_BY_ID:
5827 err = bpf_map_get_fd_by_id(&attr);
5828 break;
5829 case BPF_OBJ_GET_INFO_BY_FD:
5830 err = bpf_obj_get_info_by_fd(&attr, uattr.user);
5831 break;
5832 case BPF_RAW_TRACEPOINT_OPEN:
5833 err = bpf_raw_tracepoint_open(&attr);
5834 break;
5835 case BPF_BTF_LOAD:
5836 err = bpf_btf_load(&attr, uattr, size);
5837 break;
5838 case BPF_BTF_GET_FD_BY_ID:
5839 err = bpf_btf_get_fd_by_id(&attr);
5840 break;
5841 case BPF_TASK_FD_QUERY:
5842 err = bpf_task_fd_query(&attr, uattr.user);
5843 break;
5844 case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
5845 err = map_lookup_and_delete_elem(&attr);
5846 break;
5847 case BPF_MAP_LOOKUP_BATCH:
5848 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
5849 break;
5850 case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
5851 err = bpf_map_do_batch(&attr, uattr.user,
5852 BPF_MAP_LOOKUP_AND_DELETE_BATCH);
5853 break;
5854 case BPF_MAP_UPDATE_BATCH:
5855 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
5856 break;
5857 case BPF_MAP_DELETE_BATCH:
5858 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
5859 break;
5860 case BPF_LINK_CREATE:
5861 err = link_create(&attr, uattr);
5862 break;
5863 case BPF_LINK_UPDATE:
5864 err = link_update(&attr);
5865 break;
5866 case BPF_LINK_GET_FD_BY_ID:
5867 err = bpf_link_get_fd_by_id(&attr);
5868 break;
5869 case BPF_LINK_GET_NEXT_ID:
5870 err = bpf_obj_get_next_id(&attr, uattr.user,
5871 &link_idr, &link_idr_lock);
5872 break;
5873 case BPF_ENABLE_STATS:
5874 err = bpf_enable_stats(&attr);
5875 break;
5876 case BPF_ITER_CREATE:
5877 err = bpf_iter_create(&attr);
5878 break;
5879 case BPF_LINK_DETACH:
5880 err = link_detach(&attr);
5881 break;
5882 case BPF_PROG_BIND_MAP:
5883 err = bpf_prog_bind_map(&attr);
5884 break;
5885 case BPF_TOKEN_CREATE:
5886 err = token_create(&attr);
5887 break;
5888 default:
5889 err = -EINVAL;
5890 break;
5891 }
5892
5893 return err;
5894}
5895
5896SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
5897{
5898 return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
5899}
5900
5901static bool syscall_prog_is_valid_access(int off, int size,
5902 enum bpf_access_type type,
5903 const struct bpf_prog *prog,
5904 struct bpf_insn_access_aux *info)
5905{
5906 if (off < 0 || off >= U16_MAX)
5907 return false;
5908 if (off % size != 0)
5909 return false;
5910 return true;
5911}
5912
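/* bpf_sys_bpf() helper: lets BPF_PROG_TYPE_SYSCALL programs issue a
 * restricted subset of bpf(2) commands with attributes in kernel memory.
 */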
5913BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
5914{
5915 switch (cmd) {
5916 case BPF_MAP_CREATE:
5917 case BPF_MAP_DELETE_ELEM:
5918 case BPF_MAP_UPDATE_ELEM:
5919 case BPF_MAP_FREEZE:
5920 case BPF_MAP_GET_FD_BY_ID:
5921 case BPF_PROG_LOAD:
5922 case BPF_BTF_LOAD:
5923 case BPF_LINK_CREATE:
5924 case BPF_RAW_TRACEPOINT_OPEN:
5925 break;
5926 default:
5927 return -EINVAL;
5928 }
5929 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
5930}
5931
5932
5933/* To shut up -Wmissing-prototypes.
5934 * This function is used by the kernel light skeleton
5935 * to load bpf programs when modules are loaded or during kernel boot.
5936 * See tools/lib/bpf/skel_internal.h
5937 */
5938int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
5939
5940int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
5941{
5942 struct bpf_prog * __maybe_unused prog;
5943 struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5944
5945 switch (cmd) {
5946#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5947 case BPF_PROG_TEST_RUN:
5948 if (attr->test.data_in || attr->test.data_out ||
5949 attr->test.ctx_out || attr->test.duration ||
5950 attr->test.repeat || attr->test.flags)
5951 return -EINVAL;
5952
5953 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5954 if (IS_ERR(prog))
5955 return PTR_ERR(prog);
5956
5957 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5958 attr->test.ctx_size_in > U16_MAX) {
5959 bpf_prog_put(prog);
5960 return -EINVAL;
5961 }
5962
5963 run_ctx.bpf_cookie = 0;
5964 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
5965 /* recursion detected */
5966 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
5967 bpf_prog_put(prog);
5968 return -EBUSY;
5969 }
5970 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5971 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
5972 &run_ctx);
5973 bpf_prog_put(prog);
5974 return 0;
5975#endif
5976 default:
5977 return ____bpf_sys_bpf(cmd, attr, size);
5978 }
5979}
5980EXPORT_SYMBOL(kern_sys_bpf);
5981
5982static const struct bpf_func_proto bpf_sys_bpf_proto = {
5983 .func = bpf_sys_bpf,
5984 .gpl_only = false,
5985 .ret_type = RET_INTEGER,
5986 .arg1_type = ARG_ANYTHING,
5987 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
5988 .arg3_type = ARG_CONST_SIZE,
5989};
5990
5991const struct bpf_func_proto * __weak
5992tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5993{
5994 return bpf_base_func_proto(func_id, prog);
5995}
5996
5997BPF_CALL_1(bpf_sys_close, u32, fd)
5998{
5999 /* When a bpf program calls this helper there must not be an
6000 * outstanding fdget() without a matching, completed fdput().
6001 * This helper is only allowed in the following call chain:
6002 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
6003 */
6004 return close_fd(fd);
6005}
6006
6007static const struct bpf_func_proto bpf_sys_close_proto = {
6008 .func = bpf_sys_close,
6009 .gpl_only = false,
6010 .ret_type = RET_INTEGER,
6011 .arg1_type = ARG_ANYTHING,
6012};
6013
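/* bpf_kallsyms_lookup_name() helper: resolve a NUL-terminated symbol name
 * to its kernel address in *res. Restricted to callers that are allowed
 * to see raw kernel addresses.
 */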
6014BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
6015{
6016 *res = 0;
6017 if (flags)
6018 return -EINVAL;
6019
6020 if (name_sz <= 1 || name[name_sz - 1])
6021 return -EINVAL;
6022
6023 if (!bpf_dump_raw_ok(current_cred()))
6024 return -EPERM;
6025
6026 *res = kallsyms_lookup_name(name);
6027 return *res ? 0 : -ENOENT;
6028}
6029
6030static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
6031 .func = bpf_kallsyms_lookup_name,
6032 .gpl_only = false,
6033 .ret_type = RET_INTEGER,
6034 .arg1_type = ARG_PTR_TO_MEM,
6035 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
6036 .arg3_type = ARG_ANYTHING,
6037 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
6038 .arg4_size = sizeof(u64),
6039};
6040
6041static const struct bpf_func_proto *
6042syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6043{
6044 switch (func_id) {
6045 case BPF_FUNC_sys_bpf:
6046 return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
6047 ? NULL : &bpf_sys_bpf_proto;
6048 case BPF_FUNC_btf_find_by_name_kind:
6049 return &bpf_btf_find_by_name_kind_proto;
6050 case BPF_FUNC_sys_close:
6051 return &bpf_sys_close_proto;
6052 case BPF_FUNC_kallsyms_lookup_name:
6053 return &bpf_kallsyms_lookup_name_proto;
6054 default:
6055 return tracing_prog_func_proto(func_id, prog);
6056 }
6057}
6058
6059const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
6060 .get_func_proto = syscall_prog_func_proto,
6061 .is_valid_access = syscall_prog_is_valid_access,
6062};
6063
6064const struct bpf_prog_ops bpf_syscall_prog_ops = {
6065 .test_run = bpf_prog_test_run_syscall,
6066};
6067
6068#ifdef CONFIG_SYSCTL
6069static int bpf_stats_handler(const struct ctl_table *table, int write,
6070 void *buffer, size_t *lenp, loff_t *ppos)
6071{
6072 struct static_key *key = (struct static_key *)table->data;
6073 static int saved_val;
6074 int val, ret;
6075 struct ctl_table tmp = {
6076 .data = &val,
6077 .maxlen = sizeof(val),
6078 .mode = table->mode,
6079 .extra1 = SYSCTL_ZERO,
6080 .extra2 = SYSCTL_ONE,
6081 };
6082
6083 if (write && !capable(CAP_SYS_ADMIN))
6084 return -EPERM;
6085
6086 mutex_lock(&bpf_stats_enabled_mutex);
6087 val = saved_val;
6088 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
6089 if (write && !ret && val != saved_val) {
6090 if (val)
6091 static_key_slow_inc(key);
6092 else
6093 static_key_slow_dec(key);
6094 saved_val = val;
6095 }
6096 mutex_unlock(&bpf_stats_enabled_mutex);
6097 return ret;
6098}
6099
6100void __weak unpriv_ebpf_notify(int new_state)
6101{
6102}
6103
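/* sysctl handler for kernel.unprivileged_bpf_disabled: once the value has
 * been set to 1 (permanently disabled), it cannot be changed again.
 */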
6104static int bpf_unpriv_handler(const struct ctl_table *table, int write,
6105 void *buffer, size_t *lenp, loff_t *ppos)
6106{
6107 int ret, unpriv_enable = *(int *)table->data;
6108 bool locked_state = unpriv_enable == 1;
6109 struct ctl_table tmp = *table;
6110
6111 if (write && !capable(CAP_SYS_ADMIN))
6112 return -EPERM;
6113
6114 tmp.data = &unpriv_enable;
6115 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
6116 if (write && !ret) {
6117 if (locked_state && unpriv_enable != 1)
6118 return -EPERM;
6119 *(int *)table->data = unpriv_enable;
6120 }
6121
6122 if (write)
6123 unpriv_ebpf_notify(unpriv_enable);
6124
6125 return ret;
6126}
6127
6128static struct ctl_table bpf_syscall_table[] = {
6129 {
6130 .procname = "unprivileged_bpf_disabled",
6131 .data = &sysctl_unprivileged_bpf_disabled,
6132 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
6133 .mode = 0644,
6134 .proc_handler = bpf_unpriv_handler,
6135 .extra1 = SYSCTL_ZERO,
6136 .extra2 = SYSCTL_TWO,
6137 },
6138 {
6139 .procname = "bpf_stats_enabled",
6140 .data = &bpf_stats_enabled_key.key,
6141 .mode = 0644,
6142 .proc_handler = bpf_stats_handler,
6143 },
6144};
6145
6146static int __init bpf_syscall_sysctl_init(void)
6147{
6148 register_sysctl_init("kernel", bpf_syscall_table);
6149 return 0;
6150}
6151late_initcall(bpf_syscall_sysctl_init);
6152#endif /* CONFIG_SYSCTL */