// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>
#include <linux/tracepoint.h>

#include <net/netfilter/nf_bpf_link.h>
#include <net/netkit.h>
#include <net/tcx.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))
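
/* Maps matched by IS_FD_MAP() hold references to other kernel objects
 * (perf events, cgroups, programs, or inner maps), so from the syscall's
 * point of view their values are file descriptors and their
 * syscall-visible value size is sizeof(u32); see bpf_map_value_size()
 * below. For example, populating a BPF_MAP_TYPE_ARRAY_OF_MAPS slot takes
 * a u32 inner map fd as the value (a sketch of a user-space caller, not
 * code from this file):
 *
 *	union bpf_attr attr = {
 *		.map_fd	= outer_map_fd,
 *		.key	= (__u64)(unsigned long)&slot,
 *		.value	= (__u64)(unsigned long)&inner_map_fd,
 *	};
 *	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */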

#define BPF_OBJ_FLAG_MASK	(BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
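
/* Concretely: if a newer libbpf passes a 144-byte union bpf_attr to a
 * kernel that only knows the first 128 bytes, the call is still accepted
 * as long as bytes 128..143 are all zero; any non-zero trailing byte
 * yields -E2BIG. (The sizes here are illustrative, not tied to a specific
 * UAPI revision.)
 */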

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = bpf_map_offload_map_mem_usage,
};

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
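
/* A worked example: a BPF_MAP_TYPE_PERCPU_ARRAY with value_size == 12 on
 * a machine with 4 possible CPUs needs round_up(12, 8) * 4 == 64 bytes of
 * user buffer per element, laid out as one 16-byte, 8-byte-aligned slot
 * per possible CPU.
 */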

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running non-sleepable BPF programs to complete so that
	 * userspace, when we return to it, knows that all non-sleepable
	 * programs that could be running use the new map value. For sleepable
	 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
	 * for their completion, but that wait can be very long and userspace
	 * may think the syscall hangs forever, so sleepable BPF programs are
	 * not handled for now.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static void unpin_uptr_kaddr(void *kaddr)
{
	if (kaddr)
		unpin_user_page(virt_to_page(kaddr));
}

static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj)
{
	const struct btf_field *field;
	void **uptr_addr;
	int i;

	for (i = 0, field = rec->fields; i < cnt; i++, field++) {
		if (field->type != BPF_UPTR)
			continue;

		uptr_addr = obj + field->offset;
		unpin_uptr_kaddr(*uptr_addr);
	}
}

static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj)
{
	if (!btf_record_has_field(rec, BPF_UPTR))
		return;

	__bpf_obj_unpin_uptrs(rec, rec->cnt, obj);
}

static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj)
{
	const struct btf_field *field;
	const struct btf_type *t;
	unsigned long start, end;
	struct page *page;
	void **uptr_addr;
	int i, err;

	if (!btf_record_has_field(rec, BPF_UPTR))
		return 0;

	for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
		if (field->type != BPF_UPTR)
			continue;

		uptr_addr = obj + field->offset;
		start = *(unsigned long *)uptr_addr;
		if (!start)
			continue;

		t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id);
		/* t->size was checked for zero before */
		if (check_add_overflow(start, t->size - 1, &end)) {
			err = -EFAULT;
			goto unpin_all;
		}

		/* The uptr's struct cannot span across two pages */
		if ((start & PAGE_MASK) != (end & PAGE_MASK)) {
			err = -EOPNOTSUPP;
			goto unpin_all;
		}

		err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page);
		if (err != 1)
			goto unpin_all;

		if (PageHighMem(page)) {
			err = -EOPNOTSUPP;
			unpin_user_page(page);
			goto unpin_all;
		}

		*uptr_addr = page_address(page) + offset_in_page(start);
	}

	return 0;

unpin_all:
	__bpf_obj_unpin_uptrs(rec, i, obj);
	return err;
}

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_ARENA ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		err = bpf_obj_pin_uptrs(map->record, value);
		if (!err) {
			rcu_read_lock();
			err = map->ops->map_update_elem(map, key, value, flags);
			rcu_read_unlock();
			if (err)
				bpf_obj_unpin_uptrs(map->record, value);
		}
	}
	bpf_enable_instrumentation();

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero-initialized */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();

	return err;
}

/* Please, do not use this function outside of the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}
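
/* With the common PAGE_ALLOC_COSTLY_ORDER of 3 and 4 KiB pages, requests
 * up to 32 KiB are first tried with kmalloc_node() and only fall back to
 * vmalloc on failure; anything larger, or anything that must be
 * mmap()'able, goes straight to the vmalloc path. (Exact thresholds are
 * architecture and config dependent.)
 */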

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}

#ifdef CONFIG_MEMCG
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
			unsigned long nr_pages, struct page **pages)
{
	unsigned long i, j;
	struct page *pg;
	int ret = 0;
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg, *old_memcg;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
#endif
	for (i = 0; i < nr_pages; i++) {
		pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);

		if (pg) {
			pages[i] = pg;
			continue;
		}
		for (j = 0; j < i; j++)
			__free_page(pages[j]);
		ret = -ENOMEM;
		break;
	}

#ifdef CONFIG_MEMCG
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
#endif
	return ret;
}

static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}
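
/* The bsearch() key above is a bare u32 offset rather than a full
 * struct btf_field; btf_field_cmp() only ever dereferences ->offset, so
 * this works as long as offset remains the first member of struct
 * btf_field (which is how the struct is laid out at the time of
 * writing). rec->fields is kept sorted by offset by the BTF parsing
 * code, which is what makes binary search valid here.
 */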

void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
		case BPF_UPTR:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			if (btf_is_kernel(rec->fields[i].kptr.btf))
				btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
		case BPF_UPTR:
			if (btf_is_kernel(fields[i].kptr.btf))
				btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
		return;
	bpf_wq_cancel_and_free(obj + rec->wq_off);
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_WORKQUEUE:
			bpf_wq_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				migrate_disable();
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
								 pointee_struct_meta->record : NULL,
								 fields[i].type == BPF_KPTR_PERCPU);
				migrate_enable();
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_UPTR:
			/* The caller ensured that no one is using the uptr */
			unpin_uptr_kaddr(*(void **)field_ptr);
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

static void bpf_map_free(struct bpf_map *map)
{
	struct btf_record *rec = map->record;
	struct btf *btf = map->btf;

	/* implementation dependent freeing */
	map->ops->map_free(map);
	/* Delay freeing of btf_record for maps, as map_free
	 * callback usually needs access to them. It is better to do it here
	 * than require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback for map in map case which
	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
	 * template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
	/* Delay freeing of btf for maps, as map_free callback may need
	 * struct_meta info which will be freed with btf_put().
	 */
	btf_put(btf);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	bpf_map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

static void bpf_map_free_in_work(struct bpf_map *map)
{
	INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex like slab_mutex.
	 */
	queue_work(system_unbound_wq, &map->work);
}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}

static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_map_free_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_map_free_rcu_gp);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);

		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
		if (READ_ONCE(map->free_after_mult_rcu_gp))
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		else if (READ_ONCE(map->free_after_rcu_gp))
			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
		else
			bpf_map_free_in_work(map);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);
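
/* To summarize the three teardown paths when the last refcount drops:
 *
 *   free_after_mult_rcu_gp: an RCU tasks trace grace period, then (if
 *                           needed) a regular RCU grace period, then the
 *                           workqueue - for maps that sleepable programs
 *                           may still be reading.
 *   free_after_rcu_gp:      one regular RCU grace period, then the
 *                           workqueue.
 *   otherwise:              straight to the workqueue.
 *
 * All paths end in bpf_map_free_deferred() so that ops->map_free() is
 * allowed to sleep.
 */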

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = fd_file(f)->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err = 0;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allow user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
		bpf_map_write_active_inc(map);
	}
out:
	mutex_unlock(&map->freeze_mutex);
	if (err)
		return err;

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	/* If mapping is read-only, then disallow potentially re-mapping with
	 * PROT_WRITE by dropping VM_MAYWRITE flag. This VM_MAYWRITE clearing
	 * means that as far as BPF map's memory-mapped VMAs are concerned,
	 * VM_WRITE and VM_MAYWRITE are equivalent; if one of them is set,
	 * both should be set, so we can forget about VM_MAYWRITE and always
	 * check just VM_WRITE
	 */
	if (!(vma->vm_flags & VM_WRITE))
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err) {
		if (vma->vm_flags & VM_WRITE)
			bpf_map_write_active_dec(map);
	}

	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
					   unsigned long len, unsigned long pgoff,
					   unsigned long flags)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_get_unmapped_area)
		return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
#ifdef CONFIG_MMU
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
#else
	return addr;
#endif
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
	.mmap = bpf_map_mmap,
	.poll = bpf_map_poll,
	.get_unmapped_area = bpf_get_unmapped_area,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}
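
/* The resulting mapping from BPF object flags to file flags is:
 *
 *   (none)                      -> O_RDWR
 *   BPF_F_RDONLY                -> O_RDONLY
 *   BPF_F_WRONLY                -> O_WRONLY
 *   BPF_F_RDONLY | BPF_F_WRONLY -> -EINVAL
 */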

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
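
/* For instance, with BPF_MAP_FREEZE_LAST_FIELD defined as map_fd below,
 * CHECK_ATTR(BPF_MAP_FREEZE) roughly expands to
 *
 *	memchr_inv((void *)&attr->map_fd + sizeof(attr->map_fd), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_fd) -
 *		   sizeof(attr->map_fd)) != NULL
 *
 * i.e. it scans everything in the union past the command's last used
 * field and evaluates to true (an error) if any of it is non-zero.
 */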

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
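
/* Examples: "my_map.v2" is accepted and its length (9) is returned;
 * "my-map" fails with -EINVAL ('-' is not an allowed character), and a
 * name that fills all "size" bytes without a terminating NUL also fails
 * with -EINVAL.
 */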

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_token_capable(token, CAP_BPF)) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
			case BPF_WORKQUEUE:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_KPTR_PERCPU:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_UPTR:
				if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}

static bool bpf_net_capable(void)
{
	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
}

#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	struct bpf_token *token = NULL;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	bool token_flag;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
	 * to avoid per-map type checks tripping on unknown flag
	 */
	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
	attr->map_flags &= ~BPF_F_TOKEN_FD;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_type != BPF_MAP_TYPE_ARENA &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;

	if (token_flag) {
		token = bpf_token_get_from_fd(attr->map_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);

		/* if current token doesn't grant map creation permissions,
		 * then we can't use this token, so ignore it and rely on
		 * system-wide capabilities checks
		 */
		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
		    !bpf_token_allow_map_type(token, attr->map_type)) {
			bpf_token_put(token);
			token = NULL;
		}
	}

	err = -EPERM;

	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
		goto put_token;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_ARENA:
		if (!bpf_token_capable(token, CAP_BPF))
			goto put_token;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!bpf_token_capable(token, CAP_NET_ADMIN))
			goto put_token;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		goto put_token;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto put_token;
	}
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel
	     * counterpart. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_create(map, attr, token);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);
	bpf_token_put(token);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	bpf_map_free(map);
put_token:
	bpf_token_put(token);
	return err;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (!IS_ERR(map))
		bpf_map_inc(map);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (!IS_ERR(map))
		bpf_map_inc_with_uref(map);

	return map;
}

/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
		return -EPERM;

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key))
		return PTR_ERR(key);

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
	if (!err)
		maybe_wait_bpf_programs(map);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	struct bpf_map *map;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	if (!err)
		maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	struct bpf_map *map;
	void *key, *next_key;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
		return -EPERM;

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key))
			return PTR_ERR(key);
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_offloaded(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	return err;
}

int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	void *key, *value;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, map_file, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);

	return err;
}

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	u32 value_size, cp, max_count;
	int err;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT)
			goto next_key;

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		cp++;
next_key:
		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
	    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}
2069
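/*
 * The in_batch/out_batch pair above implements an opaque, key-sized cursor.
 * A hedged user-space sketch of a full iteration (hypothetical buffers;
 * libbpf's bpf_map_lookup_batch() is the supported wrapper):
 *
 *	union bpf_attr attr = {};
 *	char cursor[KEY_SIZE];		// receives the last visited key
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.in_batch = 0;	// NULL: start from the first key
 *	attr.batch.out_batch = (__u64)(unsigned long)cursor;
 *	attr.batch.keys = (__u64)(unsigned long)keys;
 *	attr.batch.values = (__u64)(unsigned long)values;
 *
 *	for (;;) {
 *		attr.batch.count = CHUNK;
 *		err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *		// consume attr.batch.count key/value pairs here
 *		if (err)	// -ENOENT means the whole map was visited
 *			break;
 *		// resume from where the previous call stopped
 *		attr.batch.in_batch = (__u64)(unsigned long)cursor;
 *	}
 */
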
#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (attr->flags &&
	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
	     map->map_type == BPF_MAP_TYPE_STACK)) {
		err = -EINVAL;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -ENOTSUPP;
	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		if (!bpf_map_is_offloaded(map)) {
			bpf_disable_instrumentation();
			rcu_read_lock();
			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
			rcu_read_unlock();
			bpf_enable_instrumentation();
		}
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	return err;
}

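/*
 * For queue/stack maps the command above acts as a destructive pop. A hedged
 * user-space sketch (raw bpf(2) wrapper; libbpf exposes this as
 * bpf_map_lookup_and_delete_elem()):
 *
 *	union bpf_attr attr = {};
 *	__u64 val;
 *
 *	attr.map_fd = queue_fd;		// BPF_MAP_TYPE_QUEUE, value_size == 8
 *	attr.key = 0;			// queue/stack maps have no key
 *	attr.value = (__u64)(unsigned long)&val;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr,
 *		      sizeof(attr));	// -ENOENT once the queue is empty
 */
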
#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0;
	struct bpf_map *map;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE))
		return -EPERM;

	mutex_lock(&map->freeze_mutex);
	if (bpf_map_write_active(map)) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

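/*
 * Freezing is the usual way to make pre-populated data read-only from the
 * syscall side (BPF-program-side writability is governed separately by map
 * flags). A hedged sketch, assuming the map was just filled:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd = map_fd;
 *	err = syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
 *	// fails with -EBUSY if a syscall-side writer is still active or the
 *	// map is already frozen, and with -ENOTSUPP for struct_ops maps and
 *	// maps carrying btf-managed fields, as checked above
 */
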
static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_offloaded(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD] = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (!in_irq() && !irqs_disabled())
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog)
{
	unsigned long flags;

	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	spin_lock_irqsave(&prog_idr_lock, flags);
	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;
	spin_unlock_irqrestore(&prog_idr_lock, flags);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	kvfree(aux->func_info);
	kfree(aux->func_info_aux);
	free_uid(aux->user);
	security_bpf_prog_free(aux->prog);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{
	bpf_prog_kallsyms_del_all(prog);
	btf_put(prog->aux->btf);
	module_put(prog->aux->mod);
	kvfree(prog->aux->jited_linfo);
	kvfree(prog->aux->linfo);
	kfree(prog->aux->kfunc_tab);
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);

	if (deferred) {
		if (prog->sleepable)
			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
		else
			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	} else {
		__bpf_prog_put_rcu(&prog->aux->rcu);
	}
}

static void bpf_prog_put_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	struct bpf_prog *prog;

	aux = container_of(work, struct bpf_prog_aux, work);
	prog = aux->prog;
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
	bpf_prog_free_id(prog);
	__bpf_prog_put_noref(prog, true);
}

static void __bpf_prog_put(struct bpf_prog *prog)
{
	struct bpf_prog_aux *aux = prog->aux;

	if (atomic64_dec_and_test(&aux->refcnt)) {
		if (in_irq() || irqs_disabled()) {
			INIT_WORK(&aux->work, bpf_prog_put_deferred);
			schedule_work(&aux->work);
		} else {
			bpf_prog_put_deferred(&aux->work);
		}
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

struct bpf_prog_kstats {
	u64 nsecs;
	u64 cnt;
	u64 misses;
};

void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	unsigned int flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_kstats *stats)
{
	u64 nsecs = 0, cnt = 0, misses = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st;
		unsigned int start;
		u64 tnsecs, tcnt, tmisses;

		st = per_cpu_ptr(prog->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&st->syncp);
			tnsecs = u64_stats_read(&st->nsecs);
			tcnt = u64_stats_read(&st->cnt);
			tmisses = u64_stats_read(&st->misses);
		} while (u64_stats_fetch_retry(&st->syncp, start));
		nsecs += tnsecs;
		cnt += tcnt;
		misses += tmisses;
	}
	stats->nsecs = nsecs;
	stats->cnt = cnt;
	stats->misses = misses;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	struct bpf_prog_kstats stats;

	bpf_prog_get_stats(prog, &stats);
	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n"
		   "run_time_ns:\t%llu\n"
		   "run_cnt:\t%llu\n"
		   "recursion_misses:\t%llu\n"
		   "verified_insns:\t%u\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id,
		   stats.nsecs,
		   stats.cnt,
		   stats.misses,
		   prog->aux->verified_insns);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

void bpf_prog_add(struct bpf_prog *prog, int i)
{
	atomic64_add(i, &prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

void bpf_prog_inc(struct bpf_prog *prog)
{
	atomic64_inc(&prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

bool bpf_prog_get_ok(struct bpf_prog *prog,
		     enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	CLASS(fd, f)(ufd);
	struct bpf_prog *prog;

	if (fd_empty(f))
		return ERR_PTR(-EBADF);
	if (fd_file(f)->f_op != &bpf_prog_fops)
		return ERR_PTR(-EINVAL);

	prog = fd_file(f)->private_data;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	return __bpf_prog_get(ufd, &type, attach_drv);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);

/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later, for some of them, specifying
 * expected_attach_type at load time became required so that the program
 * could be validated properly. Program types that may be loaded both with
 * and without (for backward compatibility) expected_attach_type should have
 * a default attach type assigned to expected_attach_type for the latter
 * case, so that it can still be validated at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * the prog type requires it but has attach types that must stay backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	case BPF_PROG_TYPE_SK_REUSEPORT:
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_SK_REUSEPORT_SELECT;
		break;
	}
}

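/*
 * Concretely, a legacy loader that leaves expected_attach_type zeroed still
 * gets a well-defined default. A hedged sketch of the effect:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
 *	attr.expected_attach_type = 0;	// legacy loader leaves this unset
 *	// bpf_prog_load_fixup_attach_type() turns this into
 *	// BPF_CGROUP_INET_SOCK_CREATE before validation.
 */
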
static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
			   enum bpf_attach_type expected_attach_type,
			   struct btf *attach_btf, u32 btf_id,
			   struct bpf_prog *dst_prog)
{
	if (btf_id) {
		if (btf_id > BTF_MAX_TYPE)
			return -EINVAL;

		if (!attach_btf && !dst_prog)
			return -EINVAL;

		switch (prog_type) {
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
		case BPF_PROG_TYPE_STRUCT_OPS:
		case BPF_PROG_TYPE_EXT:
			break;
		default:
			return -EINVAL;
		}
	}

	if (attach_btf && (!btf_id || dst_prog))
		return -EINVAL;

	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
	    prog_type != BPF_PROG_TYPE_EXT)
		return -EINVAL;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET_SOCK_RELEASE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_UNIX_CONNECT:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_UNIX_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UNIX_GETSOCKNAME:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_UNIX_SENDMSG:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
		case BPF_CGROUP_UNIX_RECVMSG:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SKB:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		switch (expected_attach_type) {
		case BPF_CGROUP_SETSOCKOPT:
		case BPF_CGROUP_GETSOCKOPT:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_SK_LOOKUP:
		if (expected_attach_type == BPF_SK_LOOKUP)
			return 0;
		return -EINVAL;
	case BPF_PROG_TYPE_SK_REUSEPORT:
		switch (expected_attach_type) {
		case BPF_SK_REUSEPORT_SELECT:
		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_NETFILTER:
		if (expected_attach_type == BPF_NETFILTER)
			return 0;
		return -EINVAL;
	case BPF_PROG_TYPE_SYSCALL:
	case BPF_PROG_TYPE_EXT:
		if (expected_attach_type)
			return -EINVAL;
		fallthrough;
	default:
		return 0;
	}
}

static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_EXT: /* extends any prog */
	case BPF_PROG_TYPE_NETFILTER:
		return true;
	case BPF_PROG_TYPE_CGROUP_SKB:
		/* always unpriv */
	case BPF_PROG_TYPE_SK_REUSEPORT:
		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
	default:
		return false;
	}
}

static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
	case BPF_PROG_TYPE_EXT: /* extends any prog */
		return true;
	default:
		return false;
	}
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd

static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog, *dst_prog = NULL;
	struct btf *attach_btf = NULL;
	struct bpf_token *token = NULL;
	bool bpf_cap;
	int err;
	char license[128];

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
				 BPF_F_ANY_ALIGNMENT |
				 BPF_F_TEST_STATE_FREQ |
				 BPF_F_SLEEPABLE |
				 BPF_F_TEST_RND_HI32 |
				 BPF_F_XDP_HAS_FRAGS |
				 BPF_F_XDP_DEV_BOUND_ONLY |
				 BPF_F_TEST_REG_INVARIANTS |
				 BPF_F_TOKEN_FD))
		return -EINVAL;

	bpf_prog_load_fixup_attach_type(attr);

	if (attr->prog_flags & BPF_F_TOKEN_FD) {
		token = bpf_token_get_from_fd(attr->prog_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);
		/* if current token doesn't grant prog loading permissions,
		 * then we can't use this token, so ignore it and rely on
		 * system-wide capabilities checks
		 */
		if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
		    !bpf_token_allow_prog_type(token, attr->prog_type,
					       attr->expected_attach_type)) {
			bpf_token_put(token);
			token = NULL;
		}
	}

	bpf_cap = bpf_token_capable(token, CAP_BPF);
	err = -EPERM;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
	    !bpf_cap)
		goto put_token;

	/* Intent here is for unprivileged_bpf_disabled to block BPF program
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out for these
	 * and other operations.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
		goto put_token;

	if (attr->insn_cnt == 0 ||
	    attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
		err = -E2BIG;
		goto put_token;
	}
	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !bpf_cap)
		goto put_token;

	if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
		goto put_token;
	if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
		goto put_token;

	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
	 * or btf, we need to check which one it is
	 */
	if (attr->attach_prog_fd) {
		dst_prog = bpf_prog_get(attr->attach_prog_fd);
		if (IS_ERR(dst_prog)) {
			dst_prog = NULL;
			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
			if (IS_ERR(attach_btf)) {
				err = -EINVAL;
				goto put_token;
			}
			if (!btf_is_kernel(attach_btf)) {
				/* attaching through specifying bpf_prog's BTF
				 * objects directly might be supported eventually
				 */
				btf_put(attach_btf);
				err = -ENOTSUPP;
				goto put_token;
			}
		}
	} else if (attr->attach_btf_id) {
		/* fall back to vmlinux BTF, if BTF type ID is specified */
		attach_btf = bpf_get_btf_vmlinux();
		if (IS_ERR(attach_btf)) {
			err = PTR_ERR(attach_btf);
			goto put_token;
		}
		if (!attach_btf) {
			err = -EINVAL;
			goto put_token;
		}
		btf_get(attach_btf);
	}

	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
				       attach_btf, attr->attach_btf_id,
				       dst_prog)) {
		if (dst_prog)
			bpf_prog_put(dst_prog);
		if (attach_btf)
			btf_put(attach_btf);
		err = -EINVAL;
		goto put_token;
	}

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog) {
		if (dst_prog)
			bpf_prog_put(dst_prog);
		if (attach_btf)
			btf_put(attach_btf);
		err = -ENOMEM;
		goto put_token;
	}

	prog->expected_attach_type = attr->expected_attach_type;
	prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
	prog->aux->attach_btf = attach_btf;
	prog->aux->attach_btf_id = attr->attach_btf_id;
	prog->aux->dst_prog = dst_prog;
	prog->aux->dev_bound = !!attr->prog_ifindex;
	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;

	/* move token into prog->aux, reuse taken refcnt */
	prog->aux->token = token;
	token = NULL;

	prog->aux->user = get_current_user();
	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_bpfptr(prog->insns,
			     make_bpfptr(attr->insns, uattr.is_kernel),
			     bpf_prog_insn_size(prog)) != 0)
		goto free_prog;
	/* copy eBPF program license from user space */
	if (strncpy_from_bpfptr(license,
				make_bpfptr(attr->license, uattr.is_kernel),
				sizeof(license) - 1) < 0)
		goto free_prog;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic64_set(&prog->aux->refcnt, 1);

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_dev_bound_init(prog, attr);
		if (err)
			goto free_prog;
	}

	if (type == BPF_PROG_TYPE_EXT && dst_prog &&
	    bpf_prog_is_dev_bound(dst_prog->aux)) {
		err = bpf_prog_dev_bound_inherit(prog, dst_prog);
		if (err)
			goto free_prog;
	}

	/*
	 * Bookkeeping for managing the program attachment chain.
	 *
	 * It might be tempting to set the attach_tracing_prog flag at
	 * attachment time instead, but that would not prevent loading a
	 * bunch of tracing programs first and only then attaching them to
	 * one another.
	 *
	 * The attach_tracing_prog flag is set for the whole program lifetime
	 * and doesn't have to be cleared in bpf_tracing_link_release, since
	 * tracing programs cannot change their attachment target.
	 */
	if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
	    dst_prog->type == BPF_PROG_TYPE_TRACING) {
		prog->aux->attach_tracing_prog = true;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	prog->aux->load_time = ktime_get_boottime_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
			       sizeof(attr->prog_name));
	if (err < 0)
		goto free_prog;

	err = security_bpf_prog_load(prog, attr, token);
	if (err)
		goto free_prog_sec;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr, uattr, uattr_size);
	if (err < 0)
		goto free_used_maps;

	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
	 * effectively publicly exposed. However, retrieving via
	 * bpf_prog_get_fd_by_id() will take another reference,
	 * therefore it cannot be gone underneath us.
	 *
	 * Only for the time /after/ successful bpf_prog_new_fd()
	 * and before returning to userspace, we might just hold
	 * one reference and any parallel close on that fd could
	 * rip everything out. Hence, below notifications must
	 * happen before bpf_prog_new_fd().
	 *
	 * Also, any failure handling from this point onwards must
	 * be using bpf_prog_put() given the program is exposed.
	 */
	bpf_prog_kallsyms_add(prog);
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_LOAD);

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		bpf_prog_put(prog);
	return err;

free_used_maps:
	/* In case we have subprogs, we need to wait for a grace
	 * period before we can tear down JIT memory since symbols
	 * are already exposed under kallsyms.
	 */
	__bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
	return err;

free_prog_sec:
	security_bpf_prog_free(prog);
free_prog:
	free_uid(prog->aux->user);
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);
	bpf_prog_free(prog);
put_token:
	bpf_token_put(token);
	return err;
}

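/*
 * A hedged user-space sketch of the smallest possible BPF_PROG_LOAD: a
 * two-instruction "return 0" socket filter loaded via a raw bpf(2) wrapper
 * (libbpf's bpf_prog_load() is the supported interface):
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,	// r0 = 0
 *		  .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns = (__u64)(unsigned long)insns;
 *	attr.insn_cnt = 2;
 *	attr.license = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */
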
#define BPF_OBJ_LAST_FIELD path_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	int path_fd;

	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
		return -EINVAL;

	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
		return -EINVAL;

	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
	return bpf_obj_pin_user(attr->bpf_fd, path_fd,
				u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	int path_fd;

	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
	    attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
		return -EINVAL;

	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
		return -EINVAL;

	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
	return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
				attr->file_flags);
}

/* bpf_link_init_sleepable() allows specifying whether the BPF link itself
 * has "sleepable" semantics, which normally means that the BPF link's attach
 * hook can dereference the link or the link's underlying program for some
 * time after detachment due to the RCU Tasks Trace-based lifetime protection
 * scheme. The BPF program itself can be non-sleepable, yet, because it's
 * transitively reachable through the BPF link, its freeing has to be delayed
 * until after an RCU Tasks Trace GP.
 */
void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
			     const struct bpf_link_ops *ops, struct bpf_prog *prog,
			     bool sleepable)
{
	WARN_ON(ops->dealloc && ops->dealloc_deferred);
	atomic64_set(&link->refcnt, 1);
	link->type = type;
	link->sleepable = sleepable;
	link->id = 0;
	link->ops = ops;
	link->prog = prog;
}

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
{
	bpf_link_init_sleepable(link, type, ops, prog, false);
}

static void bpf_link_free_id(int id)
{
	if (!id)
		return;

	spin_lock_bh(&link_idr_lock);
	idr_remove(&link_idr, id);
	spin_unlock_bh(&link_idr_lock);
}

/* Clean up bpf_link and corresponding anon_inode file and FD. After
 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
 * anon_inode's release() call. This helper marks bpf_link as
 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
 * is not decremented, that is the responsibility of the calling code that
 * failed to complete bpf_link initialization.
 * This helper eventually calls link's dealloc callback, but does not call
 * link's release callback.
 */
void bpf_link_cleanup(struct bpf_link_primer *primer)
{
	primer->link->prog = NULL;
	bpf_link_free_id(primer->id);
	fput(primer->file);
	put_unused_fd(primer->fd);
}

void bpf_link_inc(struct bpf_link *link)
{
	atomic64_inc(&link->refcnt);
}

static void bpf_link_dealloc(struct bpf_link *link)
{
	/* now that we know that bpf_link itself can't be reached, put underlying BPF program */
	if (link->prog)
		bpf_prog_put(link->prog);

	/* free bpf_link and its containing memory */
	if (link->ops->dealloc_deferred)
		link->ops->dealloc_deferred(link);
	else
		link->ops->dealloc(link);
}

static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
{
	struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);

	bpf_link_dealloc(link);
}

static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_link_defer_dealloc_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
}

/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
	const struct bpf_link_ops *ops = link->ops;

	bpf_link_free_id(link->id);
	/* detach BPF program, clean up used resources */
	if (link->prog)
		ops->release(link);
	if (ops->dealloc_deferred) {
		/* Schedule BPF link deallocation, which will only then
		 * trigger putting BPF program refcount.
		 * If underlying BPF program is sleepable or BPF link's target
		 * attach hookpoint is sleepable or otherwise requires RCU GPs
		 * to ensure link and its underlying BPF program is not
		 * reachable anymore, we need to first wait for RCU tasks
		 * trace sync, and then go through "classic" RCU grace period
		 */
		if (link->sleepable || (link->prog && link->prog->sleepable))
			call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
		else
			call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
	} else if (ops->dealloc) {
		bpf_link_dealloc(link);
	}
}

static void bpf_link_put_deferred(struct work_struct *work)
{
	struct bpf_link *link = container_of(work, struct bpf_link, work);

	bpf_link_free(link);
}

/* bpf_link_put() might be called from atomic context, but freeing the link
 * requires a sleepable context (sleeping locks may be taken along the way),
 * so the actual freeing is deferred to a workqueue.
 */
void bpf_link_put(struct bpf_link *link)
{
	if (!atomic64_dec_and_test(&link->refcnt))
		return;

	INIT_WORK(&link->work, bpf_link_put_deferred);
	schedule_work(&link->work);
}
EXPORT_SYMBOL(bpf_link_put);

static void bpf_link_put_direct(struct bpf_link *link)
{
	if (!atomic64_dec_and_test(&link->refcnt))
		return;
	bpf_link_free(link);
}

static int bpf_link_release(struct inode *inode, struct file *filp)
{
	struct bpf_link *link = filp->private_data;

	bpf_link_put_direct(link);
	return 0;
}

#ifdef CONFIG_PROC_FS
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
static const char *bpf_link_type_strs[] = {
	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
#include <linux/bpf_types.h>
};
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_link *link = filp->private_data;
	const struct bpf_prog *prog = link->prog;
	enum bpf_link_type type = link->type;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) {
		seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]);
	} else {
		WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type);
		seq_printf(m, "link_type:\t<%u>\n", type);
	}
	seq_printf(m, "link_id:\t%u\n", link->id);

	if (prog) {
		bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
		seq_printf(m,
			   "prog_tag:\t%s\n"
			   "prog_id:\t%u\n",
			   prog_tag,
			   prog->aux->id);
	}
	if (link->ops->show_fdinfo)
		link->ops->show_fdinfo(link, m);
}
#endif

static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
{
	struct bpf_link *link = file->private_data;

	return link->ops->poll(file, pts);
}

static const struct file_operations bpf_link_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_link_show_fdinfo,
#endif
	.release	= bpf_link_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

static const struct file_operations bpf_link_fops_poll = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_link_show_fdinfo,
#endif
	.release	= bpf_link_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.poll		= bpf_link_poll,
};

static int bpf_link_alloc_id(struct bpf_link *link)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&link_idr_lock);
	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
	spin_unlock_bh(&link_idr_lock);
	idr_preload_end();

	return id;
}

/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
 * reserving unused FD and allocating ID from link_idr. This is to be paired
 * with bpf_link_settle() to install FD and ID and expose bpf_link to
 * user-space, if bpf_link is successfully attached. If not, bpf_link and
 * pre-allocated resources are to be freed with a bpf_link_cleanup() call.
 * All the transient state is passed around in struct bpf_link_primer.
 * This is the preferred way to create and initialize bpf_link, especially
 * when there are complicated and expensive operations in between creating
 * bpf_link itself and attaching it to the BPF hook. By using bpf_link_prime()
 * and bpf_link_settle(), kernel code using bpf_link doesn't have to perform
 * expensive (and potentially failing) roll back operations in the rare case
 * that file, FD, or ID can't be allocated.
 */
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
{
	struct file *file;
	int fd, id;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	id = bpf_link_alloc_id(link);
	if (id < 0) {
		put_unused_fd(fd);
		return id;
	}

	file = anon_inode_getfile("bpf_link",
				  link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
				  link, O_CLOEXEC);
	if (IS_ERR(file)) {
		bpf_link_free_id(id);
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	primer->link = link;
	primer->file = file;
	primer->fd = fd;
	primer->id = id;
	return 0;
}

int bpf_link_settle(struct bpf_link_primer *primer)
{
	/* make bpf_link fetchable by ID */
	spin_lock_bh(&link_idr_lock);
	primer->link->id = primer->id;
	spin_unlock_bh(&link_idr_lock);
	/* make bpf_link fetchable by FD */
	fd_install(primer->fd, primer->file);
	/* pass through installed FD */
	return primer->fd;
}

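/*
 * A minimal sketch of the prime/settle/cleanup pattern described above, in
 * the shape used by the attach paths in this file (the attach step is a
 * hypothetical stand-in for e.g. bpf_probe_register()):
 *
 *	struct bpf_link_primer primer;
 *	int err;
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);		// not yet exposed, plain free is fine
 *		return err;
 *	}
 *	err = attach_link_to_hook(link);	// hypothetical attach step
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// undoes file/FD/ID, defers dealloc
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// publish FD and ID
 */
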
int bpf_link_new_fd(struct bpf_link *link)
{
	return anon_inode_getfd("bpf-link",
				link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
				link, O_CLOEXEC);
}

struct bpf_link *bpf_link_get_from_fd(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_link *link;

	if (fd_empty(f))
		return ERR_PTR(-EBADF);
	if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll)
		return ERR_PTR(-EINVAL);

	link = fd_file(f)->private_data;
	bpf_link_inc(link);
	return link;
}
EXPORT_SYMBOL(bpf_link_get_from_fd);

static void bpf_tracing_link_release(struct bpf_link *link)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link.link);

	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
						tr_link->trampoline,
						tr_link->tgt_prog));

	bpf_trampoline_put(tr_link->trampoline);

	/* tgt_prog is NULL if target is a kernel function */
	if (tr_link->tgt_prog)
		bpf_prog_put(tr_link->tgt_prog);
}

static void bpf_tracing_link_dealloc(struct bpf_link *link)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link.link);

	kfree(tr_link);
}

static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
					 struct seq_file *seq)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link.link);
	u32 target_btf_id, target_obj_id;

	bpf_trampoline_unpack_key(tr_link->trampoline->key,
				  &target_obj_id, &target_btf_id);
	seq_printf(seq,
		   "attach_type:\t%d\n"
		   "target_obj_id:\t%u\n"
		   "target_btf_id:\t%u\n",
		   tr_link->attach_type,
		   target_obj_id,
		   target_btf_id);
}

static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
					   struct bpf_link_info *info)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link.link);

	info->tracing.attach_type = tr_link->attach_type;
	bpf_trampoline_unpack_key(tr_link->trampoline->key,
				  &info->tracing.target_obj_id,
				  &info->tracing.target_btf_id);

	return 0;
}

static const struct bpf_link_ops bpf_tracing_link_lops = {
	.release = bpf_tracing_link_release,
	.dealloc = bpf_tracing_link_dealloc,
	.show_fdinfo = bpf_tracing_link_show_fdinfo,
	.fill_link_info = bpf_tracing_link_fill_link_info,
};

static int bpf_tracing_prog_attach(struct bpf_prog *prog,
				   int tgt_prog_fd,
				   u32 btf_id,
				   u64 bpf_cookie)
{
	struct bpf_link_primer link_primer;
	struct bpf_prog *tgt_prog = NULL;
	struct bpf_trampoline *tr = NULL;
	struct bpf_tracing_link *link;
	u64 key = 0;
	int err;

	switch (prog->type) {
	case BPF_PROG_TYPE_TRACING:
		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	case BPF_PROG_TYPE_EXT:
		if (prog->expected_attach_type != 0) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	case BPF_PROG_TYPE_LSM:
		if (prog->expected_attach_type != BPF_LSM_MAC) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	default:
		err = -EINVAL;
		goto out_put_prog;
	}

	if (!!tgt_prog_fd != !!btf_id) {
		err = -EINVAL;
		goto out_put_prog;
	}

	if (tgt_prog_fd) {
		/*
		 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If
		 * this is ever extended to BPF_PROG_TYPE_TRACING as well, do
		 * not forget to update the way the attach_tracing_prog flag
		 * is set.
		 */
		if (prog->type != BPF_PROG_TYPE_EXT) {
			err = -EINVAL;
			goto out_put_prog;
		}

		tgt_prog = bpf_prog_get(tgt_prog_fd);
		if (IS_ERR(tgt_prog)) {
			err = PTR_ERR(tgt_prog);
			tgt_prog = NULL;
			goto out_put_prog;
		}

		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_prog;
	}
	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
		      &bpf_tracing_link_lops, prog);
	link->attach_type = prog->expected_attach_type;
	link->link.cookie = bpf_cookie;

	mutex_lock(&prog->aux->dst_mutex);

	/* There are a few possible cases here:
	 *
	 * - if prog->aux->dst_trampoline is set, the program was just loaded
	 *   and not yet attached to anything, so we can use the values stored
	 *   in prog->aux
	 *
	 * - if prog->aux->dst_trampoline is NULL, the program has already been
	 *   attached to a target and its initial target was cleared (below)
	 *
	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
	 *   target_btf_id using the link_create API.
	 *
	 * - if tgt_prog == NULL, this function was called via the old
	 *   raw_tracepoint_open API, and we need a target from prog->aux
	 *
	 * - if prog->aux->dst_trampoline and tgt_prog are both NULL, the
	 *   program was detached and is going for re-attachment.
	 *
	 * - if prog->aux->dst_trampoline, tgt_prog and prog->aux->attach_btf
	 *   are all NULL, the program was already attached and the user did
	 *   not provide tgt_prog_fd, so we have no way to find or create a
	 *   trampoline
	 */
	if (!prog->aux->dst_trampoline && !tgt_prog) {
		/*
		 * Allow re-attach for TRACING and LSM programs. If it's
		 * currently linked, bpf_trampoline_link_prog will fail.
		 * EXT programs need to specify tgt_prog_fd, so they
		 * re-attach in separate code path.
		 */
		if (prog->type != BPF_PROG_TYPE_TRACING &&
		    prog->type != BPF_PROG_TYPE_LSM) {
			err = -EINVAL;
			goto out_unlock;
		}
		/* We can allow re-attach only if we have valid attach_btf. */
		if (!prog->aux->attach_btf) {
			err = -EINVAL;
			goto out_unlock;
		}
		btf_id = prog->aux->attach_btf_id;
		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
	}

	if (!prog->aux->dst_trampoline ||
	    (key && key != prog->aux->dst_trampoline->key)) {
		/* If there is no saved target, or the specified target is
		 * different from the destination specified at load time, we
		 * need a new trampoline and a check for compatibility
		 */
		struct bpf_attach_target_info tgt_info = {};

		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
					      &tgt_info);
		if (err)
			goto out_unlock;

		if (tgt_info.tgt_mod) {
			module_put(prog->aux->mod);
			prog->aux->mod = tgt_info.tgt_mod;
		}

		tr = bpf_trampoline_get(key, &tgt_info);
		if (!tr) {
			err = -ENOMEM;
			goto out_unlock;
		}
	} else {
		/* The caller didn't specify a target, or the target was the
		 * same as the destination supplied during program load. This
		 * means we can reuse the trampoline and reference from program
		 * load time, and there is no need to allocate a new one. This
		 * can only happen once for any program, as the saved values in
		 * prog->aux are cleared below.
		 */
		tr = prog->aux->dst_trampoline;
		tgt_prog = prog->aux->dst_prog;
	}

	err = bpf_link_prime(&link->link.link, &link_primer);
	if (err)
		goto out_unlock;

	err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog);
	if (err) {
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto out_unlock;
	}

	link->tgt_prog = tgt_prog;
	link->trampoline = tr;

	/* Always clear the trampoline and target prog from prog->aux to make
	 * sure the original attach destination is not kept alive after a
	 * program is (re-)attached to another target.
	 */
	if (prog->aux->dst_prog &&
	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
		/* got extra prog ref from syscall, or attaching to different prog */
		bpf_prog_put(prog->aux->dst_prog);
	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
		/* we allocated a new trampoline, so free the old one */
		bpf_trampoline_put(prog->aux->dst_trampoline);

	prog->aux->dst_prog = NULL;
	prog->aux->dst_trampoline = NULL;
	mutex_unlock(&prog->aux->dst_mutex);

	return bpf_link_settle(&link_primer);
out_unlock:
	if (tr && tr != prog->aux->dst_trampoline)
		bpf_trampoline_put(tr);
	mutex_unlock(&prog->aux->dst_mutex);
	kfree(link);
out_put_prog:
	if (tgt_prog_fd && tgt_prog)
		bpf_prog_put(tgt_prog);
	return err;
}

static void bpf_raw_tp_link_release(struct bpf_link *link)
{
	struct bpf_raw_tp_link *raw_tp =
		container_of(link, struct bpf_raw_tp_link, link);

	bpf_probe_unregister(raw_tp->btp, raw_tp);
	bpf_put_raw_tracepoint(raw_tp->btp);
}

static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
{
	struct bpf_raw_tp_link *raw_tp =
		container_of(link, struct bpf_raw_tp_link, link);

	kfree(raw_tp);
}

static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_raw_tp_link *raw_tp_link =
		container_of(link, struct bpf_raw_tp_link, link);

	seq_printf(seq,
		   "tp_name:\t%s\n",
		   raw_tp_link->btp->tp->name);
}

static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
			    u32 len)
{
	if (ulen >= len + 1) {
		if (copy_to_user(ubuf, buf, len + 1))
			return -EFAULT;
	} else {
		char zero = '\0';

		if (copy_to_user(ubuf, buf, ulen - 1))
			return -EFAULT;
		if (put_user(zero, ubuf + ulen - 1))
			return -EFAULT;
		return -ENOSPC;
	}

	return 0;
}

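/*
 * Worked example of the truncation semantics above: for buf = "sys_enter"
 * (len == 9) and a user buffer of ulen == 4 bytes, user space receives
 * "sys\0" and the call returns -ENOSPC; with ulen >= 10 it receives the
 * full NUL-terminated string and the call returns 0. Callers are expected
 * to guarantee ulen > 0.
 */
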
static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_raw_tp_link *raw_tp_link =
		container_of(link, struct bpf_raw_tp_link, link);
	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
	const char *tp_name = raw_tp_link->btp->tp->name;
	u32 ulen = info->raw_tracepoint.tp_name_len;
	size_t tp_len = strlen(tp_name);

	if (!ulen ^ !ubuf)
		return -EINVAL;

	info->raw_tracepoint.tp_name_len = tp_len + 1;

	if (!ubuf)
		return 0;

	return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
}

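/*
 * Since tp_name_len is updated to the required size even when no buffer is
 * supplied, user space can size the name buffer with a two-call pattern. A
 * hedged sketch ("ptr_to_u64" is a hypothetical cast helper; libbpf's
 * bpf_link_get_info_by_fd() wraps the underlying command):
 *
 *	struct bpf_link_info info = {};
 *	__u32 len = sizeof(info);
 *
 *	bpf_link_get_info_by_fd(link_fd, &info, &len);	// query length only
 *	buf = malloc(info.raw_tracepoint.tp_name_len);
 *	info.raw_tracepoint.tp_name = ptr_to_u64(buf);
 *	bpf_link_get_info_by_fd(link_fd, &info, &len);	// fetch the name
 */
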
static const struct bpf_link_ops bpf_raw_tp_link_lops = {
	.release = bpf_raw_tp_link_release,
	.dealloc_deferred = bpf_raw_tp_link_dealloc,
	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
	.fill_link_info = bpf_raw_tp_link_fill_link_info,
};

#ifdef CONFIG_PERF_EVENTS
struct bpf_perf_link {
	struct bpf_link link;
	struct file *perf_file;
};

static void bpf_perf_link_release(struct bpf_link *link)
{
	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
	struct perf_event *event = perf_link->perf_file->private_data;

	perf_event_free_bpf_prog(event);
	fput(perf_link->perf_file);
}

static void bpf_perf_link_dealloc(struct bpf_link *link)
{
	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);

	kfree(perf_link);
}

static int bpf_perf_link_fill_common(const struct perf_event *event,
				     char __user *uname, u32 *ulenp,
				     u64 *probe_offset, u64 *probe_addr,
				     u32 *fd_type, unsigned long *missed)
{
	const char *buf;
	u32 prog_id, ulen;
	size_t len;
	int err;

	ulen = *ulenp;
	if (!ulen ^ !uname)
		return -EINVAL;

	err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
				      probe_offset, probe_addr, missed);
	if (err)
		return err;

	if (buf) {
		len = strlen(buf);
		*ulenp = len + 1;
	} else {
		*ulenp = 1;
	}
	if (!uname)
		return 0;

	if (buf) {
		err = bpf_copy_to_user(uname, buf, ulen, len);
		if (err)
			return err;
	} else {
		char zero = '\0';

		if (put_user(zero, uname))
			return -EFAULT;
	}
	return 0;
}

#ifdef CONFIG_KPROBE_EVENTS
static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
				     struct bpf_link_info *info)
{
	unsigned long missed;
	char __user *uname;
	u64 addr, offset;
	u32 ulen, type;
	int err;

	uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
	ulen = info->perf_event.kprobe.name_len;
	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
					&type, &missed);
	if (err)
		return err;
	if (type == BPF_FD_TYPE_KRETPROBE)
		info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
	else
		info->perf_event.type = BPF_PERF_EVENT_KPROBE;
	info->perf_event.kprobe.name_len = ulen;
	info->perf_event.kprobe.offset = offset;
	info->perf_event.kprobe.missed = missed;
	if (!kallsyms_show_value(current_cred()))
		addr = 0;
	info->perf_event.kprobe.addr = addr;
	info->perf_event.kprobe.cookie = event->bpf_cookie;
	return 0;
}
#endif

#ifdef CONFIG_UPROBE_EVENTS
static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
				     struct bpf_link_info *info)
{
	char __user *uname;
	u64 addr, offset;
	u32 ulen, type;
	int err;

	uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
	ulen = info->perf_event.uprobe.name_len;
	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
					&type, NULL);
	if (err)
		return err;

	if (type == BPF_FD_TYPE_URETPROBE)
		info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
	else
		info->perf_event.type = BPF_PERF_EVENT_UPROBE;
	info->perf_event.uprobe.name_len = ulen;
	info->perf_event.uprobe.offset = offset;
	info->perf_event.uprobe.cookie = event->bpf_cookie;
	return 0;
}
#endif

static int bpf_perf_link_fill_probe(const struct perf_event *event,
				    struct bpf_link_info *info)
{
#ifdef CONFIG_KPROBE_EVENTS
	if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
		return bpf_perf_link_fill_kprobe(event, info);
#endif
#ifdef CONFIG_UPROBE_EVENTS
	if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
		return bpf_perf_link_fill_uprobe(event, info);
#endif
	return -EOPNOTSUPP;
}

static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
					 struct bpf_link_info *info)
{
	char __user *uname;
	u32 ulen;
	int err;

	uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
	ulen = info->perf_event.tracepoint.name_len;
	err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL);
	if (err)
		return err;

	info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
	info->perf_event.tracepoint.name_len = ulen;
	info->perf_event.tracepoint.cookie = event->bpf_cookie;
	return 0;
}

static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
					 struct bpf_link_info *info)
{
	info->perf_event.event.type = event->attr.type;
	info->perf_event.event.config = event->attr.config;
	info->perf_event.event.cookie = event->bpf_cookie;
	info->perf_event.type = BPF_PERF_EVENT_EVENT;
	return 0;
}

static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
					struct bpf_link_info *info)
{
	struct bpf_perf_link *perf_link;
	const struct perf_event *event;

	perf_link = container_of(link, struct bpf_perf_link, link);
	event = perf_get_event(perf_link->perf_file);
	if (IS_ERR(event))
		return PTR_ERR(event);

	switch (event->prog->type) {
	case BPF_PROG_TYPE_PERF_EVENT:
		return bpf_perf_link_fill_perf_event(event, info);
	case BPF_PROG_TYPE_TRACEPOINT:
		return bpf_perf_link_fill_tracepoint(event, info);
	case BPF_PROG_TYPE_KPROBE:
		return bpf_perf_link_fill_probe(event, info);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct bpf_link_ops bpf_perf_link_lops = {
	.release = bpf_perf_link_release,
	.dealloc = bpf_perf_link_dealloc,
	.fill_link_info = bpf_perf_link_fill_link_info,
};

static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_perf_link *link;
	struct perf_event *event;
	struct file *perf_file;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	perf_file = perf_event_get(attr->link_create.target_fd);
	if (IS_ERR(perf_file))
		return PTR_ERR(perf_file);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_file;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
	link->perf_file = perf_file;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_file;
	}

	event = perf_file->private_data;
	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_file;
	}
	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
	bpf_prog_inc(prog);

	return bpf_link_settle(&link_primer);

out_put_file:
	fput(perf_file);
	return err;
}
#else
static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_PERF_EVENTS */

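/*
 * A hedged user-space sketch of creating a perf-event-backed link via
 * BPF_LINK_CREATE (libbpf's bpf_link_create() wraps this). "perf_fd" is a
 * hypothetical fd from perf_event_open(2), "prog_fd" a loaded
 * BPF_PROG_TYPE_KPROBE/TRACEPOINT/PERF_EVENT program:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.target_fd = perf_fd;
 *	attr.link_create.attach_type = BPF_PERF_EVENT;
 *	attr.link_create.perf_event.bpf_cookie = cookie;	// optional
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */
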
3925static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3926 const char __user *user_tp_name, u64 cookie)
3927{
3928 struct bpf_link_primer link_primer;
3929 struct bpf_raw_tp_link *link;
3930 struct bpf_raw_event_map *btp;
3931 const char *tp_name;
3932 char buf[128];
3933 int err;
3934
3935 switch (prog->type) {
3936 case BPF_PROG_TYPE_TRACING:
3937 case BPF_PROG_TYPE_EXT:
3938 case BPF_PROG_TYPE_LSM:
3939 if (user_tp_name)
3940 /* The attach point for this category of programs
3941 * should be specified via btf_id during program load.
3942 */
3943 return -EINVAL;
3944 if (prog->type == BPF_PROG_TYPE_TRACING &&
3945 prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3946 tp_name = prog->aux->attach_func_name;
3947 break;
3948 }
3949 return bpf_tracing_prog_attach(prog, 0, 0, 0);
3950 case BPF_PROG_TYPE_RAW_TRACEPOINT:
3951 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3952 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3953 return -EFAULT;
3954 buf[sizeof(buf) - 1] = 0;
3955 tp_name = buf;
3956 break;
3957 default:
3958 return -EINVAL;
3959 }
3960
3961 btp = bpf_get_raw_tracepoint(tp_name);
3962 if (!btp)
3963 return -ENOENT;
3964
3965 link = kzalloc(sizeof(*link), GFP_USER);
3966 if (!link) {
3967 err = -ENOMEM;
3968 goto out_put_btp;
3969 }
3970 bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3971 &bpf_raw_tp_link_lops, prog,
3972 tracepoint_is_faultable(btp->tp));
3973 link->btp = btp;
3974 link->cookie = cookie;
3975
3976 err = bpf_link_prime(&link->link, &link_primer);
3977 if (err) {
3978 kfree(link);
3979 goto out_put_btp;
3980 }
3981
3982 err = bpf_probe_register(link->btp, link);
3983 if (err) {
3984 bpf_link_cleanup(&link_primer);
3985 goto out_put_btp;
3986 }
3987
3988 return bpf_link_settle(&link_primer);
3989
3990out_put_btp:
3991 bpf_put_raw_tracepoint(btp);
3992 return err;
3993}
3994
3995#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie
3996
3997static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3998{
3999 struct bpf_prog *prog;
4000 void __user *tp_name;
4001 __u64 cookie;
4002 int fd;
4003
4004 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
4005 return -EINVAL;
4006
4007 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
4008 if (IS_ERR(prog))
4009 return PTR_ERR(prog);
4010
4011 tp_name = u64_to_user_ptr(attr->raw_tracepoint.name);
4012 cookie = attr->raw_tracepoint.cookie;
4013 fd = bpf_raw_tp_link_attach(prog, tp_name, cookie);
4014 if (fd < 0)
4015 bpf_prog_put(prog);
4016 return fd;
4017}
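
/*
 * Illustrative userspace usage of BPF_RAW_TRACEPOINT_OPEN (a sketch,
 * not kernel code; prog_fd is assumed to be a loaded
 * BPF_PROG_TYPE_RAW_TRACEPOINT program):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	attr.raw_tracepoint.cookie = 0;
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */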

static enum bpf_prog_type
attach_type_to_prog_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		return BPF_PROG_TYPE_CGROUP_SKB;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET_SOCK_RELEASE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		return BPF_PROG_TYPE_CGROUP_SOCK;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UNIX_CONNECT:
	case BPF_CGROUP_INET4_GETPEERNAME:
	case BPF_CGROUP_INET6_GETPEERNAME:
	case BPF_CGROUP_UNIX_GETPEERNAME:
	case BPF_CGROUP_INET4_GETSOCKNAME:
	case BPF_CGROUP_INET6_GETSOCKNAME:
	case BPF_CGROUP_UNIX_GETSOCKNAME:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UNIX_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
	case BPF_CGROUP_UNIX_RECVMSG:
		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
	case BPF_CGROUP_SOCK_OPS:
		return BPF_PROG_TYPE_SOCK_OPS;
	case BPF_CGROUP_DEVICE:
		return BPF_PROG_TYPE_CGROUP_DEVICE;
	case BPF_SK_MSG_VERDICT:
		return BPF_PROG_TYPE_SK_MSG;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
	case BPF_SK_SKB_VERDICT:
		return BPF_PROG_TYPE_SK_SKB;
	case BPF_LIRC_MODE2:
		return BPF_PROG_TYPE_LIRC_MODE2;
	case BPF_FLOW_DISSECTOR:
		return BPF_PROG_TYPE_FLOW_DISSECTOR;
	case BPF_CGROUP_SYSCTL:
		return BPF_PROG_TYPE_CGROUP_SYSCTL;
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return BPF_PROG_TYPE_TRACING;
	case BPF_LSM_MAC:
		return BPF_PROG_TYPE_LSM;
	case BPF_SK_LOOKUP:
		return BPF_PROG_TYPE_SK_LOOKUP;
	case BPF_XDP:
		return BPF_PROG_TYPE_XDP;
	case BPF_LSM_CGROUP:
		return BPF_PROG_TYPE_LSM;
	case BPF_TCX_INGRESS:
	case BPF_TCX_EGRESS:
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		return BPF_PROG_TYPE_SCHED_CLS;
	default:
		return BPF_PROG_TYPE_UNSPEC;
	}
}

static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
					     enum bpf_attach_type attach_type)
{
	enum bpf_prog_type ptype;

	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_SK_LOOKUP:
		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
	case BPF_PROG_TYPE_CGROUP_SKB:
		if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
			/* cg-skb progs can be loaded by an unprivileged user,
			 * so check permissions at attach time.
			 */
			return -EPERM;

		ptype = attach_type_to_prog_type(attach_type);
		if (prog->type != ptype)
			return -EINVAL;

		return prog->enforce_expected_attach_type &&
			prog->expected_attach_type != attach_type ?
			-EINVAL : 0;
	case BPF_PROG_TYPE_EXT:
		return 0;
	case BPF_PROG_TYPE_NETFILTER:
		if (attach_type != BPF_NETFILTER)
			return -EINVAL;
		return 0;
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
		if (attach_type != BPF_PERF_EVENT)
			return -EINVAL;
		return 0;
	case BPF_PROG_TYPE_KPROBE:
		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
		    attach_type != BPF_TRACE_KPROBE_MULTI)
			return -EINVAL;
		if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION &&
		    attach_type != BPF_TRACE_KPROBE_SESSION)
			return -EINVAL;
		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
		    attach_type != BPF_TRACE_UPROBE_MULTI)
			return -EINVAL;
		if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION &&
		    attach_type != BPF_TRACE_UPROBE_SESSION)
			return -EINVAL;
		if (attach_type != BPF_PERF_EVENT &&
		    attach_type != BPF_TRACE_KPROBE_MULTI &&
		    attach_type != BPF_TRACE_KPROBE_SESSION &&
		    attach_type != BPF_TRACE_UPROBE_MULTI &&
		    attach_type != BPF_TRACE_UPROBE_SESSION)
			return -EINVAL;
		return 0;
	case BPF_PROG_TYPE_SCHED_CLS:
		if (attach_type != BPF_TCX_INGRESS &&
		    attach_type != BPF_TCX_EGRESS &&
		    attach_type != BPF_NETKIT_PRIMARY &&
		    attach_type != BPF_NETKIT_PEER)
			return -EINVAL;
		return 0;
	default:
		ptype = attach_type_to_prog_type(attach_type);
		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
			return -EINVAL;
		return 0;
	}
}

#define BPF_PROG_ATTACH_LAST_FIELD expected_revision

#define BPF_F_ATTACH_MASK_BASE	\
	(BPF_F_ALLOW_OVERRIDE |	\
	 BPF_F_ALLOW_MULTI |	\
	 BPF_F_REPLACE)

#define BPF_F_ATTACH_MASK_MPROG	\
	(BPF_F_REPLACE |	\
	 BPF_F_BEFORE |		\
	 BPF_F_AFTER |		\
	 BPF_F_ID |		\
	 BPF_F_LINK)

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);
	if (ptype == BPF_PROG_TYPE_UNSPEC)
		return -EINVAL;
	if (bpf_mprog_supported(ptype)) {
		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
			return -EINVAL;
	} else {
		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
			return -EINVAL;
		if (attr->relative_fd ||
		    attr->expected_revision)
			return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		ret = sock_map_get_from_fd(attr, prog);
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		ret = lirc_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		ret = netns_bpf_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_LSM:
		if (ptype == BPF_PROG_TYPE_LSM &&
		    prog->expected_attach_type != BPF_LSM_CGROUP)
			ret = -EINVAL;
		else
			ret = cgroup_bpf_prog_attach(attr, ptype, prog);
		break;
	case BPF_PROG_TYPE_SCHED_CLS:
		if (attr->attach_type == BPF_TCX_INGRESS ||
		    attr->attach_type == BPF_TCX_EGRESS)
			ret = tcx_prog_attach(attr, prog);
		else
			ret = netkit_prog_attach(attr, prog);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		bpf_prog_put(prog);
	return ret;
}
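
/*
 * Illustrative legacy-style attach via BPF_PROG_ATTACH (a sketch, not
 * kernel code; cgroup_fd and prog_fd are assumed to exist):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * Unlike BPF_LINK_CREATE, this attachment has no owning fd and stays in
 * place until explicitly detached with BPF_PROG_DETACH.
 */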

#define BPF_PROG_DETACH_LAST_FIELD expected_revision

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct bpf_prog *prog = NULL;
	enum bpf_prog_type ptype;
	int ret;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);
	if (bpf_mprog_supported(ptype)) {
		if (ptype == BPF_PROG_TYPE_UNSPEC)
			return -EINVAL;
		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
			return -EINVAL;
		if (attr->attach_bpf_fd) {
			prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
			if (IS_ERR(prog))
				return PTR_ERR(prog);
		}
	} else if (attr->attach_flags ||
		   attr->relative_fd ||
		   attr->expected_revision) {
		return -EINVAL;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_SK_SKB:
		ret = sock_map_prog_detach(attr, ptype);
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		ret = lirc_prog_detach(attr);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		ret = netns_bpf_prog_detach(attr, ptype);
		break;
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_LSM:
		ret = cgroup_bpf_prog_detach(attr, ptype);
		break;
	case BPF_PROG_TYPE_SCHED_CLS:
		if (attr->attach_type == BPF_TCX_INGRESS ||
		    attr->attach_type == BPF_TCX_EGRESS)
			ret = tcx_prog_detach(attr, prog);
		else
			ret = netkit_prog_detach(attr, prog);
		break;
	default:
		ret = -EINVAL;
	}

	if (prog)
		bpf_prog_put(prog);
	return ret;
}

#define BPF_PROG_QUERY_LAST_FIELD query.revision

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	if (!bpf_net_capable())
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET_SOCK_RELEASE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UNIX_CONNECT:
	case BPF_CGROUP_INET4_GETPEERNAME:
	case BPF_CGROUP_INET6_GETPEERNAME:
	case BPF_CGROUP_UNIX_GETPEERNAME:
	case BPF_CGROUP_INET4_GETSOCKNAME:
	case BPF_CGROUP_INET6_GETSOCKNAME:
	case BPF_CGROUP_UNIX_GETSOCKNAME:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UNIX_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
	case BPF_CGROUP_UNIX_RECVMSG:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
	case BPF_CGROUP_SYSCTL:
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
	case BPF_LSM_CGROUP:
		return cgroup_bpf_prog_query(attr, uattr);
	case BPF_LIRC_MODE2:
		return lirc_prog_query(attr, uattr);
	case BPF_FLOW_DISSECTOR:
	case BPF_SK_LOOKUP:
		return netns_bpf_prog_query(attr, uattr);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
	case BPF_SK_MSG_VERDICT:
	case BPF_SK_SKB_VERDICT:
		return sock_map_bpf_prog_query(attr, uattr);
	case BPF_TCX_INGRESS:
	case BPF_TCX_EGRESS:
		return tcx_prog_query(attr, uattr);
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		return netkit_prog_query(attr, uattr);
	default:
		return -EINVAL;
	}
}

#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
	    (!attr->test.ctx_size_in && attr->test.ctx_in))
		return -EINVAL;

	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
	    (!attr->test.ctx_size_out && attr->test.ctx_out))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}
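
/*
 * A sketch of how userspace walks all loaded programs with the
 * *_GET_NEXT_ID commands above (illustrative only, not kernel code):
 *
 *	union bpf_attr attr = {};
 *	__u32 id = 0;
 *
 *	for (;;) {
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
 *			break;	/* -ENOENT: no more IDs */
 *		id = attr.next_id;
 *		/* e.g. turn the ID into an fd via BPF_PROG_GET_FD_BY_ID */
 *	}
 */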

struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
{
	struct bpf_map *map;

	spin_lock_bh(&map_idr_lock);
again:
	map = idr_get_next(&map_idr, id);
	if (map) {
		map = __bpf_map_inc_not_zero(map, false);
		if (IS_ERR(map)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&map_idr_lock);

	return map;
}

struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
{
	struct bpf_prog *prog;

	spin_lock_bh(&prog_idr_lock);
again:
	prog = idr_get_next(&prog_idr, id);
	if (prog) {
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&prog_idr_lock);

	return prog;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

struct bpf_prog *bpf_prog_by_id(u32 id)
{
	struct bpf_prog *prog;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);
	return prog;
}

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	prog = bpf_prog_by_id(id);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = __bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put_with_uref(map);

	return fd;
}

static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr, u32 *off,
					      u32 *type)
{
	const struct bpf_map *map;
	int i;

	mutex_lock(&prog->aux->used_maps_mutex);
	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (map == (void *)addr) {
			*type = BPF_PSEUDO_MAP_FD;
			goto out;
		}
		if (!map->ops->map_direct_value_meta)
			continue;
		if (!map->ops->map_direct_value_meta(map, addr, off)) {
			*type = BPF_PSEUDO_MAP_VALUE;
			goto out;
		}
	}
	map = NULL;

out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return map;
}

static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
					      const struct cred *f_cred)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u32 off, type;
	u64 imm;
	u8 code;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		code = insns[i].code;

		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (code == (BPF_JMP | BPF_CALL) ||
		    code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok(f_cred))
				insns[i].imm = 0;
			continue;
		}
		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
			continue;
		}

		if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||
		     BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {
			insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM;
			continue;
		}

		if (code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm, &off, &type);
		if (map) {
			insns[i].src_reg = type;
			insns[i].imm = map->id;
			insns[i + 1].imm = off;
			continue;
		}
	}

	return insns;
}
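
/*
 * Example of the ld_imm64 rewrite above (illustrative): a pair of
 * instructions that the verifier patched to carry a raw map pointer
 *
 *	insn[i]      BPF_LD | BPF_IMM | BPF_DW, imm = lower 32 bits of ptr
 *	insn[i + 1]  (second half),             imm = upper 32 bits of ptr
 *
 * is dumped back to userspace as
 *
 *	insn[i]      src_reg = BPF_PSEUDO_MAP_FD (or _VALUE), imm = map->id
 *	insn[i + 1]  (second half),                           imm = off
 *
 * so kernel addresses never leak while the map remains identifiable.
 */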

static int set_info_rec_size(struct bpf_prog_info *info)
{
	/*
	 * Ensure each info.*_rec_size matches the size the kernel expects,
	 * or allow a zero *_rec_size only when the matching _cnt is also
	 * zero; in that case the kernel writes the expected _rec_size back
	 * into the info.
	 */

	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);

	return 0;
}

static int bpf_prog_get_info_by_fd(struct file *file,
				   struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
	struct bpf_prog_info info;
	u32 info_len = attr->info.info_len;
	struct bpf_prog_kstats stats;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	mutex_lock(&prog->aux->used_maps_mutex);
	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i])) {
				mutex_unlock(&prog->aux->used_maps_mutex);
				return -EFAULT;
			}
	}
	mutex_unlock(&prog->aux->used_maps_mutex);

	err = set_info_rec_size(&info);
	if (err)
		return err;

	bpf_prog_get_stats(prog, &stats);
	info.run_time_ns = stats.nsecs;
	info.run_cnt = stats.cnt;
	info.recursion_misses = stats.misses;

	info.verified_insns = prog->aux->verified_insns;

	if (!bpf_capable()) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		info.nr_jited_ksyms = 0;
		info.nr_jited_func_lens = 0;
		info.nr_func_info = 0;
		info.nr_line_info = 0;
		info.nr_jited_line_info = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if (bpf_prog_is_offloaded(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	if (prog->aux->func_cnt) {
		u32 i;

		info.jited_prog_len = 0;
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}

	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);

			/* for multi-function programs, copy the JITed
			 * instructions for all the functions
			 */
			if (prog->aux->func_cnt) {
				u32 len, free, i;
				u8 *img;

				free = ulen;
				for (i = 0; i < prog->aux->func_cnt; i++) {
					len = prog->aux->func[i]->jited_len;
					len = min_t(u32, len, free);
					img = (u8 *) prog->aux->func[i]->bpf_func;
					if (copy_to_user(uinsns, img, len))
						return -EFAULT;
					uinsns += len;
					free -= len;
					if (!free)
						break;
				}
			} else {
				if (copy_to_user(uinsns, prog->bpf_func, ulen))
					return -EFAULT;
			}
		} else {
			info.jited_prog_insns = 0;
		}
	}

	ulen = info.nr_jited_ksyms;
	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long ksym_addr;
			u64 __user *user_ksyms;
			u32 i;

			/* copy the address of the kernel symbol
			 * corresponding to each function
			 */
			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					ksym_addr = (unsigned long)
						prog->aux->func[i]->bpf_func;
					if (put_user((u64) ksym_addr,
						     &user_ksyms[i]))
						return -EFAULT;
				}
			} else {
				ksym_addr = (unsigned long) prog->bpf_func;
				if (put_user((u64) ksym_addr, &user_ksyms[0]))
					return -EFAULT;
			}
		} else {
			info.jited_ksyms = 0;
		}
	}

	ulen = info.nr_jited_func_lens;
	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			u32 __user *user_lens;
			u32 func_len, i;

			/* copy the JITed image lengths for each function */
			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
			user_lens = u64_to_user_ptr(info.jited_func_lens);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					func_len =
						prog->aux->func[i]->jited_len;
					if (put_user(func_len, &user_lens[i]))
						return -EFAULT;
				}
			} else {
				func_len = prog->jited_len;
				if (put_user(func_len, &user_lens[0]))
					return -EFAULT;
			}
		} else {
			info.jited_func_lens = 0;
		}
	}

	if (prog->aux->btf)
		info.btf_id = btf_obj_id(prog->aux->btf);
	info.attach_btf_id = prog->aux->attach_btf_id;
	if (attach_btf)
		info.attach_btf_obj_id = btf_obj_id(attach_btf);

	ulen = info.nr_func_info;
	info.nr_func_info = prog->aux->func_info_cnt;
	if (info.nr_func_info && ulen) {
		char __user *user_finfo;

		user_finfo = u64_to_user_ptr(info.func_info);
		ulen = min_t(u32, info.nr_func_info, ulen);
		if (copy_to_user(user_finfo, prog->aux->func_info,
				 info.func_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_line_info;
	info.nr_line_info = prog->aux->nr_linfo;
	if (info.nr_line_info && ulen) {
		__u8 __user *user_linfo;

		user_linfo = u64_to_user_ptr(info.line_info);
		ulen = min_t(u32, info.nr_line_info, ulen);
		if (copy_to_user(user_linfo, prog->aux->linfo,
				 info.line_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_jited_line_info;
	if (prog->aux->jited_linfo)
		info.nr_jited_line_info = prog->aux->nr_linfo;
	else
		info.nr_jited_line_info = 0;
	if (info.nr_jited_line_info && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long line_addr;
			__u64 __user *user_linfo;
			u32 i;

			user_linfo = u64_to_user_ptr(info.jited_line_info);
			ulen = min_t(u32, info.nr_jited_line_info, ulen);
			for (i = 0; i < ulen; i++) {
				line_addr = (unsigned long)prog->aux->jited_linfo[i];
				if (put_user((__u64)line_addr, &user_linfo[i]))
					return -EFAULT;
			}
		} else {
			info.jited_line_info = 0;
		}
	}

	ulen = info.nr_prog_tags;
	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
	if (ulen) {
		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
		u32 i;

		user_prog_tags = u64_to_user_ptr(info.prog_tags);
		ulen = min_t(u32, info.nr_prog_tags, ulen);
		if (prog->aux->func_cnt) {
			for (i = 0; i < ulen; i++) {
				if (copy_to_user(user_prog_tags[i],
						 prog->aux->func[i]->tag,
						 BPF_TAG_SIZE))
					return -EFAULT;
			}
		} else {
			if (copy_to_user(user_prog_tags[0],
					 prog->tag, BPF_TAG_SIZE))
				return -EFAULT;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}
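
/*
 * Illustrative userspace counterpart to the function above (a sketch,
 * not kernel code):
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *
 * Variable-sized members (map_ids, xlated_prog_insns, ...) follow the
 * usual two-call pattern: a first call to learn the counts, a second
 * call with user buffers of the reported size.
 */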

static int bpf_map_get_info_by_fd(struct file *file,
				  struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	info.map_extra = map->map_extra;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_obj_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}
	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
		bpf_map_struct_ops_info_fill(&info, map);

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_btf_get_info_by_fd(struct file *file,
				  struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
	if (err)
		return err;

	return btf_get_info_by_fd(btf, attr, uattr);
}

static int bpf_link_get_info_by_fd(struct file *file,
				   struct bpf_link *link,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_link_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = link->type;
	info.id = link->id;
	if (link->prog)
		info.prog_id = link->prog->aux->id;

	if (link->ops->fill_link_info) {
		err = link->ops->fill_link_info(link, &info);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	CLASS(fd, f)(attr->info.bpf_fd);
	if (fd_empty(f))
		return -EBADFD;

	if (fd_file(f)->f_op == &bpf_prog_fops)
		return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
					       uattr);
	else if (fd_file(f)->f_op == &bpf_map_fops)
		return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
					      uattr);
	else if (fd_file(f)->f_op == &btf_fops)
		return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
	else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
		return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
					       attr, uattr);
	return -EINVAL;
}

#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd

static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
	struct bpf_token *token = NULL;

	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (attr->btf_flags & ~BPF_F_TOKEN_FD)
		return -EINVAL;

	if (attr->btf_flags & BPF_F_TOKEN_FD) {
		token = bpf_token_get_from_fd(attr->btf_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);
		if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
			bpf_token_put(token);
			token = NULL;
		}
	}

	if (!bpf_token_capable(token, CAP_BPF)) {
		bpf_token_put(token);
		return -EPERM;
	}

	bpf_token_put(token);

	return btf_new_fd(attr, uattr, uattr_size);
}
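
/*
 * Illustrative BPF_BTF_LOAD usage with a delegated token (a sketch, not
 * kernel code; btf_data/btf_size hold raw BTF and token_fd comes from
 * BPF_TOKEN_CREATE):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.btf = (__u64)(unsigned long)btf_data;
 *	attr.btf_size = btf_size;
 *	attr.btf_flags = BPF_F_TOKEN_FD;
 *	attr.btf_token_fd = token_fd;
 *	btf_fd = syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
 */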

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}

static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	rcu_read_lock();
	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return -ENOENT;

	err = 0;
	file = fget_task(task, fd);
	put_task_struct(task);
	if (!file)
		return -EBADF;

	if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) {
		struct bpf_link *link = file->private_data;

		if (link->ops == &bpf_raw_tp_link_lops) {
			struct bpf_raw_tp_link *raw_tp =
				container_of(link, struct bpf_raw_tp_link, link);
			struct bpf_raw_event_map *btp = raw_tp->btp;

			err = bpf_task_fd_query_copy(attr, uattr,
						     raw_tp->link.prog->aux->id,
						     BPF_FD_TYPE_RAW_TRACEPOINT,
						     btp->tp->name, 0, 0);
			goto put_file;
		}
		goto out_not_supp;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr, NULL);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

out_not_supp:
	err = -ENOTSUPP;
put_file:
	fput(file);
	return err;
}

#define BPF_MAP_BATCH_LAST_FIELD batch.flags

#define BPF_DO_BATCH(fn, ...)			\
	do {					\
		if (!fn) {			\
			err = -ENOTSUPP;	\
			goto err_put;		\
		}				\
		err = fn(__VA_ARGS__);		\
	} while (0)

static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
{
	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
	struct bpf_map *map;
	int err;

	if (CHECK_ATTR(BPF_MAP_BATCH))
		return -EINVAL;

	CLASS(fd, f)(attr->batch.map_fd);

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (has_write)
		bpf_map_write_active_inc(map);
	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}
	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (cmd == BPF_MAP_LOOKUP_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
	else if (cmd == BPF_MAP_UPDATE_BATCH)
		BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr);
	else
		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
err_put:
	if (has_write) {
		maybe_wait_bpf_programs(map);
		bpf_map_write_active_dec(map);
	}
	return err;
}
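
/*
 * A sketch of a batched lookup from userspace (illustrative only; keys
 * and values are caller-provided arrays sized for 'count' entries):
 *
 *	union bpf_attr attr = {};
 *	__u64 out_batch = 0;
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.out_batch = (__u64)(unsigned long)&out_batch;
 *	attr.batch.keys = (__u64)(unsigned long)keys;
 *	attr.batch.values = (__u64)(unsigned long)values;
 *	attr.batch.count = count;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *
 * On return attr.batch.count holds the number of elements actually
 * copied; out_batch is fed back via attr.batch.in_batch to continue
 * the iteration.
 */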

#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_LINK_CREATE))
		return -EINVAL;

	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
		return bpf_struct_ops_link_create(attr);

	prog = bpf_prog_get(attr->link_create.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	ret = bpf_prog_attach_check_attach_type(prog,
						attr->link_create.attach_type);
	if (ret)
		goto out;

	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		ret = cgroup_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_EXT:
		ret = bpf_tracing_prog_attach(prog,
					      attr->link_create.target_fd,
					      attr->link_create.target_btf_id,
					      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_TRACING:
		if (attr->link_create.attach_type != prog->expected_attach_type) {
			ret = -EINVAL;
			goto out;
		}
		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
			ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie);
		else if (prog->expected_attach_type == BPF_TRACE_ITER)
			ret = bpf_iter_link_attach(attr, uattr, prog);
		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
			ret = cgroup_bpf_link_attach(attr, prog);
		else
			ret = bpf_tracing_prog_attach(prog,
						      attr->link_create.target_fd,
						      attr->link_create.target_btf_id,
						      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_SK_LOOKUP:
		ret = netns_bpf_link_create(attr, prog);
		break;
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_SK_SKB:
		ret = sock_map_link_create(attr, prog);
		break;
#ifdef CONFIG_NET
	case BPF_PROG_TYPE_XDP:
		ret = bpf_xdp_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_SCHED_CLS:
		if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
		    attr->link_create.attach_type == BPF_TCX_EGRESS)
			ret = tcx_link_attach(attr, prog);
		else
			ret = netkit_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_NETFILTER:
		ret = bpf_nf_link_attach(attr, prog);
		break;
#endif
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
		ret = bpf_perf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_KPROBE:
		if (attr->link_create.attach_type == BPF_PERF_EVENT)
			ret = bpf_perf_link_attach(attr, prog);
		else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
			 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
			ret = bpf_kprobe_multi_link_attach(attr, prog);
		else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI ||
			 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION)
			ret = bpf_uprobe_multi_link_attach(attr, prog);
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret < 0)
		bpf_prog_put(prog);
	return ret;
}

static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
{
	struct bpf_map *new_map, *old_map = NULL;
	int ret;

	new_map = bpf_map_get(attr->link_update.new_map_fd);
	if (IS_ERR(new_map))
		return PTR_ERR(new_map);

	if (attr->link_update.flags & BPF_F_REPLACE) {
		old_map = bpf_map_get(attr->link_update.old_map_fd);
		if (IS_ERR(old_map)) {
			ret = PTR_ERR(old_map);
			goto out_put;
		}
	} else if (attr->link_update.old_map_fd) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = link->ops->update_map(link, new_map, old_map);

	if (old_map)
		bpf_map_put(old_map);
out_put:
	bpf_map_put(new_map);
	return ret;
}

#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd

static int link_update(union bpf_attr *attr)
{
	struct bpf_prog *old_prog = NULL, *new_prog;
	struct bpf_link *link;
	u32 flags;
	int ret;

	if (CHECK_ATTR(BPF_LINK_UPDATE))
		return -EINVAL;

	flags = attr->link_update.flags;
	if (flags & ~BPF_F_REPLACE)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_update.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->update_map) {
		ret = link_update_map(link, attr);
		goto out_put_link;
	}

	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
	if (IS_ERR(new_prog)) {
		ret = PTR_ERR(new_prog);
		goto out_put_link;
	}

	if (flags & BPF_F_REPLACE) {
		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
		if (IS_ERR(old_prog)) {
			ret = PTR_ERR(old_prog);
			old_prog = NULL;
			goto out_put_progs;
		}
	} else if (attr->link_update.old_prog_fd) {
		ret = -EINVAL;
		goto out_put_progs;
	}

	if (link->ops->update_prog)
		ret = link->ops->update_prog(link, new_prog, old_prog);
	else
		ret = -EINVAL;

out_put_progs:
	if (old_prog)
		bpf_prog_put(old_prog);
	if (ret)
		bpf_prog_put(new_prog);
out_put_link:
	bpf_link_put_direct(link);
	return ret;
}
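
/*
 * Illustrative atomic replacement of a link's program via
 * BPF_LINK_UPDATE (a sketch, not kernel code):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_update.link_fd = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.flags = BPF_F_REPLACE;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 *
 * Without BPF_F_REPLACE, old_prog_fd must be zero and the update is
 * unconditional.
 */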

#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd

static int link_detach(union bpf_attr *attr)
{
	struct bpf_link *link;
	int ret;

	if (CHECK_ATTR(BPF_LINK_DETACH))
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->detach)
		ret = link->ops->detach(link);
	else
		ret = -EOPNOTSUPP;

	bpf_link_put_direct(link);
	return ret;
}

struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(bpf_link_inc_not_zero);

struct bpf_link *bpf_link_by_id(u32 id)
{
	struct bpf_link *link;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&link_idr_lock);
	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
	link = idr_find(&link_idr, id);
	if (link) {
		if (link->id)
			link = bpf_link_inc_not_zero(link);
		else
			link = ERR_PTR(-EAGAIN);
	} else {
		link = ERR_PTR(-ENOENT);
	}
	spin_unlock_bh(&link_idr_lock);
	return link;
}

struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
{
	struct bpf_link *link;

	spin_lock_bh(&link_idr_lock);
again:
	link = idr_get_next(&link_idr, id);
	if (link) {
		link = bpf_link_inc_not_zero(link);
		if (IS_ERR(link)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&link_idr_lock);

	return link;
}

#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_link *link;
	u32 id = attr->link_id;
	int fd;

	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	link = bpf_link_by_id(id);
	if (IS_ERR(link))
		return PTR_ERR(link);

	fd = bpf_link_new_fd(link);
	if (fd < 0)
		bpf_link_put_direct(link);

	return fd;
}

DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
	mutex_lock(&bpf_stats_enabled_mutex);
	static_key_slow_dec(&bpf_stats_enabled_key.key);
	mutex_unlock(&bpf_stats_enabled_mutex);
	return 0;
}

static const struct file_operations bpf_stats_fops = {
	.release = bpf_stats_release,
};

static int bpf_enable_runtime_stats(void)
{
	int fd;

	mutex_lock(&bpf_stats_enabled_mutex);

	/* Set a very high limit to avoid overflow */
	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
		mutex_unlock(&bpf_stats_enabled_mutex);
		return -EBUSY;
	}

	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
	if (fd >= 0)
		static_key_slow_inc(&bpf_stats_enabled_key.key);

	mutex_unlock(&bpf_stats_enabled_mutex);
	return fd;
}
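
/*
 * Illustrative use from userspace (a sketch, not kernel code): run-time
 * stats stay enabled for as long as the returned fd is held.
 *
 *	union bpf_attr attr = { .enable_stats = { .type = BPF_STATS_RUN_TIME } };
 *	int stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *
 *	... sample run_time_ns/run_cnt via BPF_OBJ_GET_INFO_BY_FD ...
 *
 *	close(stats_fd);	// bpf_stats_release() drops the static key
 */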

#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_ENABLE_STATS))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (attr->enable_stats.type) {
	case BPF_STATS_RUN_TIME:
		return bpf_enable_runtime_stats();
	default:
		break;
	}
	return -EINVAL;
}

#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put_direct(link);

	return err;
}

#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags

static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			bpf_map_put(map);
			goto out_unlock;
		}

	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* The bpf program will not access the bpf map, but for the sake of
	 * simplicity, increase sleepable_refcnt for sleepable programs as
	 * well.
	 */
	if (prog->sleepable)
		atomic64_inc(&map->sleepable_refcnt);
	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}

#define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd

static int token_create(union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_TOKEN_CREATE))
		return -EINVAL;

	/* no flags are supported yet */
	if (attr->token_create.flags)
		return -EINVAL;

	return bpf_token_create(attr);
}

static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
{
	union bpf_attr attr;
	int err;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_bpfptr(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr, uattr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr, uattr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr, size);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr.user);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr.user);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr, uattr, size);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr.user);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr, uattr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	case BPF_LINK_DETACH:
		err = link_detach(&attr);
		break;
	case BPF_PROG_BIND_MAP:
		err = bpf_prog_bind_map(&attr);
		break;
	case BPF_TOKEN_CREATE:
		err = token_create(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
}

static bool syscall_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= U16_MAX)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
	switch (cmd) {
	case BPF_MAP_CREATE:
	case BPF_MAP_DELETE_ELEM:
	case BPF_MAP_UPDATE_ELEM:
	case BPF_MAP_FREEZE:
	case BPF_MAP_GET_FD_BY_ID:
	case BPF_PROG_LOAD:
	case BPF_BTF_LOAD:
	case BPF_LINK_CREATE:
	case BPF_RAW_TRACEPOINT_OPEN:
		break;
	default:
		return -EINVAL;
	}
	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}
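
/*
 * A sketch of a BPF_PROG_TYPE_SYSCALL program using this helper (written
 * with libbpf conventions; illustrative only, not part of this file):
 *
 *	SEC("syscall")
 *	int create_array(void *ctx)
 *	{
 *		union bpf_attr attr = {
 *			.map_type = BPF_MAP_TYPE_ARRAY,
 *			.key_size = 4,
 *			.value_size = 8,
 *			.max_entries = 1,
 *		};
 *
 *		return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 *
 * The whitelist above restricts such programs to load/create/attach
 * style commands; anything else returns -EINVAL.
 */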
5931
5932
5933/* To shut up -Wmissing-prototypes.
5934 * This function is used by the kernel light skeleton
5935 * to load bpf programs when modules are loaded or during kernel boot.
5936 * See tools/lib/bpf/skel_internal.h
5937 */
5938int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
5939
5940int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
5941{
5942 struct bpf_prog * __maybe_unused prog;
5943 struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5944
5945 switch (cmd) {
5946#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5947 case BPF_PROG_TEST_RUN:
5948 if (attr->test.data_in || attr->test.data_out ||
5949 attr->test.ctx_out || attr->test.duration ||
5950 attr->test.repeat || attr->test.flags)
5951 return -EINVAL;
5952
5953 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5954 if (IS_ERR(prog))
5955 return PTR_ERR(prog);
5956
5957 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5958 attr->test.ctx_size_in > U16_MAX) {
5959 bpf_prog_put(prog);
5960 return -EINVAL;
5961 }
5962
5963 run_ctx.bpf_cookie = 0;
5964 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
5965 /* recursion detected */
5966 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
5967 bpf_prog_put(prog);
5968 return -EBUSY;
5969 }
5970 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5971 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
5972 &run_ctx);
5973 bpf_prog_put(prog);
5974 return 0;
5975#endif
5976 default:
5977 return ____bpf_sys_bpf(cmd, attr, size);
5978 }
5979}
5980EXPORT_SYMBOL(kern_sys_bpf);

static const struct bpf_func_proto bpf_sys_bpf_proto = {
	.func = bpf_sys_bpf,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
};

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id, prog);
}

BPF_CALL_1(bpf_sys_close, u32, fd)
{
	/* When bpf program calls this helper there should not be
	 * an fdget() without matching completed fdput().
	 * This helper is allowed in the following callchain only:
	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
	 */
	return close_fd(fd);
}

static const struct bpf_func_proto bpf_sys_close_proto = {
	.func = bpf_sys_close,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

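/* Resolve a kernel symbol's address by name. The name buffer must be
 * NUL-terminated within name_sz, flags must be 0, and the caller needs
 * kptr-dump privileges (bpf_dump_raw_ok()). Illustrative use from a
 * syscall program (symbol chosen arbitrarily):
 *
 *	u64 addr = 0;
 *	int err = bpf_kallsyms_lookup_name("bpf_map_put", 12, 0, &addr);
 *	// err == 0 and addr != 0 on success, -ENOENT if not found
 */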
BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	*res = 0;
	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ? 0 : -ENOENT;
}

static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
	.func = bpf_kallsyms_lookup_name,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size = sizeof(u64),
};

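/* Helper set for syscall programs. Note that bpf_sys_bpf() itself is only
 * offered when bpf_token_capable(prog->aux->token, CAP_PERFMON) holds;
 * anything not handled here falls back to the tracing helper set.
 */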
static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sys_bpf:
		return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
		       ? NULL : &bpf_sys_bpf_proto;
	case BPF_FUNC_btf_find_by_name_kind:
		return &bpf_btf_find_by_name_kind_proto;
	case BPF_FUNC_sys_close:
		return &bpf_sys_close_proto;
	case BPF_FUNC_kallsyms_lookup_name:
		return &bpf_kallsyms_lookup_name_proto;
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
}

const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
	.get_func_proto = syscall_prog_func_proto,
	.is_valid_access = syscall_prog_is_valid_access,
};

const struct bpf_prog_ops bpf_syscall_prog_ops = {
	.test_run = bpf_prog_test_run_syscall,
};

#ifdef CONFIG_SYSCTL
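/* Handler for the kernel.bpf_stats_enabled sysctl. The sysctl toggles a
 * static key, so the inc/dec must stay balanced: saved_val remembers the
 * last committed value, and repeated writes of the same value are no-ops.
 * E.g. (illustrative):
 *
 *	echo 1 > /proc/sys/kernel/bpf_stats_enabled   (key incremented once)
 *	echo 1 > /proc/sys/kernel/bpf_stats_enabled   (no further increment)
 *	echo 0 > /proc/sys/kernel/bpf_stats_enabled   (key decremented back)
 */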
static int bpf_stats_handler(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct static_key *key = (struct static_key *)table->data;
	static int saved_val;
	int val, ret;
	struct ctl_table tmp = {
		.data   = &val,
		.maxlen = sizeof(val),
		.mode   = table->mode,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	};

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&bpf_stats_enabled_mutex);
	val = saved_val;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret && val != saved_val) {
		if (val)
			static_key_slow_inc(key);
		else
			static_key_slow_dec(key);
		saved_val = val;
	}
	mutex_unlock(&bpf_stats_enabled_mutex);
	return ret;
}

void __weak unpriv_ebpf_notify(int new_state)
{
}

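/* Handler for kernel.unprivileged_bpf_disabled. Accepted values:
 *   0 - unprivileged bpf() syscall usage is allowed
 *   1 - disabled and locked: once set, it cannot be changed again
 *   2 - disabled, but a CAP_SYS_ADMIN writer may still switch to 0 or 1
 * The one-way "locked" semantics come from the locked_state check below:
 * when the current value is 1, writing any other value returns -EPERM.
 */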
static int bpf_unpriv_handler(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, unpriv_enable = *(int *)table->data;
	bool locked_state = unpriv_enable == 1;
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	tmp.data = &unpriv_enable;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret) {
		if (locked_state && unpriv_enable != 1)
			return -EPERM;
		*(int *)table->data = unpriv_enable;
	}

	if (write)
		unpriv_ebpf_notify(unpriv_enable);

	return ret;
}

static struct ctl_table bpf_syscall_table[] = {
	{
		.procname	= "unprivileged_bpf_disabled",
		.data		= &sysctl_unprivileged_bpf_disabled,
		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
		.mode		= 0644,
		.proc_handler	= bpf_unpriv_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "bpf_stats_enabled",
		.data		= &bpf_stats_enabled_key.key,
		.mode		= 0644,
		.proc_handler	= bpf_stats_handler,
	},
};

static int __init bpf_syscall_sysctl_init(void)
{
	register_sysctl_init("kernel", bpf_syscall_table);
	return 0;
}
late_initcall(bpf_syscall_sysctl_init);
#endif /* CONFIG_SYSCTL */
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 */
4#include <linux/bpf.h>
5#include <linux/bpf-cgroup.h>
6#include <linux/bpf_trace.h>
7#include <linux/bpf_lirc.h>
8#include <linux/bpf_verifier.h>
9#include <linux/bsearch.h>
10#include <linux/btf.h>
11#include <linux/syscalls.h>
12#include <linux/slab.h>
13#include <linux/sched/signal.h>
14#include <linux/vmalloc.h>
15#include <linux/mmzone.h>
16#include <linux/anon_inodes.h>
17#include <linux/fdtable.h>
18#include <linux/file.h>
19#include <linux/fs.h>
20#include <linux/license.h>
21#include <linux/filter.h>
22#include <linux/kernel.h>
23#include <linux/idr.h>
24#include <linux/cred.h>
25#include <linux/timekeeping.h>
26#include <linux/ctype.h>
27#include <linux/nospec.h>
28#include <linux/audit.h>
29#include <uapi/linux/btf.h>
30#include <linux/pgtable.h>
31#include <linux/bpf_lsm.h>
32#include <linux/poll.h>
33#include <linux/sort.h>
34#include <linux/bpf-netns.h>
35#include <linux/rcupdate_trace.h>
36#include <linux/memcontrol.h>
37#include <linux/trace_events.h>
38
39#include <net/netfilter/nf_bpf_link.h>
40#include <net/netkit.h>
41#include <net/tcx.h>
42
43#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
44 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
45 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
46#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
47#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
48#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
49 IS_FD_HASH(map))
50
51#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
52
53DEFINE_PER_CPU(int, bpf_prog_active);
54static DEFINE_IDR(prog_idr);
55static DEFINE_SPINLOCK(prog_idr_lock);
56static DEFINE_IDR(map_idr);
57static DEFINE_SPINLOCK(map_idr_lock);
58static DEFINE_IDR(link_idr);
59static DEFINE_SPINLOCK(link_idr_lock);
60
61int sysctl_unprivileged_bpf_disabled __read_mostly =
62 IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
63
64static const struct bpf_map_ops * const bpf_map_types[] = {
65#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
66#define BPF_MAP_TYPE(_id, _ops) \
67 [_id] = &_ops,
68#define BPF_LINK_TYPE(_id, _name)
69#include <linux/bpf_types.h>
70#undef BPF_PROG_TYPE
71#undef BPF_MAP_TYPE
72#undef BPF_LINK_TYPE
73};
74
75/*
76 * If we're handed a bigger struct than we know of, ensure all the unknown bits
77 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
78 * we don't know about yet.
79 *
80 * There is a ToCToU between this function call and the following
81 * copy_from_user() call. However, this is not a concern since this function is
82 * meant to be a future-proofing of bits.
83 */
84int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
85 size_t expected_size,
86 size_t actual_size)
87{
88 int res;
89
90 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */
91 return -E2BIG;
92
93 if (actual_size <= expected_size)
94 return 0;
95
96 if (uaddr.is_kernel)
97 res = memchr_inv(uaddr.kernel + expected_size, 0,
98 actual_size - expected_size) == NULL;
99 else
100 res = check_zeroed_user(uaddr.user + expected_size,
101 actual_size - expected_size);
102 if (res < 0)
103 return res;
104 return res ? 0 : -E2BIG;
105}
106
107const struct bpf_map_ops bpf_map_offload_ops = {
108 .map_meta_equal = bpf_map_meta_equal,
109 .map_alloc = bpf_map_offload_map_alloc,
110 .map_free = bpf_map_offload_map_free,
111 .map_check_btf = map_check_no_btf,
112 .map_mem_usage = bpf_map_offload_map_mem_usage,
113};
114
115static void bpf_map_write_active_inc(struct bpf_map *map)
116{
117 atomic64_inc(&map->writecnt);
118}
119
120static void bpf_map_write_active_dec(struct bpf_map *map)
121{
122 atomic64_dec(&map->writecnt);
123}
124
125bool bpf_map_write_active(const struct bpf_map *map)
126{
127 return atomic64_read(&map->writecnt) != 0;
128}
129
130static u32 bpf_map_value_size(const struct bpf_map *map)
131{
132 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
133 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
134 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
135 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
136 return round_up(map->value_size, 8) * num_possible_cpus();
137 else if (IS_FD_MAP(map))
138 return sizeof(u32);
139 else
140 return map->value_size;
141}
142
143static void maybe_wait_bpf_programs(struct bpf_map *map)
144{
145 /* Wait for any running non-sleepable BPF programs to complete so that
146 * userspace, when we return to it, knows that all non-sleepable
147 * programs that could be running use the new map value. For sleepable
148 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
149 * for the completions of these programs, but considering the waiting
150 * time can be very long and userspace may think it will hang forever,
151 * so don't handle sleepable BPF programs now.
152 */
153 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
154 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
155 synchronize_rcu();
156}
157
158static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
159 void *key, void *value, __u64 flags)
160{
161 int err;
162
163 /* Need to create a kthread, thus must support schedule */
164 if (bpf_map_is_offloaded(map)) {
165 return bpf_map_offload_update_elem(map, key, value, flags);
166 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
167 map->map_type == BPF_MAP_TYPE_ARENA ||
168 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
169 return map->ops->map_update_elem(map, key, value, flags);
170 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
171 map->map_type == BPF_MAP_TYPE_SOCKMAP) {
172 return sock_map_update_elem_sys(map, key, value, flags);
173 } else if (IS_FD_PROG_ARRAY(map)) {
174 return bpf_fd_array_map_update_elem(map, map_file, key, value,
175 flags);
176 }
177
178 bpf_disable_instrumentation();
179 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
180 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
181 err = bpf_percpu_hash_update(map, key, value, flags);
182 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
183 err = bpf_percpu_array_update(map, key, value, flags);
184 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
185 err = bpf_percpu_cgroup_storage_update(map, key, value,
186 flags);
187 } else if (IS_FD_ARRAY(map)) {
188 err = bpf_fd_array_map_update_elem(map, map_file, key, value,
189 flags);
190 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
191 err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
192 flags);
193 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
194 /* rcu_read_lock() is not needed */
195 err = bpf_fd_reuseport_array_update_elem(map, key, value,
196 flags);
197 } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
198 map->map_type == BPF_MAP_TYPE_STACK ||
199 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
200 err = map->ops->map_push_elem(map, value, flags);
201 } else {
202 rcu_read_lock();
203 err = map->ops->map_update_elem(map, key, value, flags);
204 rcu_read_unlock();
205 }
206 bpf_enable_instrumentation();
207
208 return err;
209}
210
211static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
212 __u64 flags)
213{
214 void *ptr;
215 int err;
216
217 if (bpf_map_is_offloaded(map))
218 return bpf_map_offload_lookup_elem(map, key, value);
219
220 bpf_disable_instrumentation();
221 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
222 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
223 err = bpf_percpu_hash_copy(map, key, value);
224 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
225 err = bpf_percpu_array_copy(map, key, value);
226 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
227 err = bpf_percpu_cgroup_storage_copy(map, key, value);
228 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
229 err = bpf_stackmap_copy(map, key, value);
230 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
231 err = bpf_fd_array_map_lookup_elem(map, key, value);
232 } else if (IS_FD_HASH(map)) {
233 err = bpf_fd_htab_map_lookup_elem(map, key, value);
234 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
235 err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
236 } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
237 map->map_type == BPF_MAP_TYPE_STACK ||
238 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
239 err = map->ops->map_peek_elem(map, value);
240 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
241 /* struct_ops map requires directly updating "value" */
242 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
243 } else {
244 rcu_read_lock();
245 if (map->ops->map_lookup_elem_sys_only)
246 ptr = map->ops->map_lookup_elem_sys_only(map, key);
247 else
248 ptr = map->ops->map_lookup_elem(map, key);
249 if (IS_ERR(ptr)) {
250 err = PTR_ERR(ptr);
251 } else if (!ptr) {
252 err = -ENOENT;
253 } else {
254 err = 0;
255 if (flags & BPF_F_LOCK)
256 /* lock 'ptr' and copy everything but lock */
257 copy_map_value_locked(map, value, ptr, true);
258 else
259 copy_map_value(map, value, ptr);
260 /* mask lock and timer, since value wasn't zero inited */
261 check_and_init_map_value(map, value);
262 }
263 rcu_read_unlock();
264 }
265
266 bpf_enable_instrumentation();
267
268 return err;
269}
270
271/* Please, do not use this function outside from the map creation path
272 * (e.g. in map update path) without taking care of setting the active
273 * memory cgroup (see at bpf_map_kmalloc_node() for example).
274 */
275static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
276{
277 /* We really just want to fail instead of triggering OOM killer
278 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
279 * which is used for lower order allocation requests.
280 *
281 * It has been observed that higher order allocation requests done by
282 * vmalloc with __GFP_NORETRY being set might fail due to not trying
283 * to reclaim memory from the page cache, thus we set
284 * __GFP_RETRY_MAYFAIL to avoid such situations.
285 */
286
287 gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
288 unsigned int flags = 0;
289 unsigned long align = 1;
290 void *area;
291
292 if (size >= SIZE_MAX)
293 return NULL;
294
295 /* kmalloc()'ed memory can't be mmap()'ed */
296 if (mmapable) {
297 BUG_ON(!PAGE_ALIGNED(size));
298 align = SHMLBA;
299 flags = VM_USERMAP;
300 } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
301 area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
302 numa_node);
303 if (area != NULL)
304 return area;
305 }
306
307 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
308 gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
309 flags, numa_node, __builtin_return_address(0));
310}
311
312void *bpf_map_area_alloc(u64 size, int numa_node)
313{
314 return __bpf_map_area_alloc(size, numa_node, false);
315}
316
317void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
318{
319 return __bpf_map_area_alloc(size, numa_node, true);
320}
321
322void bpf_map_area_free(void *area)
323{
324 kvfree(area);
325}
326
327static u32 bpf_map_flags_retain_permanent(u32 flags)
328{
329 /* Some map creation flags are not tied to the map object but
330 * rather to the map fd instead, so they have no meaning upon
331 * map object inspection since multiple file descriptors with
332 * different (access) properties can exist here. Thus, given
333 * this has zero meaning for the map itself, lets clear these
334 * from here.
335 */
336 return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
337}
338
339void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
340{
341 map->map_type = attr->map_type;
342 map->key_size = attr->key_size;
343 map->value_size = attr->value_size;
344 map->max_entries = attr->max_entries;
345 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
346 map->numa_node = bpf_map_attr_numa_node(attr);
347 map->map_extra = attr->map_extra;
348}
349
350static int bpf_map_alloc_id(struct bpf_map *map)
351{
352 int id;
353
354 idr_preload(GFP_KERNEL);
355 spin_lock_bh(&map_idr_lock);
356 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
357 if (id > 0)
358 map->id = id;
359 spin_unlock_bh(&map_idr_lock);
360 idr_preload_end();
361
362 if (WARN_ON_ONCE(!id))
363 return -ENOSPC;
364
365 return id > 0 ? 0 : id;
366}
367
368void bpf_map_free_id(struct bpf_map *map)
369{
370 unsigned long flags;
371
372 /* Offloaded maps are removed from the IDR store when their device
373 * disappears - even if someone holds an fd to them they are unusable,
374 * the memory is gone, all ops will fail; they are simply waiting for
375 * refcnt to drop to be freed.
376 */
377 if (!map->id)
378 return;
379
380 spin_lock_irqsave(&map_idr_lock, flags);
381
382 idr_remove(&map_idr, map->id);
383 map->id = 0;
384
385 spin_unlock_irqrestore(&map_idr_lock, flags);
386}
387
388#ifdef CONFIG_MEMCG_KMEM
389static void bpf_map_save_memcg(struct bpf_map *map)
390{
391 /* Currently if a map is created by a process belonging to the root
392 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
393 * So we have to check map->objcg for being NULL each time it's
394 * being used.
395 */
396 if (memcg_bpf_enabled())
397 map->objcg = get_obj_cgroup_from_current();
398}
399
400static void bpf_map_release_memcg(struct bpf_map *map)
401{
402 if (map->objcg)
403 obj_cgroup_put(map->objcg);
404}
405
406static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
407{
408 if (map->objcg)
409 return get_mem_cgroup_from_objcg(map->objcg);
410
411 return root_mem_cgroup;
412}
413
414void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
415 int node)
416{
417 struct mem_cgroup *memcg, *old_memcg;
418 void *ptr;
419
420 memcg = bpf_map_get_memcg(map);
421 old_memcg = set_active_memcg(memcg);
422 ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
423 set_active_memcg(old_memcg);
424 mem_cgroup_put(memcg);
425
426 return ptr;
427}
428
429void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
430{
431 struct mem_cgroup *memcg, *old_memcg;
432 void *ptr;
433
434 memcg = bpf_map_get_memcg(map);
435 old_memcg = set_active_memcg(memcg);
436 ptr = kzalloc(size, flags | __GFP_ACCOUNT);
437 set_active_memcg(old_memcg);
438 mem_cgroup_put(memcg);
439
440 return ptr;
441}
442
443void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
444 gfp_t flags)
445{
446 struct mem_cgroup *memcg, *old_memcg;
447 void *ptr;
448
449 memcg = bpf_map_get_memcg(map);
450 old_memcg = set_active_memcg(memcg);
451 ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
452 set_active_memcg(old_memcg);
453 mem_cgroup_put(memcg);
454
455 return ptr;
456}
457
458void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
459 size_t align, gfp_t flags)
460{
461 struct mem_cgroup *memcg, *old_memcg;
462 void __percpu *ptr;
463
464 memcg = bpf_map_get_memcg(map);
465 old_memcg = set_active_memcg(memcg);
466 ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
467 set_active_memcg(old_memcg);
468 mem_cgroup_put(memcg);
469
470 return ptr;
471}
472
473#else
474static void bpf_map_save_memcg(struct bpf_map *map)
475{
476}
477
478static void bpf_map_release_memcg(struct bpf_map *map)
479{
480}
481#endif
482
483int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
484 unsigned long nr_pages, struct page **pages)
485{
486 unsigned long i, j;
487 struct page *pg;
488 int ret = 0;
489#ifdef CONFIG_MEMCG_KMEM
490 struct mem_cgroup *memcg, *old_memcg;
491
492 memcg = bpf_map_get_memcg(map);
493 old_memcg = set_active_memcg(memcg);
494#endif
495 for (i = 0; i < nr_pages; i++) {
496 pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);
497
498 if (pg) {
499 pages[i] = pg;
500 continue;
501 }
502 for (j = 0; j < i; j++)
503 __free_page(pages[j]);
504 ret = -ENOMEM;
505 break;
506 }
507
508#ifdef CONFIG_MEMCG_KMEM
509 set_active_memcg(old_memcg);
510 mem_cgroup_put(memcg);
511#endif
512 return ret;
513}
514
515
516static int btf_field_cmp(const void *a, const void *b)
517{
518 const struct btf_field *f1 = a, *f2 = b;
519
520 if (f1->offset < f2->offset)
521 return -1;
522 else if (f1->offset > f2->offset)
523 return 1;
524 return 0;
525}
526
527struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
528 u32 field_mask)
529{
530 struct btf_field *field;
531
532 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
533 return NULL;
534 field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
535 if (!field || !(field->type & field_mask))
536 return NULL;
537 return field;
538}
539
540void btf_record_free(struct btf_record *rec)
541{
542 int i;
543
544 if (IS_ERR_OR_NULL(rec))
545 return;
546 for (i = 0; i < rec->cnt; i++) {
547 switch (rec->fields[i].type) {
548 case BPF_KPTR_UNREF:
549 case BPF_KPTR_REF:
550 case BPF_KPTR_PERCPU:
551 if (rec->fields[i].kptr.module)
552 module_put(rec->fields[i].kptr.module);
553 btf_put(rec->fields[i].kptr.btf);
554 break;
555 case BPF_LIST_HEAD:
556 case BPF_LIST_NODE:
557 case BPF_RB_ROOT:
558 case BPF_RB_NODE:
559 case BPF_SPIN_LOCK:
560 case BPF_TIMER:
561 case BPF_REFCOUNT:
562 /* Nothing to release */
563 break;
564 default:
565 WARN_ON_ONCE(1);
566 continue;
567 }
568 }
569 kfree(rec);
570}
571
572void bpf_map_free_record(struct bpf_map *map)
573{
574 btf_record_free(map->record);
575 map->record = NULL;
576}
577
578struct btf_record *btf_record_dup(const struct btf_record *rec)
579{
580 const struct btf_field *fields;
581 struct btf_record *new_rec;
582 int ret, size, i;
583
584 if (IS_ERR_OR_NULL(rec))
585 return NULL;
586 size = offsetof(struct btf_record, fields[rec->cnt]);
587 new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
588 if (!new_rec)
589 return ERR_PTR(-ENOMEM);
590 /* Do a deep copy of the btf_record */
591 fields = rec->fields;
592 new_rec->cnt = 0;
593 for (i = 0; i < rec->cnt; i++) {
594 switch (fields[i].type) {
595 case BPF_KPTR_UNREF:
596 case BPF_KPTR_REF:
597 case BPF_KPTR_PERCPU:
598 btf_get(fields[i].kptr.btf);
599 if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
600 ret = -ENXIO;
601 goto free;
602 }
603 break;
604 case BPF_LIST_HEAD:
605 case BPF_LIST_NODE:
606 case BPF_RB_ROOT:
607 case BPF_RB_NODE:
608 case BPF_SPIN_LOCK:
609 case BPF_TIMER:
610 case BPF_REFCOUNT:
611 /* Nothing to acquire */
612 break;
613 default:
614 ret = -EFAULT;
615 WARN_ON_ONCE(1);
616 goto free;
617 }
618 new_rec->cnt++;
619 }
620 return new_rec;
621free:
622 btf_record_free(new_rec);
623 return ERR_PTR(ret);
624}
625
626bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
627{
628 bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
629 int size;
630
631 if (!a_has_fields && !b_has_fields)
632 return true;
633 if (a_has_fields != b_has_fields)
634 return false;
635 if (rec_a->cnt != rec_b->cnt)
636 return false;
637 size = offsetof(struct btf_record, fields[rec_a->cnt]);
638 /* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
639 * members are zeroed out. So memcmp is safe to do without worrying
640 * about padding/unused fields.
641 *
642 * While spin_lock, timer, and kptr have no relation to map BTF,
643 * list_head metadata is specific to map BTF, the btf and value_rec
644 * members in particular. btf is the map BTF, while value_rec points to
645 * btf_record in that map BTF.
646 *
647 * So while by default, we don't rely on the map BTF (which the records
648 * were parsed from) matching for both records, which is not backwards
649 * compatible, in case list_head is part of it, we implicitly rely on
650 * that by way of depending on memcmp succeeding for it.
651 */
652 return !memcmp(rec_a, rec_b, size);
653}
654
655void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
656{
657 if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
658 return;
659 bpf_timer_cancel_and_free(obj + rec->timer_off);
660}
661
662void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
663{
664 const struct btf_field *fields;
665 int i;
666
667 if (IS_ERR_OR_NULL(rec))
668 return;
669 fields = rec->fields;
670 for (i = 0; i < rec->cnt; i++) {
671 struct btf_struct_meta *pointee_struct_meta;
672 const struct btf_field *field = &fields[i];
673 void *field_ptr = obj + field->offset;
674 void *xchgd_field;
675
676 switch (fields[i].type) {
677 case BPF_SPIN_LOCK:
678 break;
679 case BPF_TIMER:
680 bpf_timer_cancel_and_free(field_ptr);
681 break;
682 case BPF_KPTR_UNREF:
683 WRITE_ONCE(*(u64 *)field_ptr, 0);
684 break;
685 case BPF_KPTR_REF:
686 case BPF_KPTR_PERCPU:
687 xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
688 if (!xchgd_field)
689 break;
690
691 if (!btf_is_kernel(field->kptr.btf)) {
692 pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
693 field->kptr.btf_id);
694 migrate_disable();
695 __bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
696 pointee_struct_meta->record : NULL,
697 fields[i].type == BPF_KPTR_PERCPU);
698 migrate_enable();
699 } else {
700 field->kptr.dtor(xchgd_field);
701 }
702 break;
703 case BPF_LIST_HEAD:
704 if (WARN_ON_ONCE(rec->spin_lock_off < 0))
705 continue;
706 bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
707 break;
708 case BPF_RB_ROOT:
709 if (WARN_ON_ONCE(rec->spin_lock_off < 0))
710 continue;
711 bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
712 break;
713 case BPF_LIST_NODE:
714 case BPF_RB_NODE:
715 case BPF_REFCOUNT:
716 break;
717 default:
718 WARN_ON_ONCE(1);
719 continue;
720 }
721 }
722}
723
724/* called from workqueue */
725static void bpf_map_free_deferred(struct work_struct *work)
726{
727 struct bpf_map *map = container_of(work, struct bpf_map, work);
728 struct btf_record *rec = map->record;
729 struct btf *btf = map->btf;
730
731 security_bpf_map_free(map);
732 bpf_map_release_memcg(map);
733 /* implementation dependent freeing */
734 map->ops->map_free(map);
735 /* Delay freeing of btf_record for maps, as map_free
736 * callback usually needs access to them. It is better to do it here
737 * than require each callback to do the free itself manually.
738 *
739 * Note that the btf_record stashed in map->inner_map_meta->record was
740 * already freed using the map_free callback for map in map case which
741 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
742 * template bpf_map struct used during verification.
743 */
744 btf_record_free(rec);
745 /* Delay freeing of btf for maps, as map_free callback may need
746 * struct_meta info which will be freed with btf_put().
747 */
748 btf_put(btf);
749}
750
751static void bpf_map_put_uref(struct bpf_map *map)
752{
753 if (atomic64_dec_and_test(&map->usercnt)) {
754 if (map->ops->map_release_uref)
755 map->ops->map_release_uref(map);
756 }
757}
758
759static void bpf_map_free_in_work(struct bpf_map *map)
760{
761 INIT_WORK(&map->work, bpf_map_free_deferred);
762 /* Avoid spawning kworkers, since they all might contend
763 * for the same mutex like slab_mutex.
764 */
765 queue_work(system_unbound_wq, &map->work);
766}
767
768static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
769{
770 bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
771}
772
773static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
774{
775 if (rcu_trace_implies_rcu_gp())
776 bpf_map_free_rcu_gp(rcu);
777 else
778 call_rcu(rcu, bpf_map_free_rcu_gp);
779}
780
781/* decrement map refcnt and schedule it for freeing via workqueue
782 * (underlying map implementation ops->map_free() might sleep)
783 */
784void bpf_map_put(struct bpf_map *map)
785{
786 if (atomic64_dec_and_test(&map->refcnt)) {
787 /* bpf_map_free_id() must be called first */
788 bpf_map_free_id(map);
789
790 WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
791 if (READ_ONCE(map->free_after_mult_rcu_gp))
792 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
793 else if (READ_ONCE(map->free_after_rcu_gp))
794 call_rcu(&map->rcu, bpf_map_free_rcu_gp);
795 else
796 bpf_map_free_in_work(map);
797 }
798}
799EXPORT_SYMBOL_GPL(bpf_map_put);
800
801void bpf_map_put_with_uref(struct bpf_map *map)
802{
803 bpf_map_put_uref(map);
804 bpf_map_put(map);
805}
806
807static int bpf_map_release(struct inode *inode, struct file *filp)
808{
809 struct bpf_map *map = filp->private_data;
810
811 if (map->ops->map_release)
812 map->ops->map_release(map, filp);
813
814 bpf_map_put_with_uref(map);
815 return 0;
816}
817
818static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
819{
820 fmode_t mode = f.file->f_mode;
821
822 /* Our file permissions may have been overridden by global
823 * map permissions facing syscall side.
824 */
825 if (READ_ONCE(map->frozen))
826 mode &= ~FMODE_CAN_WRITE;
827 return mode;
828}
829
830#ifdef CONFIG_PROC_FS
831/* Show the memory usage of a bpf map */
832static u64 bpf_map_memory_usage(const struct bpf_map *map)
833{
834 return map->ops->map_mem_usage(map);
835}
836
837static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
838{
839 struct bpf_map *map = filp->private_data;
840 u32 type = 0, jited = 0;
841
842 if (map_type_contains_progs(map)) {
843 spin_lock(&map->owner.lock);
844 type = map->owner.type;
845 jited = map->owner.jited;
846 spin_unlock(&map->owner.lock);
847 }
848
849 seq_printf(m,
850 "map_type:\t%u\n"
851 "key_size:\t%u\n"
852 "value_size:\t%u\n"
853 "max_entries:\t%u\n"
854 "map_flags:\t%#x\n"
855 "map_extra:\t%#llx\n"
856 "memlock:\t%llu\n"
857 "map_id:\t%u\n"
858 "frozen:\t%u\n",
859 map->map_type,
860 map->key_size,
861 map->value_size,
862 map->max_entries,
863 map->map_flags,
864 (unsigned long long)map->map_extra,
865 bpf_map_memory_usage(map),
866 map->id,
867 READ_ONCE(map->frozen));
868 if (type) {
869 seq_printf(m, "owner_prog_type:\t%u\n", type);
870 seq_printf(m, "owner_jited:\t%u\n", jited);
871 }
872}
873#endif
874
875static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
876 loff_t *ppos)
877{
878 /* We need this handler such that alloc_file() enables
879 * f_mode with FMODE_CAN_READ.
880 */
881 return -EINVAL;
882}
883
884static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
885 size_t siz, loff_t *ppos)
886{
887 /* We need this handler such that alloc_file() enables
888 * f_mode with FMODE_CAN_WRITE.
889 */
890 return -EINVAL;
891}
892
893/* called for any extra memory-mapped regions (except initial) */
894static void bpf_map_mmap_open(struct vm_area_struct *vma)
895{
896 struct bpf_map *map = vma->vm_file->private_data;
897
898 if (vma->vm_flags & VM_MAYWRITE)
899 bpf_map_write_active_inc(map);
900}
901
902/* called for all unmapped memory region (including initial) */
903static void bpf_map_mmap_close(struct vm_area_struct *vma)
904{
905 struct bpf_map *map = vma->vm_file->private_data;
906
907 if (vma->vm_flags & VM_MAYWRITE)
908 bpf_map_write_active_dec(map);
909}
910
911static const struct vm_operations_struct bpf_map_default_vmops = {
912 .open = bpf_map_mmap_open,
913 .close = bpf_map_mmap_close,
914};
915
916static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
917{
918 struct bpf_map *map = filp->private_data;
919 int err;
920
921 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
922 return -ENOTSUPP;
923
924 if (!(vma->vm_flags & VM_SHARED))
925 return -EINVAL;
926
927 mutex_lock(&map->freeze_mutex);
928
929 if (vma->vm_flags & VM_WRITE) {
930 if (map->frozen) {
931 err = -EPERM;
932 goto out;
933 }
934 /* map is meant to be read-only, so do not allow mapping as
935 * writable, because it's possible to leak a writable page
936 * reference and allows user-space to still modify it after
937 * freezing, while verifier will assume contents do not change
938 */
939 if (map->map_flags & BPF_F_RDONLY_PROG) {
940 err = -EACCES;
941 goto out;
942 }
943 }
944
945 /* set default open/close callbacks */
946 vma->vm_ops = &bpf_map_default_vmops;
947 vma->vm_private_data = map;
948 vm_flags_clear(vma, VM_MAYEXEC);
949 if (!(vma->vm_flags & VM_WRITE))
950 /* disallow re-mapping with PROT_WRITE */
951 vm_flags_clear(vma, VM_MAYWRITE);
952
953 err = map->ops->map_mmap(map, vma);
954 if (err)
955 goto out;
956
957 if (vma->vm_flags & VM_MAYWRITE)
958 bpf_map_write_active_inc(map);
959out:
960 mutex_unlock(&map->freeze_mutex);
961 return err;
962}
963
964static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
965{
966 struct bpf_map *map = filp->private_data;
967
968 if (map->ops->map_poll)
969 return map->ops->map_poll(map, filp, pts);
970
971 return EPOLLERR;
972}
973
974static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
975 unsigned long len, unsigned long pgoff,
976 unsigned long flags)
977{
978 struct bpf_map *map = filp->private_data;
979
980 if (map->ops->map_get_unmapped_area)
981 return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
982#ifdef CONFIG_MMU
983 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
984#else
985 return addr;
986#endif
987}
988
989const struct file_operations bpf_map_fops = {
990#ifdef CONFIG_PROC_FS
991 .show_fdinfo = bpf_map_show_fdinfo,
992#endif
993 .release = bpf_map_release,
994 .read = bpf_dummy_read,
995 .write = bpf_dummy_write,
996 .mmap = bpf_map_mmap,
997 .poll = bpf_map_poll,
998 .get_unmapped_area = bpf_get_unmapped_area,
999};
1000
1001int bpf_map_new_fd(struct bpf_map *map, int flags)
1002{
1003 int ret;
1004
1005 ret = security_bpf_map(map, OPEN_FMODE(flags));
1006 if (ret < 0)
1007 return ret;
1008
1009 return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
1010 flags | O_CLOEXEC);
1011}
1012
1013int bpf_get_file_flag(int flags)
1014{
1015 if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
1016 return -EINVAL;
1017 if (flags & BPF_F_RDONLY)
1018 return O_RDONLY;
1019 if (flags & BPF_F_WRONLY)
1020 return O_WRONLY;
1021 return O_RDWR;
1022}
1023
1024/* helper macro to check that unused fields 'union bpf_attr' are zero */
1025#define CHECK_ATTR(CMD) \
1026 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
1027 sizeof(attr->CMD##_LAST_FIELD), 0, \
1028 sizeof(*attr) - \
1029 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
1030 sizeof(attr->CMD##_LAST_FIELD)) != NULL
1031
1032/* dst and src must have at least "size" number of bytes.
1033 * Return strlen on success and < 0 on error.
1034 */
1035int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
1036{
1037 const char *end = src + size;
1038 const char *orig_src = src;
1039
1040 memset(dst, 0, size);
1041 /* Copy all isalnum(), '_' and '.' chars. */
1042 while (src < end && *src) {
1043 if (!isalnum(*src) &&
1044 *src != '_' && *src != '.')
1045 return -EINVAL;
1046 *dst++ = *src++;
1047 }
1048
1049 /* No '\0' found in "size" number of bytes */
1050 if (src == end)
1051 return -EINVAL;
1052
1053 return src - orig_src;
1054}
1055
1056int map_check_no_btf(const struct bpf_map *map,
1057 const struct btf *btf,
1058 const struct btf_type *key_type,
1059 const struct btf_type *value_type)
1060{
1061 return -ENOTSUPP;
1062}
1063
1064static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
1065 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
1066{
1067 const struct btf_type *key_type, *value_type;
1068 u32 key_size, value_size;
1069 int ret = 0;
1070
1071 /* Some maps allow key to be unspecified. */
1072 if (btf_key_id) {
1073 key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
1074 if (!key_type || key_size != map->key_size)
1075 return -EINVAL;
1076 } else {
1077 key_type = btf_type_by_id(btf, 0);
1078 if (!map->ops->map_check_btf)
1079 return -EINVAL;
1080 }
1081
1082 value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
1083 if (!value_type || value_size != map->value_size)
1084 return -EINVAL;
1085
1086 map->record = btf_parse_fields(btf, value_type,
1087 BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
1088 BPF_RB_ROOT | BPF_REFCOUNT,
1089 map->value_size);
1090 if (!IS_ERR_OR_NULL(map->record)) {
1091 int i;
1092
1093 if (!bpf_token_capable(token, CAP_BPF)) {
1094 ret = -EPERM;
1095 goto free_map_tab;
1096 }
1097 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
1098 ret = -EACCES;
1099 goto free_map_tab;
1100 }
1101 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
1102 switch (map->record->field_mask & (1 << i)) {
1103 case 0:
1104 continue;
1105 case BPF_SPIN_LOCK:
1106 if (map->map_type != BPF_MAP_TYPE_HASH &&
1107 map->map_type != BPF_MAP_TYPE_ARRAY &&
1108 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
1109 map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1110 map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1111 map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1112 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1113 ret = -EOPNOTSUPP;
1114 goto free_map_tab;
1115 }
1116 break;
1117 case BPF_TIMER:
1118 if (map->map_type != BPF_MAP_TYPE_HASH &&
1119 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1120 map->map_type != BPF_MAP_TYPE_ARRAY) {
1121 ret = -EOPNOTSUPP;
1122 goto free_map_tab;
1123 }
1124 break;
1125 case BPF_KPTR_UNREF:
1126 case BPF_KPTR_REF:
1127 case BPF_KPTR_PERCPU:
1128 case BPF_REFCOUNT:
1129 if (map->map_type != BPF_MAP_TYPE_HASH &&
1130 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
1131 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1132 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
1133 map->map_type != BPF_MAP_TYPE_ARRAY &&
1134 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
1135 map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1136 map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1137 map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1138 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1139 ret = -EOPNOTSUPP;
1140 goto free_map_tab;
1141 }
1142 break;
1143 case BPF_LIST_HEAD:
1144 case BPF_RB_ROOT:
1145 if (map->map_type != BPF_MAP_TYPE_HASH &&
1146 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1147 map->map_type != BPF_MAP_TYPE_ARRAY) {
1148 ret = -EOPNOTSUPP;
1149 goto free_map_tab;
1150 }
1151 break;
1152 default:
1153 /* Fail if map_type checks are missing for a field type */
1154 ret = -EOPNOTSUPP;
1155 goto free_map_tab;
1156 }
1157 }
1158 }
1159
1160 ret = btf_check_and_fixup_fields(btf, map->record);
1161 if (ret < 0)
1162 goto free_map_tab;
1163
1164 if (map->ops->map_check_btf) {
1165 ret = map->ops->map_check_btf(map, btf, key_type, value_type);
1166 if (ret < 0)
1167 goto free_map_tab;
1168 }
1169
1170 return ret;
1171free_map_tab:
1172 bpf_map_free_record(map);
1173 return ret;
1174}
1175
1176static bool bpf_net_capable(void)
1177{
1178 return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
1179}
1180
1181#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
1182/* called via syscall */
1183static int map_create(union bpf_attr *attr)
1184{
1185 const struct bpf_map_ops *ops;
1186 struct bpf_token *token = NULL;
1187 int numa_node = bpf_map_attr_numa_node(attr);
1188 u32 map_type = attr->map_type;
1189 struct bpf_map *map;
1190 bool token_flag;
1191 int f_flags;
1192 int err;
1193
1194 err = CHECK_ATTR(BPF_MAP_CREATE);
1195 if (err)
1196 return -EINVAL;
1197
1198 /* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
1199 * to avoid per-map type checks tripping on unknown flag
1200 */
1201 token_flag = attr->map_flags & BPF_F_TOKEN_FD;
1202 attr->map_flags &= ~BPF_F_TOKEN_FD;
1203
1204 if (attr->btf_vmlinux_value_type_id) {
1205 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
1206 attr->btf_key_type_id || attr->btf_value_type_id)
1207 return -EINVAL;
1208 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
1209 return -EINVAL;
1210 }
1211
1212 if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
1213 attr->map_type != BPF_MAP_TYPE_ARENA &&
1214 attr->map_extra != 0)
1215 return -EINVAL;
1216
1217 f_flags = bpf_get_file_flag(attr->map_flags);
1218 if (f_flags < 0)
1219 return f_flags;
1220
1221 if (numa_node != NUMA_NO_NODE &&
1222 ((unsigned int)numa_node >= nr_node_ids ||
1223 !node_online(numa_node)))
1224 return -EINVAL;
1225
1226 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1227 map_type = attr->map_type;
1228 if (map_type >= ARRAY_SIZE(bpf_map_types))
1229 return -EINVAL;
1230 map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
1231 ops = bpf_map_types[map_type];
1232 if (!ops)
1233 return -EINVAL;
1234
1235 if (ops->map_alloc_check) {
1236 err = ops->map_alloc_check(attr);
1237 if (err)
1238 return err;
1239 }
1240 if (attr->map_ifindex)
1241 ops = &bpf_map_offload_ops;
1242 if (!ops->map_mem_usage)
1243 return -EINVAL;
1244
1245 if (token_flag) {
1246 token = bpf_token_get_from_fd(attr->map_token_fd);
1247 if (IS_ERR(token))
1248 return PTR_ERR(token);
1249
1250 /* if current token doesn't grant map creation permissions,
1251 * then we can't use this token, so ignore it and rely on
1252 * system-wide capabilities checks
1253 */
1254 if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
1255 !bpf_token_allow_map_type(token, attr->map_type)) {
1256 bpf_token_put(token);
1257 token = NULL;
1258 }
1259 }
1260
1261 err = -EPERM;
1262
1263 /* Intent here is for unprivileged_bpf_disabled to block BPF map
1264 * creation for unprivileged users; other actions depend
1265 * on fd availability and access to bpffs, so are dependent on
1266 * object creation success. Even with unprivileged BPF disabled,
1267 * capability checks are still carried out.
1268 */
1269 if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
1270 goto put_token;
1271
1272 /* check privileged map type permissions */
1273 switch (map_type) {
1274 case BPF_MAP_TYPE_ARRAY:
1275 case BPF_MAP_TYPE_PERCPU_ARRAY:
1276 case BPF_MAP_TYPE_PROG_ARRAY:
1277 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1278 case BPF_MAP_TYPE_CGROUP_ARRAY:
1279 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1280 case BPF_MAP_TYPE_HASH:
1281 case BPF_MAP_TYPE_PERCPU_HASH:
1282 case BPF_MAP_TYPE_HASH_OF_MAPS:
1283 case BPF_MAP_TYPE_RINGBUF:
1284 case BPF_MAP_TYPE_USER_RINGBUF:
1285 case BPF_MAP_TYPE_CGROUP_STORAGE:
1286 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
1287 /* unprivileged */
1288 break;
1289 case BPF_MAP_TYPE_SK_STORAGE:
1290 case BPF_MAP_TYPE_INODE_STORAGE:
1291 case BPF_MAP_TYPE_TASK_STORAGE:
1292 case BPF_MAP_TYPE_CGRP_STORAGE:
1293 case BPF_MAP_TYPE_BLOOM_FILTER:
1294 case BPF_MAP_TYPE_LPM_TRIE:
1295 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
1296 case BPF_MAP_TYPE_STACK_TRACE:
1297 case BPF_MAP_TYPE_QUEUE:
1298 case BPF_MAP_TYPE_STACK:
1299 case BPF_MAP_TYPE_LRU_HASH:
1300 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
1301 case BPF_MAP_TYPE_STRUCT_OPS:
1302 case BPF_MAP_TYPE_CPUMAP:
1303 case BPF_MAP_TYPE_ARENA:
1304 if (!bpf_token_capable(token, CAP_BPF))
1305 goto put_token;
1306 break;
1307 case BPF_MAP_TYPE_SOCKMAP:
1308 case BPF_MAP_TYPE_SOCKHASH:
1309 case BPF_MAP_TYPE_DEVMAP:
1310 case BPF_MAP_TYPE_DEVMAP_HASH:
1311 case BPF_MAP_TYPE_XSKMAP:
1312 if (!bpf_token_capable(token, CAP_NET_ADMIN))
1313 goto put_token;
1314 break;
1315 default:
1316 WARN(1, "unsupported map type %d", map_type);
1317 goto put_token;
1318 }
1319
1320 map = ops->map_alloc(attr);
1321 if (IS_ERR(map)) {
1322 err = PTR_ERR(map);
1323 goto put_token;
1324 }
1325 map->ops = ops;
1326 map->map_type = map_type;
1327
1328 err = bpf_obj_name_cpy(map->name, attr->map_name,
1329 sizeof(attr->map_name));
1330 if (err < 0)
1331 goto free_map;
1332
1333 atomic64_set(&map->refcnt, 1);
1334 atomic64_set(&map->usercnt, 1);
1335 mutex_init(&map->freeze_mutex);
1336 spin_lock_init(&map->owner.lock);
1337
1338 if (attr->btf_key_type_id || attr->btf_value_type_id ||
1339 /* Even the map's value is a kernel's struct,
1340 * the bpf_prog.o must have BTF to begin with
1341 * to figure out the corresponding kernel's
1342 * counter part. Thus, attr->btf_fd has
1343 * to be valid also.
1344 */
1345 attr->btf_vmlinux_value_type_id) {
1346 struct btf *btf;
1347
1348 btf = btf_get_by_fd(attr->btf_fd);
1349 if (IS_ERR(btf)) {
1350 err = PTR_ERR(btf);
1351 goto free_map;
1352 }
1353 if (btf_is_kernel(btf)) {
1354 btf_put(btf);
1355 err = -EACCES;
1356 goto free_map;
1357 }
1358 map->btf = btf;
1359
1360 if (attr->btf_value_type_id) {
1361 err = map_check_btf(map, token, btf, attr->btf_key_type_id,
1362 attr->btf_value_type_id);
1363 if (err)
1364 goto free_map;
1365 }
1366
1367 map->btf_key_type_id = attr->btf_key_type_id;
1368 map->btf_value_type_id = attr->btf_value_type_id;
1369 map->btf_vmlinux_value_type_id =
1370 attr->btf_vmlinux_value_type_id;
1371 }
1372
1373 err = security_bpf_map_create(map, attr, token);
1374 if (err)
1375 goto free_map_sec;
1376
1377 err = bpf_map_alloc_id(map);
1378 if (err)
1379 goto free_map_sec;
1380
1381 bpf_map_save_memcg(map);
1382 bpf_token_put(token);
1383
1384 err = bpf_map_new_fd(map, f_flags);
1385 if (err < 0) {
1386 /* failed to allocate fd.
1387 * bpf_map_put_with_uref() is needed because the above
1388 * bpf_map_alloc_id() has published the map
1389 * to the userspace and the userspace may
1390 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
1391 */
1392 bpf_map_put_with_uref(map);
1393 return err;
1394 }
1395
1396 return err;
1397
1398free_map_sec:
1399 security_bpf_map_free(map);
1400free_map:
1401 btf_put(map->btf);
1402 map->ops->map_free(map);
1403put_token:
1404 bpf_token_put(token);
1405 return err;
1406}
1407
1408/* if error is returned, fd is released.
1409 * On success caller should complete fd access with matching fdput()
1410 */
1411struct bpf_map *__bpf_map_get(struct fd f)
1412{
1413 if (!f.file)
1414 return ERR_PTR(-EBADF);
1415 if (f.file->f_op != &bpf_map_fops) {
1416 fdput(f);
1417 return ERR_PTR(-EINVAL);
1418 }
1419
1420 return f.file->private_data;
1421}
1422
1423void bpf_map_inc(struct bpf_map *map)
1424{
1425 atomic64_inc(&map->refcnt);
1426}
1427EXPORT_SYMBOL_GPL(bpf_map_inc);
1428
1429void bpf_map_inc_with_uref(struct bpf_map *map)
1430{
1431 atomic64_inc(&map->refcnt);
1432 atomic64_inc(&map->usercnt);
1433}
1434EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
1435
1436struct bpf_map *bpf_map_get(u32 ufd)
1437{
1438 struct fd f = fdget(ufd);
1439 struct bpf_map *map;
1440
1441 map = __bpf_map_get(f);
1442 if (IS_ERR(map))
1443 return map;
1444
1445 bpf_map_inc(map);
1446 fdput(f);
1447
1448 return map;
1449}
1450EXPORT_SYMBOL(bpf_map_get);
1451
1452struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1453{
1454 struct fd f = fdget(ufd);
1455 struct bpf_map *map;
1456
1457 map = __bpf_map_get(f);
1458 if (IS_ERR(map))
1459 return map;
1460
1461 bpf_map_inc_with_uref(map);
1462 fdput(f);
1463
1464 return map;
1465}
1466
1467/* map_idr_lock should have been held or the map should have been
1468 * protected by rcu read lock.
1469 */
1470struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
1471{
1472 int refold;
1473
1474 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
1475 if (!refold)
1476 return ERR_PTR(-ENOENT);
1477 if (uref)
1478 atomic64_inc(&map->usercnt);
1479
1480 return map;
1481}
1482
1483struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
1484{
1485 spin_lock_bh(&map_idr_lock);
1486 map = __bpf_map_inc_not_zero(map, false);
1487 spin_unlock_bh(&map_idr_lock);
1488
1489 return map;
1490}
1491EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
1492
1493int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1494{
1495 return -ENOTSUPP;
1496}
1497
1498static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1499{
1500 if (key_size)
1501 return vmemdup_user(ukey, key_size);
1502
1503 if (ukey)
1504 return ERR_PTR(-EINVAL);
1505
1506 return NULL;
1507}
1508
1509static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1510{
1511 if (key_size)
1512 return kvmemdup_bpfptr(ukey, key_size);
1513
1514 if (!bpfptr_is_null(ukey))
1515 return ERR_PTR(-EINVAL);
1516
1517 return NULL;
1518}
1519
1520/* last field in 'union bpf_attr' used by this command */
1521#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1522
1523static int map_lookup_elem(union bpf_attr *attr)
1524{
1525 void __user *ukey = u64_to_user_ptr(attr->key);
1526 void __user *uvalue = u64_to_user_ptr(attr->value);
1527 int ufd = attr->map_fd;
1528 struct bpf_map *map;
1529 void *key, *value;
1530 u32 value_size;
1531 struct fd f;
1532 int err;
1533
1534 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1535 return -EINVAL;
1536
1537 if (attr->flags & ~BPF_F_LOCK)
1538 return -EINVAL;
1539
1540 f = fdget(ufd);
1541 map = __bpf_map_get(f);
1542 if (IS_ERR(map))
1543 return PTR_ERR(map);
1544 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1545 err = -EPERM;
1546 goto err_put;
1547 }
1548
1549 if ((attr->flags & BPF_F_LOCK) &&
1550 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1551 err = -EINVAL;
1552 goto err_put;
1553 }
1554
1555 key = __bpf_copy_key(ukey, map->key_size);
1556 if (IS_ERR(key)) {
1557 err = PTR_ERR(key);
1558 goto err_put;
1559 }
1560
1561 value_size = bpf_map_value_size(map);
1562
1563 err = -ENOMEM;
1564 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1565 if (!value)
1566 goto free_key;
1567
1568 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1569 if (copy_from_user(value, uvalue, value_size))
1570 err = -EFAULT;
1571 else
1572 err = bpf_map_copy_value(map, key, value, attr->flags);
1573 goto free_value;
1574 }
1575
1576 err = bpf_map_copy_value(map, key, value, attr->flags);
1577 if (err)
1578 goto free_value;
1579
1580 err = -EFAULT;
1581 if (copy_to_user(uvalue, value, value_size) != 0)
1582 goto free_value;
1583
1584 err = 0;
1585
1586free_value:
1587 kvfree(value);
1588free_key:
1589 kvfree(key);
1590err_put:
1591 fdput(f);
1592 return err;
1593}
1594
1595
1596#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1597
1598static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1599{
1600 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1601 bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1602 int ufd = attr->map_fd;
1603 struct bpf_map *map;
1604 void *key, *value;
1605 u32 value_size;
1606 struct fd f;
1607 int err;
1608
1609 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1610 return -EINVAL;
1611
1612 f = fdget(ufd);
1613 map = __bpf_map_get(f);
1614 if (IS_ERR(map))
1615 return PTR_ERR(map);
1616 bpf_map_write_active_inc(map);
1617 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1618 err = -EPERM;
1619 goto err_put;
1620 }
1621
1622 if ((attr->flags & BPF_F_LOCK) &&
1623 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1624 err = -EINVAL;
1625 goto err_put;
1626 }
1627
1628 key = ___bpf_copy_key(ukey, map->key_size);
1629 if (IS_ERR(key)) {
1630 err = PTR_ERR(key);
1631 goto err_put;
1632 }
1633
1634 value_size = bpf_map_value_size(map);
1635 value = kvmemdup_bpfptr(uvalue, value_size);
1636 if (IS_ERR(value)) {
1637 err = PTR_ERR(value);
1638 goto free_key;
1639 }
1640
1641 err = bpf_map_update_value(map, f.file, key, value, attr->flags);
1642 if (!err)
1643 maybe_wait_bpf_programs(map);
1644
1645 kvfree(value);
1646free_key:
1647 kvfree(key);
1648err_put:
1649 bpf_map_write_active_dec(map);
1650 fdput(f);
1651 return err;
1652}
1653
1654#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1655
1656static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
1657{
1658 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1659 int ufd = attr->map_fd;
1660 struct bpf_map *map;
1661 struct fd f;
1662 void *key;
1663 int err;
1664
1665 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1666 return -EINVAL;
1667
1668 f = fdget(ufd);
1669 map = __bpf_map_get(f);
1670 if (IS_ERR(map))
1671 return PTR_ERR(map);
1672 bpf_map_write_active_inc(map);
1673 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1674 err = -EPERM;
1675 goto err_put;
1676 }
1677
1678 key = ___bpf_copy_key(ukey, map->key_size);
1679 if (IS_ERR(key)) {
1680 err = PTR_ERR(key);
1681 goto err_put;
1682 }
1683
1684 if (bpf_map_is_offloaded(map)) {
1685 err = bpf_map_offload_delete_elem(map, key);
1686 goto out;
1687 } else if (IS_FD_PROG_ARRAY(map) ||
1688 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1689 /* These maps require sleepable context */
1690 err = map->ops->map_delete_elem(map, key);
1691 goto out;
1692 }
1693
1694 bpf_disable_instrumentation();
1695 rcu_read_lock();
1696 err = map->ops->map_delete_elem(map, key);
1697 rcu_read_unlock();
1698 bpf_enable_instrumentation();
1699 if (!err)
1700 maybe_wait_bpf_programs(map);
1701out:
1702 kvfree(key);
1703err_put:
1704 bpf_map_write_active_dec(map);
1705 fdput(f);
1706 return err;
1707}
1708
1709/* last field in 'union bpf_attr' used by this command */
1710#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1711
1712static int map_get_next_key(union bpf_attr *attr)
1713{
1714 void __user *ukey = u64_to_user_ptr(attr->key);
1715 void __user *unext_key = u64_to_user_ptr(attr->next_key);
1716 int ufd = attr->map_fd;
1717 struct bpf_map *map;
1718 void *key, *next_key;
1719 struct fd f;
1720 int err;
1721
1722 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1723 return -EINVAL;
1724
1725 f = fdget(ufd);
1726 map = __bpf_map_get(f);
1727 if (IS_ERR(map))
1728 return PTR_ERR(map);
1729 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1730 err = -EPERM;
1731 goto err_put;
1732 }
1733
1734 if (ukey) {
1735 key = __bpf_copy_key(ukey, map->key_size);
1736 if (IS_ERR(key)) {
1737 err = PTR_ERR(key);
1738 goto err_put;
1739 }
1740 } else {
1741 key = NULL;
1742 }
1743
1744 err = -ENOMEM;
1745 next_key = kvmalloc(map->key_size, GFP_USER);
1746 if (!next_key)
1747 goto free_key;
1748
1749 if (bpf_map_is_offloaded(map)) {
1750 err = bpf_map_offload_get_next_key(map, key, next_key);
1751 goto out;
1752 }
1753
1754 rcu_read_lock();
1755 err = map->ops->map_get_next_key(map, key, next_key);
1756 rcu_read_unlock();
1757out:
1758 if (err)
1759 goto free_next_key;
1760
1761 err = -EFAULT;
1762 if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1763 goto free_next_key;
1764
1765 err = 0;
1766
1767free_next_key:
1768 kvfree(next_key);
1769free_key:
1770 kvfree(key);
1771err_put:
1772 fdput(f);
1773 return err;
1774}
1775
1776int generic_map_delete_batch(struct bpf_map *map,
1777 const union bpf_attr *attr,
1778 union bpf_attr __user *uattr)
1779{
1780 void __user *keys = u64_to_user_ptr(attr->batch.keys);
1781 u32 cp, max_count;
1782 int err = 0;
1783 void *key;
1784
1785 if (attr->batch.elem_flags & ~BPF_F_LOCK)
1786 return -EINVAL;
1787
1788 if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1789 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1790 return -EINVAL;
1791 }
1792
1793 max_count = attr->batch.count;
1794 if (!max_count)
1795 return 0;
1796
1797 if (put_user(0, &uattr->batch.count))
1798 return -EFAULT;
1799
1800 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1801 if (!key)
1802 return -ENOMEM;
1803
1804 for (cp = 0; cp < max_count; cp++) {
1805 err = -EFAULT;
1806 if (copy_from_user(key, keys + cp * map->key_size,
1807 map->key_size))
1808 break;
1809
1810 if (bpf_map_is_offloaded(map)) {
1811 err = bpf_map_offload_delete_elem(map, key);
1812 break;
1813 }
1814
1815 bpf_disable_instrumentation();
1816 rcu_read_lock();
1817 err = map->ops->map_delete_elem(map, key);
1818 rcu_read_unlock();
1819 bpf_enable_instrumentation();
1820 if (err)
1821 break;
1822 cond_resched();
1823 }
1824 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1825 err = -EFAULT;
1826
1827 kvfree(key);
1828
1829 return err;
1830}
1831
1832int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
1833 const union bpf_attr *attr,
1834 union bpf_attr __user *uattr)
1835{
1836 void __user *values = u64_to_user_ptr(attr->batch.values);
1837 void __user *keys = u64_to_user_ptr(attr->batch.keys);
1838 u32 value_size, cp, max_count;
1839 void *key, *value;
1840 int err = 0;
1841
1842 if (attr->batch.elem_flags & ~BPF_F_LOCK)
1843 return -EINVAL;
1844
1845 if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1846 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1847 return -EINVAL;
1848 }
1849
1850 value_size = bpf_map_value_size(map);
1851
1852 max_count = attr->batch.count;
1853 if (!max_count)
1854 return 0;
1855
1856 if (put_user(0, &uattr->batch.count))
1857 return -EFAULT;
1858
1859 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1860 if (!key)
1861 return -ENOMEM;
1862
1863 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1864 if (!value) {
1865 kvfree(key);
1866 return -ENOMEM;
1867 }
1868
1869 for (cp = 0; cp < max_count; cp++) {
1870 err = -EFAULT;
1871 if (copy_from_user(key, keys + cp * map->key_size,
1872 map->key_size) ||
1873 copy_from_user(value, values + cp * value_size, value_size))
1874 break;
1875
1876 err = bpf_map_update_value(map, map_file, key, value,
1877 attr->batch.elem_flags);
1878
1879 if (err)
1880 break;
1881 cond_resched();
1882 }
1883
1884 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1885 err = -EFAULT;
1886
1887 kvfree(value);
1888 kvfree(key);
1889
1890 return err;
1891}
1892
1893#define MAP_LOOKUP_RETRIES 3
1894
1895int generic_map_lookup_batch(struct bpf_map *map,
1896 const union bpf_attr *attr,
1897 union bpf_attr __user *uattr)
1898{
1899 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1900 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1901 void __user *values = u64_to_user_ptr(attr->batch.values);
1902 void __user *keys = u64_to_user_ptr(attr->batch.keys);
1903 void *buf, *buf_prevkey, *prev_key, *key, *value;
1904 int err, retry = MAP_LOOKUP_RETRIES;
1905 u32 value_size, cp, max_count;
1906
1907 if (attr->batch.elem_flags & ~BPF_F_LOCK)
1908 return -EINVAL;
1909
1910 if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1911 !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1912 return -EINVAL;
1913
1914 value_size = bpf_map_value_size(map);
1915
1916 max_count = attr->batch.count;
1917 if (!max_count)
1918 return 0;
1919
1920 if (put_user(0, &uattr->batch.count))
1921 return -EFAULT;
1922
1923 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1924 if (!buf_prevkey)
1925 return -ENOMEM;
1926
1927 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1928 if (!buf) {
1929 kvfree(buf_prevkey);
1930 return -ENOMEM;
1931 }
1932
1933 err = -EFAULT;
1934 prev_key = NULL;
1935 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1936 goto free_buf;
1937 key = buf;
1938 value = key + map->key_size;
1939 if (ubatch)
1940 prev_key = buf_prevkey;
1941
1942 for (cp = 0; cp < max_count;) {
1943 rcu_read_lock();
1944 err = map->ops->map_get_next_key(map, prev_key, key);
1945 rcu_read_unlock();
1946 if (err)
1947 break;
1948 err = bpf_map_copy_value(map, key, value,
1949 attr->batch.elem_flags);
1950
1951 if (err == -ENOENT) {
1952 if (retry) {
1953 retry--;
1954 continue;
1955 }
1956 err = -EINTR;
1957 break;
1958 }
1959
1960 if (err)
1961 goto free_buf;
1962
1963 if (copy_to_user(keys + cp * map->key_size, key,
1964 map->key_size)) {
1965 err = -EFAULT;
1966 goto free_buf;
1967 }
1968 if (copy_to_user(values + cp * value_size, value, value_size)) {
1969 err = -EFAULT;
1970 goto free_buf;
1971 }
1972
1973 if (!prev_key)
1974 prev_key = buf_prevkey;
1975
1976 swap(prev_key, key);
1977 retry = MAP_LOOKUP_RETRIES;
1978 cp++;
1979 cond_resched();
1980 }
1981
1982 if (err == -EFAULT)
1983 goto free_buf;
1984
1985 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1986 (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1987 err = -EFAULT;
1988
1989free_buf:
1990 kvfree(buf_prevkey);
1991 kvfree(buf);
1992 return err;
1993}
1994
1995#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
1996
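/* BPF_MAP_LOOKUP_AND_DELETE_ELEM pops an element from queue/stack maps, or
 * atomically removes and returns an entry for the hash map flavors that
 * implement map_lookup_and_delete_elem. Since the operation both reads and
 * modifies the map, FMODE_CAN_READ and FMODE_CAN_WRITE are both required.
 */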
1997static int map_lookup_and_delete_elem(union bpf_attr *attr)
1998{
1999 void __user *ukey = u64_to_user_ptr(attr->key);
2000 void __user *uvalue = u64_to_user_ptr(attr->value);
2001 int ufd = attr->map_fd;
2002 struct bpf_map *map;
2003 void *key, *value;
2004 u32 value_size;
2005 struct fd f;
2006 int err;
2007
2008 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
2009 return -EINVAL;
2010
2011 if (attr->flags & ~BPF_F_LOCK)
2012 return -EINVAL;
2013
2014 f = fdget(ufd);
2015 map = __bpf_map_get(f);
2016 if (IS_ERR(map))
2017 return PTR_ERR(map);
2018 bpf_map_write_active_inc(map);
2019 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
2020 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
2021 err = -EPERM;
2022 goto err_put;
2023 }
2024
2025 if (attr->flags &&
2026 (map->map_type == BPF_MAP_TYPE_QUEUE ||
2027 map->map_type == BPF_MAP_TYPE_STACK)) {
2028 err = -EINVAL;
2029 goto err_put;
2030 }
2031
2032 if ((attr->flags & BPF_F_LOCK) &&
2033 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
2034 err = -EINVAL;
2035 goto err_put;
2036 }
2037
2038 key = __bpf_copy_key(ukey, map->key_size);
2039 if (IS_ERR(key)) {
2040 err = PTR_ERR(key);
2041 goto err_put;
2042 }
2043
2044 value_size = bpf_map_value_size(map);
2045
2046 err = -ENOMEM;
2047 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
2048 if (!value)
2049 goto free_key;
2050
2051 err = -ENOTSUPP;
2052 if (map->map_type == BPF_MAP_TYPE_QUEUE ||
2053 map->map_type == BPF_MAP_TYPE_STACK) {
2054 err = map->ops->map_pop_elem(map, value);
2055 } else if (map->map_type == BPF_MAP_TYPE_HASH ||
2056 map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2057 map->map_type == BPF_MAP_TYPE_LRU_HASH ||
2058 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2059 if (!bpf_map_is_offloaded(map)) {
2060 bpf_disable_instrumentation();
2061 rcu_read_lock();
2062 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
2063 rcu_read_unlock();
2064 bpf_enable_instrumentation();
2065 }
2066 }
2067
2068 if (err)
2069 goto free_value;
2070
2071 if (copy_to_user(uvalue, value, value_size) != 0) {
2072 err = -EFAULT;
2073 goto free_value;
2074 }
2075
2076 err = 0;
2077
2078free_value:
2079 kvfree(value);
2080free_key:
2081 kvfree(key);
2082err_put:
2083 bpf_map_write_active_dec(map);
2084 fdput(f);
2085 return err;
2086}
2087
2088#define BPF_MAP_FREEZE_LAST_FIELD map_fd
2089
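/* BPF_MAP_FREEZE makes a map read-only from the syscall side; BPF programs
 * can still update it. Freezing is refused with -EBUSY while syscall-side
 * writes are in flight (bpf_map_write_active()) or if the map is already
 * frozen, and with -ENOTSUPP for struct_ops maps and maps with special
 * btf-managed fields in map->record.
 */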
2090static int map_freeze(const union bpf_attr *attr)
2091{
2092 int err = 0, ufd = attr->map_fd;
2093 struct bpf_map *map;
2094 struct fd f;
2095
2096 if (CHECK_ATTR(BPF_MAP_FREEZE))
2097 return -EINVAL;
2098
2099 f = fdget(ufd);
2100 map = __bpf_map_get(f);
2101 if (IS_ERR(map))
2102 return PTR_ERR(map);
2103
2104 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
2105 fdput(f);
2106 return -ENOTSUPP;
2107 }
2108
2109 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
2110 fdput(f);
2111 return -EPERM;
2112 }
2113
2114 mutex_lock(&map->freeze_mutex);
2115 if (bpf_map_write_active(map)) {
2116 err = -EBUSY;
2117 goto err_put;
2118 }
2119 if (READ_ONCE(map->frozen)) {
2120 err = -EBUSY;
2121 goto err_put;
2122 }
2123
2124 WRITE_ONCE(map->frozen, true);
2125err_put:
2126 mutex_unlock(&map->freeze_mutex);
2127 fdput(f);
2128 return err;
2129}
2130
2131static const struct bpf_prog_ops * const bpf_prog_types[] = {
2132#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2133 [_id] = & _name ## _prog_ops,
2134#define BPF_MAP_TYPE(_id, _ops)
2135#define BPF_LINK_TYPE(_id, _name)
2136#include <linux/bpf_types.h>
2137#undef BPF_PROG_TYPE
2138#undef BPF_MAP_TYPE
2139#undef BPF_LINK_TYPE
2140};
2141
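/* Resolve attr->prog_type to its struct bpf_prog_ops. The index is bounds
 * checked and then sanitized with array_index_nospec() so a mispredicted
 * bounds check cannot be used to speculatively read out of the array
 * (Spectre v1 hardening).
 */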
2142static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
2143{
2144 const struct bpf_prog_ops *ops;
2145
2146 if (type >= ARRAY_SIZE(bpf_prog_types))
2147 return -EINVAL;
2148 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
2149 ops = bpf_prog_types[type];
2150 if (!ops)
2151 return -EINVAL;
2152
2153 if (!bpf_prog_is_offloaded(prog->aux))
2154 prog->aux->ops = ops;
2155 else
2156 prog->aux->ops = &bpf_offload_prog_ops;
2157 prog->type = type;
2158 return 0;
2159}
2160
2161enum bpf_audit {
2162 BPF_AUDIT_LOAD,
2163 BPF_AUDIT_UNLOAD,
2164 BPF_AUDIT_MAX,
2165};
2166
2167static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
2168 [BPF_AUDIT_LOAD] = "LOAD",
2169 [BPF_AUDIT_UNLOAD] = "UNLOAD",
2170};
2171
2172static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
2173{
2174 struct audit_context *ctx = NULL;
2175 struct audit_buffer *ab;
2176
2177 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
2178 return;
2179 if (audit_enabled == AUDIT_OFF)
2180 return;
2181 if (!in_irq() && !irqs_disabled())
2182 ctx = audit_context();
2183 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
2184 if (unlikely(!ab))
2185 return;
2186 audit_log_format(ab, "prog-id=%u op=%s",
2187 prog->aux->id, bpf_audit_str[op]);
2188 audit_log_end(ab);
2189}
2190
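/* Allocate a user-visible program ID. idr_preload() preallocates IDR nodes
 * with GFP_KERNEL up front, so the allocation done under the spinlock can
 * use GFP_ATOMIC; idr_alloc_cyclic() hands out IDs from [1, INT_MAX) and
 * avoids reusing an ID immediately after it is freed.
 */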
2191static int bpf_prog_alloc_id(struct bpf_prog *prog)
2192{
2193 int id;
2194
2195 idr_preload(GFP_KERNEL);
2196 spin_lock_bh(&prog_idr_lock);
2197 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
2198 if (id > 0)
2199 prog->aux->id = id;
2200 spin_unlock_bh(&prog_idr_lock);
2201 idr_preload_end();
2202
2203 /* id is in [1, INT_MAX) */
2204 if (WARN_ON_ONCE(!id))
2205 return -ENOSPC;
2206
2207 return id > 0 ? 0 : id;
2208}
2209
2210void bpf_prog_free_id(struct bpf_prog *prog)
2211{
2212 unsigned long flags;
2213
	/* cBPF-to-eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears; even if someone grabs an fd to them, they are unusable,
	 * simply waiting for their refcnt to drop so they can be freed.
	 */
2219 if (!prog->aux->id)
2220 return;
2221
2222 spin_lock_irqsave(&prog_idr_lock, flags);
2223 idr_remove(&prog_idr, prog->aux->id);
2224 prog->aux->id = 0;
2225 spin_unlock_irqrestore(&prog_idr_lock, flags);
2226}
2227
2228static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2229{
2230 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2231
2232 kvfree(aux->func_info);
2233 kfree(aux->func_info_aux);
2234 free_uid(aux->user);
2235 security_bpf_prog_free(aux->prog);
2236 bpf_prog_free(aux->prog);
2237}
2238
2239static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2240{
2241 bpf_prog_kallsyms_del_all(prog);
2242 btf_put(prog->aux->btf);
2243 module_put(prog->aux->mod);
2244 kvfree(prog->aux->jited_linfo);
2245 kvfree(prog->aux->linfo);
2246 kfree(prog->aux->kfunc_tab);
2247 if (prog->aux->attach_btf)
2248 btf_put(prog->aux->attach_btf);
2249
2250 if (deferred) {
2251 if (prog->sleepable)
2252 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2253 else
2254 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2255 } else {
2256 __bpf_prog_put_rcu(&prog->aux->rcu);
2257 }
2258}
2259
2260static void bpf_prog_put_deferred(struct work_struct *work)
2261{
2262 struct bpf_prog_aux *aux;
2263 struct bpf_prog *prog;
2264
2265 aux = container_of(work, struct bpf_prog_aux, work);
2266 prog = aux->prog;
2267 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2268 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2269 bpf_prog_free_id(prog);
2270 __bpf_prog_put_noref(prog, true);
2271}
2272
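/* Drop the main reference. The teardown path takes locks that are not safe
 * in hard-IRQ context or with IRQs disabled, so in those contexts the final
 * put is punted to the system workqueue and runs from process context.
 */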
2273static void __bpf_prog_put(struct bpf_prog *prog)
2274{
2275 struct bpf_prog_aux *aux = prog->aux;
2276
2277 if (atomic64_dec_and_test(&aux->refcnt)) {
2278 if (in_irq() || irqs_disabled()) {
2279 INIT_WORK(&aux->work, bpf_prog_put_deferred);
2280 schedule_work(&aux->work);
2281 } else {
2282 bpf_prog_put_deferred(&aux->work);
2283 }
2284 }
2285}
2286
2287void bpf_prog_put(struct bpf_prog *prog)
2288{
2289 __bpf_prog_put(prog);
2290}
2291EXPORT_SYMBOL_GPL(bpf_prog_put);
2292
2293static int bpf_prog_release(struct inode *inode, struct file *filp)
2294{
2295 struct bpf_prog *prog = filp->private_data;
2296
2297 bpf_prog_put(prog);
2298 return 0;
2299}
2300
2301struct bpf_prog_kstats {
2302 u64 nsecs;
2303 u64 cnt;
2304 u64 misses;
2305};
2306
2307void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2308{
2309 struct bpf_prog_stats *stats;
2310 unsigned int flags;
2311
2312 stats = this_cpu_ptr(prog->stats);
2313 flags = u64_stats_update_begin_irqsave(&stats->syncp);
2314 u64_stats_inc(&stats->misses);
2315 u64_stats_update_end_irqrestore(&stats->syncp, flags);
2316}
2317
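/* Fold the per-CPU runtime stats into a single snapshot. Each per-CPU copy
 * is read under a u64_stats fetch/retry loop so that 32-bit architectures
 * observe consistent 64-bit counter values.
 */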
2318static void bpf_prog_get_stats(const struct bpf_prog *prog,
2319 struct bpf_prog_kstats *stats)
2320{
2321 u64 nsecs = 0, cnt = 0, misses = 0;
2322 int cpu;
2323
2324 for_each_possible_cpu(cpu) {
2325 const struct bpf_prog_stats *st;
2326 unsigned int start;
2327 u64 tnsecs, tcnt, tmisses;
2328
2329 st = per_cpu_ptr(prog->stats, cpu);
2330 do {
2331 start = u64_stats_fetch_begin(&st->syncp);
2332 tnsecs = u64_stats_read(&st->nsecs);
2333 tcnt = u64_stats_read(&st->cnt);
2334 tmisses = u64_stats_read(&st->misses);
2335 } while (u64_stats_fetch_retry(&st->syncp, start));
2336 nsecs += tnsecs;
2337 cnt += tcnt;
2338 misses += tmisses;
2339 }
2340 stats->nsecs = nsecs;
2341 stats->cnt = cnt;
2342 stats->misses = misses;
2343}
2344
2345#ifdef CONFIG_PROC_FS
2346static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2347{
2348 const struct bpf_prog *prog = filp->private_data;
2349 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2350 struct bpf_prog_kstats stats;
2351
2352 bpf_prog_get_stats(prog, &stats);
2353 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2354 seq_printf(m,
2355 "prog_type:\t%u\n"
2356 "prog_jited:\t%u\n"
2357 "prog_tag:\t%s\n"
2358 "memlock:\t%llu\n"
2359 "prog_id:\t%u\n"
2360 "run_time_ns:\t%llu\n"
2361 "run_cnt:\t%llu\n"
2362 "recursion_misses:\t%llu\n"
2363 "verified_insns:\t%u\n",
2364 prog->type,
2365 prog->jited,
2366 prog_tag,
2367 prog->pages * 1ULL << PAGE_SHIFT,
2368 prog->aux->id,
2369 stats.nsecs,
2370 stats.cnt,
2371 stats.misses,
2372 prog->aux->verified_insns);
2373}
2374#endif
2375
2376const struct file_operations bpf_prog_fops = {
2377#ifdef CONFIG_PROC_FS
2378 .show_fdinfo = bpf_prog_show_fdinfo,
2379#endif
2380 .release = bpf_prog_release,
2381 .read = bpf_dummy_read,
2382 .write = bpf_dummy_write,
2383};
2384
2385int bpf_prog_new_fd(struct bpf_prog *prog)
2386{
2387 int ret;
2388
2389 ret = security_bpf_prog(prog);
2390 if (ret < 0)
2391 return ret;
2392
2393 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2394 O_RDWR | O_CLOEXEC);
2395}
2396
2397static struct bpf_prog *____bpf_prog_get(struct fd f)
2398{
2399 if (!f.file)
2400 return ERR_PTR(-EBADF);
2401 if (f.file->f_op != &bpf_prog_fops) {
2402 fdput(f);
2403 return ERR_PTR(-EINVAL);
2404 }
2405
2406 return f.file->private_data;
2407}
2408
2409void bpf_prog_add(struct bpf_prog *prog, int i)
2410{
2411 atomic64_add(i, &prog->aux->refcnt);
2412}
2413EXPORT_SYMBOL_GPL(bpf_prog_add);
2414
2415void bpf_prog_sub(struct bpf_prog *prog, int i)
2416{
2417 /* Only to be used for undoing previous bpf_prog_add() in some
2418 * error path. We still know that another entity in our call
2419 * path holds a reference to the program, thus atomic_sub() can
2420 * be safely used in such cases!
2421 */
2422 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2423}
2424EXPORT_SYMBOL_GPL(bpf_prog_sub);
2425
2426void bpf_prog_inc(struct bpf_prog *prog)
2427{
2428 atomic64_inc(&prog->aux->refcnt);
2429}
2430EXPORT_SYMBOL_GPL(bpf_prog_inc);
2431
2432/* prog_idr_lock should have been held */
2433struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2434{
2435 int refold;
2436
2437 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2438
2439 if (!refold)
2440 return ERR_PTR(-ENOENT);
2441
2442 return prog;
2443}
2444EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2445
2446bool bpf_prog_get_ok(struct bpf_prog *prog,
2447 enum bpf_prog_type *attach_type, bool attach_drv)
2448{
2449 /* not an attachment, just a refcount inc, always allow */
2450 if (!attach_type)
2451 return true;
2452
2453 if (prog->type != *attach_type)
2454 return false;
2455 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2456 return false;
2457
2458 return true;
2459}
2460
2461static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2462 bool attach_drv)
2463{
2464 struct fd f = fdget(ufd);
2465 struct bpf_prog *prog;
2466
2467 prog = ____bpf_prog_get(f);
2468 if (IS_ERR(prog))
2469 return prog;
2470 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
2471 prog = ERR_PTR(-EINVAL);
2472 goto out;
2473 }
2474
2475 bpf_prog_inc(prog);
2476out:
2477 fdput(f);
2478 return prog;
2479}
2480
2481struct bpf_prog *bpf_prog_get(u32 ufd)
2482{
2483 return __bpf_prog_get(ufd, NULL, false);
2484}
2485
2486struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2487 bool attach_drv)
2488{
2489 return __bpf_prog_get(ufd, &type, attach_drv);
2490}
2491EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2492
/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later, for some of them, specifying
 * expected_attach_type at load time became required so that the program
 * could be validated properly. Program types that are allowed to be loaded
 * both w/ and w/o (for backward compatibility) expected_attach_type should
 * have a default attach type assigned for the latter case, so that it can
 * still be validated at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * the prog type requires it but has some attach types that have to remain
 * backward compatible.
 */
2505static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2506{
2507 switch (attr->prog_type) {
2508 case BPF_PROG_TYPE_CGROUP_SOCK:
2509 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2510 * exist so checking for non-zero is the way to go here.
2511 */
2512 if (!attr->expected_attach_type)
2513 attr->expected_attach_type =
2514 BPF_CGROUP_INET_SOCK_CREATE;
2515 break;
2516 case BPF_PROG_TYPE_SK_REUSEPORT:
2517 if (!attr->expected_attach_type)
2518 attr->expected_attach_type =
2519 BPF_SK_REUSEPORT_SELECT;
2520 break;
2521 }
2522}
2523
2524static int
2525bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2526 enum bpf_attach_type expected_attach_type,
2527 struct btf *attach_btf, u32 btf_id,
2528 struct bpf_prog *dst_prog)
2529{
2530 if (btf_id) {
2531 if (btf_id > BTF_MAX_TYPE)
2532 return -EINVAL;
2533
2534 if (!attach_btf && !dst_prog)
2535 return -EINVAL;
2536
2537 switch (prog_type) {
2538 case BPF_PROG_TYPE_TRACING:
2539 case BPF_PROG_TYPE_LSM:
2540 case BPF_PROG_TYPE_STRUCT_OPS:
2541 case BPF_PROG_TYPE_EXT:
2542 break;
2543 default:
2544 return -EINVAL;
2545 }
2546 }
2547
2548 if (attach_btf && (!btf_id || dst_prog))
2549 return -EINVAL;
2550
2551 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2552 prog_type != BPF_PROG_TYPE_EXT)
2553 return -EINVAL;
2554
2555 switch (prog_type) {
2556 case BPF_PROG_TYPE_CGROUP_SOCK:
2557 switch (expected_attach_type) {
2558 case BPF_CGROUP_INET_SOCK_CREATE:
2559 case BPF_CGROUP_INET_SOCK_RELEASE:
2560 case BPF_CGROUP_INET4_POST_BIND:
2561 case BPF_CGROUP_INET6_POST_BIND:
2562 return 0;
2563 default:
2564 return -EINVAL;
2565 }
2566 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2567 switch (expected_attach_type) {
2568 case BPF_CGROUP_INET4_BIND:
2569 case BPF_CGROUP_INET6_BIND:
2570 case BPF_CGROUP_INET4_CONNECT:
2571 case BPF_CGROUP_INET6_CONNECT:
2572 case BPF_CGROUP_UNIX_CONNECT:
2573 case BPF_CGROUP_INET4_GETPEERNAME:
2574 case BPF_CGROUP_INET6_GETPEERNAME:
2575 case BPF_CGROUP_UNIX_GETPEERNAME:
2576 case BPF_CGROUP_INET4_GETSOCKNAME:
2577 case BPF_CGROUP_INET6_GETSOCKNAME:
2578 case BPF_CGROUP_UNIX_GETSOCKNAME:
2579 case BPF_CGROUP_UDP4_SENDMSG:
2580 case BPF_CGROUP_UDP6_SENDMSG:
2581 case BPF_CGROUP_UNIX_SENDMSG:
2582 case BPF_CGROUP_UDP4_RECVMSG:
2583 case BPF_CGROUP_UDP6_RECVMSG:
2584 case BPF_CGROUP_UNIX_RECVMSG:
2585 return 0;
2586 default:
2587 return -EINVAL;
2588 }
2589 case BPF_PROG_TYPE_CGROUP_SKB:
2590 switch (expected_attach_type) {
2591 case BPF_CGROUP_INET_INGRESS:
2592 case BPF_CGROUP_INET_EGRESS:
2593 return 0;
2594 default:
2595 return -EINVAL;
2596 }
2597 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2598 switch (expected_attach_type) {
2599 case BPF_CGROUP_SETSOCKOPT:
2600 case BPF_CGROUP_GETSOCKOPT:
2601 return 0;
2602 default:
2603 return -EINVAL;
2604 }
2605 case BPF_PROG_TYPE_SK_LOOKUP:
2606 if (expected_attach_type == BPF_SK_LOOKUP)
2607 return 0;
2608 return -EINVAL;
2609 case BPF_PROG_TYPE_SK_REUSEPORT:
2610 switch (expected_attach_type) {
2611 case BPF_SK_REUSEPORT_SELECT:
2612 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2613 return 0;
2614 default:
2615 return -EINVAL;
2616 }
2617 case BPF_PROG_TYPE_NETFILTER:
2618 if (expected_attach_type == BPF_NETFILTER)
2619 return 0;
2620 return -EINVAL;
2621 case BPF_PROG_TYPE_SYSCALL:
2622 case BPF_PROG_TYPE_EXT:
2623 if (expected_attach_type)
2624 return -EINVAL;
2625 fallthrough;
2626 default:
2627 return 0;
2628 }
2629}
2630
2631static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2632{
2633 switch (prog_type) {
2634 case BPF_PROG_TYPE_SCHED_CLS:
2635 case BPF_PROG_TYPE_SCHED_ACT:
2636 case BPF_PROG_TYPE_XDP:
2637 case BPF_PROG_TYPE_LWT_IN:
2638 case BPF_PROG_TYPE_LWT_OUT:
2639 case BPF_PROG_TYPE_LWT_XMIT:
2640 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2641 case BPF_PROG_TYPE_SK_SKB:
2642 case BPF_PROG_TYPE_SK_MSG:
2643 case BPF_PROG_TYPE_FLOW_DISSECTOR:
2644 case BPF_PROG_TYPE_CGROUP_DEVICE:
2645 case BPF_PROG_TYPE_CGROUP_SOCK:
2646 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2647 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2648 case BPF_PROG_TYPE_CGROUP_SYSCTL:
2649 case BPF_PROG_TYPE_SOCK_OPS:
2650 case BPF_PROG_TYPE_EXT: /* extends any prog */
2651 case BPF_PROG_TYPE_NETFILTER:
2652 return true;
2653 case BPF_PROG_TYPE_CGROUP_SKB:
2654 /* always unpriv */
2655 case BPF_PROG_TYPE_SK_REUSEPORT:
2656 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2657 default:
2658 return false;
2659 }
2660}
2661
2662static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2663{
2664 switch (prog_type) {
2665 case BPF_PROG_TYPE_KPROBE:
2666 case BPF_PROG_TYPE_TRACEPOINT:
2667 case BPF_PROG_TYPE_PERF_EVENT:
2668 case BPF_PROG_TYPE_RAW_TRACEPOINT:
2669 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2670 case BPF_PROG_TYPE_TRACING:
2671 case BPF_PROG_TYPE_LSM:
2672 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2673 case BPF_PROG_TYPE_EXT: /* extends any prog */
2674 return true;
2675 default:
2676 return false;
2677 }
2678}
2679
2680/* last field in 'union bpf_attr' used by this command */
2681#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
2682
2683static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2684{
2685 enum bpf_prog_type type = attr->prog_type;
2686 struct bpf_prog *prog, *dst_prog = NULL;
2687 struct btf *attach_btf = NULL;
2688 struct bpf_token *token = NULL;
2689 bool bpf_cap;
2690 int err;
2691 char license[128];
2692
2693 if (CHECK_ATTR(BPF_PROG_LOAD))
2694 return -EINVAL;
2695
2696 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2697 BPF_F_ANY_ALIGNMENT |
2698 BPF_F_TEST_STATE_FREQ |
2699 BPF_F_SLEEPABLE |
2700 BPF_F_TEST_RND_HI32 |
2701 BPF_F_XDP_HAS_FRAGS |
2702 BPF_F_XDP_DEV_BOUND_ONLY |
2703 BPF_F_TEST_REG_INVARIANTS |
2704 BPF_F_TOKEN_FD))
2705 return -EINVAL;
2706
2707 bpf_prog_load_fixup_attach_type(attr);
2708
2709 if (attr->prog_flags & BPF_F_TOKEN_FD) {
2710 token = bpf_token_get_from_fd(attr->prog_token_fd);
2711 if (IS_ERR(token))
2712 return PTR_ERR(token);
2713 /* if current token doesn't grant prog loading permissions,
2714 * then we can't use this token, so ignore it and rely on
2715 * system-wide capabilities checks
2716 */
2717 if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
2718 !bpf_token_allow_prog_type(token, attr->prog_type,
2719 attr->expected_attach_type)) {
2720 bpf_token_put(token);
2721 token = NULL;
2722 }
2723 }
2724
2725 bpf_cap = bpf_token_capable(token, CAP_BPF);
2726 err = -EPERM;
2727
2728 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2729 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2730 !bpf_cap)
2731 goto put_token;
2732
	/* The intent here is for unprivileged_bpf_disabled to block BPF
	 * program creation for unprivileged users; other actions depend on
	 * fd availability and access to bpffs, so they are gated by the
	 * success of object creation. Even with unprivileged BPF disabled,
	 * capability checks are still carried out for these and other
	 * operations.
	 */
2740 if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
2741 goto put_token;
2742
2743 if (attr->insn_cnt == 0 ||
2744 attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
2745 err = -E2BIG;
2746 goto put_token;
2747 }
2748 if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2749 type != BPF_PROG_TYPE_CGROUP_SKB &&
2750 !bpf_cap)
2751 goto put_token;
2752
2753 if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
2754 goto put_token;
2755 if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
2756 goto put_token;
2757
2758 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2759 * or btf, we need to check which one it is
2760 */
2761 if (attr->attach_prog_fd) {
2762 dst_prog = bpf_prog_get(attr->attach_prog_fd);
2763 if (IS_ERR(dst_prog)) {
2764 dst_prog = NULL;
2765 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2766 if (IS_ERR(attach_btf)) {
2767 err = -EINVAL;
2768 goto put_token;
2769 }
2770 if (!btf_is_kernel(attach_btf)) {
2771 /* attaching through specifying bpf_prog's BTF
2772 * objects directly might be supported eventually
2773 */
2774 btf_put(attach_btf);
2775 err = -ENOTSUPP;
2776 goto put_token;
2777 }
2778 }
2779 } else if (attr->attach_btf_id) {
2780 /* fall back to vmlinux BTF, if BTF type ID is specified */
2781 attach_btf = bpf_get_btf_vmlinux();
2782 if (IS_ERR(attach_btf)) {
2783 err = PTR_ERR(attach_btf);
2784 goto put_token;
2785 }
2786 if (!attach_btf) {
2787 err = -EINVAL;
2788 goto put_token;
2789 }
2790 btf_get(attach_btf);
2791 }
2792
2793 if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2794 attach_btf, attr->attach_btf_id,
2795 dst_prog)) {
2796 if (dst_prog)
2797 bpf_prog_put(dst_prog);
2798 if (attach_btf)
2799 btf_put(attach_btf);
2800 err = -EINVAL;
2801 goto put_token;
2802 }
2803
2804 /* plain bpf_prog allocation */
2805 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2806 if (!prog) {
2807 if (dst_prog)
2808 bpf_prog_put(dst_prog);
2809 if (attach_btf)
2810 btf_put(attach_btf);
		err = -ENOMEM;
2812 goto put_token;
2813 }
2814
2815 prog->expected_attach_type = attr->expected_attach_type;
2816 prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
2817 prog->aux->attach_btf = attach_btf;
2818 prog->aux->attach_btf_id = attr->attach_btf_id;
2819 prog->aux->dst_prog = dst_prog;
2820 prog->aux->dev_bound = !!attr->prog_ifindex;
2821 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2822
2823 /* move token into prog->aux, reuse taken refcnt */
2824 prog->aux->token = token;
2825 token = NULL;
2826
2827 prog->aux->user = get_current_user();
2828 prog->len = attr->insn_cnt;
2829
2830 err = -EFAULT;
2831 if (copy_from_bpfptr(prog->insns,
2832 make_bpfptr(attr->insns, uattr.is_kernel),
2833 bpf_prog_insn_size(prog)) != 0)
2834 goto free_prog;
2835 /* copy eBPF program license from user space */
2836 if (strncpy_from_bpfptr(license,
2837 make_bpfptr(attr->license, uattr.is_kernel),
2838 sizeof(license) - 1) < 0)
2839 goto free_prog;
2840 license[sizeof(license) - 1] = 0;
2841
2842 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2843 prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2844
2845 prog->orig_prog = NULL;
2846 prog->jited = 0;
2847
2848 atomic64_set(&prog->aux->refcnt, 1);
2849
2850 if (bpf_prog_is_dev_bound(prog->aux)) {
2851 err = bpf_prog_dev_bound_init(prog, attr);
2852 if (err)
2853 goto free_prog;
2854 }
2855
2856 if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2857 bpf_prog_is_dev_bound(dst_prog->aux)) {
2858 err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2859 if (err)
2860 goto free_prog;
2861 }
2862
	/*
	 * Bookkeeping for managing the program attachment chain.
	 *
	 * It might be tempting to set the attach_tracing_prog flag at
	 * attachment time instead, but that would not prevent loading a bunch
	 * of tracing programs first and then attaching them to one another.
	 *
	 * The attach_tracing_prog flag is set for the whole program lifecycle
	 * and doesn't have to be cleared in bpf_tracing_link_release, since
	 * tracing programs cannot change their attachment target.
	 */
2874 if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
2875 dst_prog->type == BPF_PROG_TYPE_TRACING) {
2876 prog->aux->attach_tracing_prog = true;
2877 }
2878
2879 /* find program type: socket_filter vs tracing_filter */
2880 err = find_prog_type(type, prog);
2881 if (err < 0)
2882 goto free_prog;
2883
2884 prog->aux->load_time = ktime_get_boottime_ns();
2885 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2886 sizeof(attr->prog_name));
2887 if (err < 0)
2888 goto free_prog;
2889
2890 err = security_bpf_prog_load(prog, attr, token);
2891 if (err)
2892 goto free_prog_sec;
2893
2894 /* run eBPF verifier */
2895 err = bpf_check(&prog, attr, uattr, uattr_size);
2896 if (err < 0)
2897 goto free_used_maps;
2898
2899 prog = bpf_prog_select_runtime(prog, &err);
2900 if (err < 0)
2901 goto free_used_maps;
2902
2903 err = bpf_prog_alloc_id(prog);
2904 if (err)
2905 goto free_used_maps;
2906
	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
	 * effectively publicly exposed. However, retrieving it via
	 * bpf_prog_get_fd_by_id() will take another reference,
	 * therefore it cannot go away underneath us.
	 *
	 * Only for the time /after/ a successful bpf_prog_new_fd()
	 * and before returning to userspace might we hold just one
	 * reference, and any parallel close on that fd could rip
	 * everything out. Hence, the notifications below must happen
	 * before bpf_prog_new_fd().
	 *
	 * Also, any failure handling from this point onwards must
	 * use bpf_prog_put(), given the program is exposed.
	 */
2921 bpf_prog_kallsyms_add(prog);
2922 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2923 bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2924
2925 err = bpf_prog_new_fd(prog);
2926 if (err < 0)
2927 bpf_prog_put(prog);
2928 return err;
2929
2930free_used_maps:
2931 /* In case we have subprogs, we need to wait for a grace
2932 * period before we can tear down JIT memory since symbols
2933 * are already exposed under kallsyms.
2934 */
2935 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
2936 return err;
2937
2938free_prog_sec:
2939 security_bpf_prog_free(prog);
2940free_prog:
2941 free_uid(prog->aux->user);
2942 if (prog->aux->attach_btf)
2943 btf_put(prog->aux->attach_btf);
2944 bpf_prog_free(prog);
2945put_token:
2946 bpf_token_put(token);
2947 return err;
2948}
2949
2950#define BPF_OBJ_LAST_FIELD path_fd
2951
2952static int bpf_obj_pin(const union bpf_attr *attr)
2953{
2954 int path_fd;
2955
2956 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
2957 return -EINVAL;
2958
2959 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2960 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2961 return -EINVAL;
2962
2963 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2964 return bpf_obj_pin_user(attr->bpf_fd, path_fd,
2965 u64_to_user_ptr(attr->pathname));
2966}
2967
2968static int bpf_obj_get(const union bpf_attr *attr)
2969{
2970 int path_fd;
2971
2972 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2973 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
2974 return -EINVAL;
2975
2976 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2977 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2978 return -EINVAL;
2979
2980 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2981 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
2982 attr->file_flags);
2983}
2984
2985void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2986 const struct bpf_link_ops *ops, struct bpf_prog *prog)
2987{
2988 atomic64_set(&link->refcnt, 1);
2989 link->type = type;
2990 link->id = 0;
2991 link->ops = ops;
2992 link->prog = prog;
2993}
2994
2995static void bpf_link_free_id(int id)
2996{
2997 if (!id)
2998 return;
2999
3000 spin_lock_bh(&link_idr_lock);
3001 idr_remove(&link_idr, id);
3002 spin_unlock_bh(&link_idr_lock);
3003}
3004
/* Clean up bpf_link and the corresponding anon_inode file and FD. Once the
 * anon_inode is created, the bpf_link can't simply be kfree()'d because of
 * the anon_inode's deferred release() call. This helper marks the bpf_link
 * as defunct, releases the anon_inode file and puts the reserved FD. The
 * bpf_prog's refcnt is not decremented; that is the responsibility of the
 * calling code that failed to complete bpf_link initialization.
 * This helper eventually calls the link's dealloc callback, but does not
 * call the link's release callback.
 */
3014void bpf_link_cleanup(struct bpf_link_primer *primer)
3015{
3016 primer->link->prog = NULL;
3017 bpf_link_free_id(primer->id);
3018 fput(primer->file);
3019 put_unused_fd(primer->fd);
3020}
3021
3022void bpf_link_inc(struct bpf_link *link)
3023{
3024 atomic64_inc(&link->refcnt);
3025}
3026
3027static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
3028{
3029 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
3030
3031 /* free bpf_link and its containing memory */
3032 link->ops->dealloc_deferred(link);
3033}
3034
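/* Invoked after an RCU Tasks Trace grace period. If that grace period also
 * implies a classic RCU grace period, the link can be freed immediately;
 * otherwise one more call_rcu() hop is chained in before deallocation.
 */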
3035static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
3036{
3037 if (rcu_trace_implies_rcu_gp())
3038 bpf_link_defer_dealloc_rcu_gp(rcu);
3039 else
3040 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
3041}
3042
3043/* bpf_link_free is guaranteed to be called from process context */
3044static void bpf_link_free(struct bpf_link *link)
3045{
3046 bool sleepable = false;
3047
3048 bpf_link_free_id(link->id);
3049 if (link->prog) {
3050 sleepable = link->prog->sleepable;
3051 /* detach BPF program, clean up used resources */
3052 link->ops->release(link);
3053 bpf_prog_put(link->prog);
3054 }
3055 if (link->ops->dealloc_deferred) {
3056 /* schedule BPF link deallocation; if underlying BPF program
3057 * is sleepable, we need to first wait for RCU tasks trace
3058 * sync, then go through "classic" RCU grace period
3059 */
3060 if (sleepable)
3061 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
3062 else
3063 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
3064 }
3065 if (link->ops->dealloc)
3066 link->ops->dealloc(link);
3067}
3068
3069static void bpf_link_put_deferred(struct work_struct *work)
3070{
3071 struct bpf_link *link = container_of(work, struct bpf_link, work);
3072
3073 bpf_link_free(link);
3074}
3075
/* bpf_link_put() might be called from atomic context, but freeing the link
 * needs a sleepable context to acquire sleeping locks, so the actual cleanup
 * is always deferred to the system workqueue.
 */
3079void bpf_link_put(struct bpf_link *link)
3080{
3081 if (!atomic64_dec_and_test(&link->refcnt))
3082 return;
3083
3084 INIT_WORK(&link->work, bpf_link_put_deferred);
3085 schedule_work(&link->work);
3086}
3087EXPORT_SYMBOL(bpf_link_put);
3088
3089static void bpf_link_put_direct(struct bpf_link *link)
3090{
3091 if (!atomic64_dec_and_test(&link->refcnt))
3092 return;
3093 bpf_link_free(link);
3094}
3095
3096static int bpf_link_release(struct inode *inode, struct file *filp)
3097{
3098 struct bpf_link *link = filp->private_data;
3099
3100 bpf_link_put_direct(link);
3101 return 0;
3102}
3103
3104#ifdef CONFIG_PROC_FS
3105#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
3106#define BPF_MAP_TYPE(_id, _ops)
3107#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
3108static const char *bpf_link_type_strs[] = {
3109 [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
3110#include <linux/bpf_types.h>
3111};
3112#undef BPF_PROG_TYPE
3113#undef BPF_MAP_TYPE
3114#undef BPF_LINK_TYPE
3115
3116static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
3117{
3118 const struct bpf_link *link = filp->private_data;
3119 const struct bpf_prog *prog = link->prog;
3120 char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
3121
3122 seq_printf(m,
3123 "link_type:\t%s\n"
3124 "link_id:\t%u\n",
3125 bpf_link_type_strs[link->type],
3126 link->id);
3127 if (prog) {
3128 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
3129 seq_printf(m,
3130 "prog_tag:\t%s\n"
3131 "prog_id:\t%u\n",
3132 prog_tag,
3133 prog->aux->id);
3134 }
3135 if (link->ops->show_fdinfo)
3136 link->ops->show_fdinfo(link, m);
3137}
3138#endif
3139
3140static const struct file_operations bpf_link_fops = {
3141#ifdef CONFIG_PROC_FS
3142 .show_fdinfo = bpf_link_show_fdinfo,
3143#endif
3144 .release = bpf_link_release,
3145 .read = bpf_dummy_read,
3146 .write = bpf_dummy_write,
3147};
3148
3149static int bpf_link_alloc_id(struct bpf_link *link)
3150{
3151 int id;
3152
3153 idr_preload(GFP_KERNEL);
3154 spin_lock_bh(&link_idr_lock);
3155 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
3156 spin_unlock_bh(&link_idr_lock);
3157 idr_preload_end();
3158
3159 return id;
3160}
3161
/* Prepare bpf_link to be exposed to user-space by allocating an anon_inode
 * file, reserving an unused FD and allocating an ID from link_idr. This is
 * to be paired with bpf_link_settle() to install the FD and ID and expose
 * the bpf_link to user-space, if the bpf_link is successfully attached. If
 * not, the bpf_link and pre-allocated resources are to be freed with a
 * bpf_link_cleanup() call. All the transient state is passed around in
 * struct bpf_link_primer.
 * This is the preferred way to create and initialize a bpf_link, especially
 * when there are complicated and expensive operations between creating the
 * bpf_link itself and attaching it to the BPF hook. By using bpf_link_prime()
 * and bpf_link_settle(), kernel code using bpf_link doesn't have to perform
 * expensive (and potentially failing) roll-back operations in the rare case
 * that the file, FD, or ID can't be allocated.
 */
3175int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
3176{
3177 struct file *file;
3178 int fd, id;
3179
3180 fd = get_unused_fd_flags(O_CLOEXEC);
3181 if (fd < 0)
		return fd;

3185 id = bpf_link_alloc_id(link);
3186 if (id < 0) {
3187 put_unused_fd(fd);
3188 return id;
3189 }
3190
3191 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
3192 if (IS_ERR(file)) {
3193 bpf_link_free_id(id);
3194 put_unused_fd(fd);
3195 return PTR_ERR(file);
3196 }
3197
3198 primer->link = link;
3199 primer->file = file;
3200 primer->fd = fd;
3201 primer->id = id;
3202 return 0;
3203}
3204
3205int bpf_link_settle(struct bpf_link_primer *primer)
3206{
3207 /* make bpf_link fetchable by ID */
3208 spin_lock_bh(&link_idr_lock);
3209 primer->link->id = primer->id;
3210 spin_unlock_bh(&link_idr_lock);
3211 /* make bpf_link fetchable by FD */
3212 fd_install(primer->fd, primer->file);
3213 /* pass through installed FD */
3214 return primer->fd;
3215}
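
/* A minimal usage sketch of the primer API (my_attach_hook() below is a
 * hypothetical attach step; the bpf_link_* calls are the helpers above):
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = my_attach_hook(link);
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);	(does not put link->prog)
 *		return err;
 *	}
 *	return bpf_link_settle(&link_primer);	(returns the installed FD)
 */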
3216
3217int bpf_link_new_fd(struct bpf_link *link)
3218{
3219 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
3220}
3221
3222struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3223{
3224 struct fd f = fdget(ufd);
3225 struct bpf_link *link;
3226
3227 if (!f.file)
3228 return ERR_PTR(-EBADF);
3229 if (f.file->f_op != &bpf_link_fops) {
3230 fdput(f);
3231 return ERR_PTR(-EINVAL);
3232 }
3233
3234 link = f.file->private_data;
3235 bpf_link_inc(link);
3236 fdput(f);
3237
3238 return link;
3239}
3240EXPORT_SYMBOL(bpf_link_get_from_fd);
3241
3242static void bpf_tracing_link_release(struct bpf_link *link)
3243{
3244 struct bpf_tracing_link *tr_link =
3245 container_of(link, struct bpf_tracing_link, link.link);
3246
3247 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3248 tr_link->trampoline));
3249
3250 bpf_trampoline_put(tr_link->trampoline);
3251
3252 /* tgt_prog is NULL if target is a kernel function */
3253 if (tr_link->tgt_prog)
3254 bpf_prog_put(tr_link->tgt_prog);
3255}
3256
3257static void bpf_tracing_link_dealloc(struct bpf_link *link)
3258{
3259 struct bpf_tracing_link *tr_link =
3260 container_of(link, struct bpf_tracing_link, link.link);
3261
3262 kfree(tr_link);
3263}
3264
3265static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3266 struct seq_file *seq)
3267{
3268 struct bpf_tracing_link *tr_link =
3269 container_of(link, struct bpf_tracing_link, link.link);
3270 u32 target_btf_id, target_obj_id;
3271
3272 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3273 &target_obj_id, &target_btf_id);
3274 seq_printf(seq,
3275 "attach_type:\t%d\n"
3276 "target_obj_id:\t%u\n"
3277 "target_btf_id:\t%u\n",
3278 tr_link->attach_type,
3279 target_obj_id,
3280 target_btf_id);
3281}
3282
3283static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3284 struct bpf_link_info *info)
3285{
3286 struct bpf_tracing_link *tr_link =
3287 container_of(link, struct bpf_tracing_link, link.link);
3288
3289 info->tracing.attach_type = tr_link->attach_type;
3290 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3291 &info->tracing.target_obj_id,
3292 &info->tracing.target_btf_id);
3293
3294 return 0;
3295}
3296
3297static const struct bpf_link_ops bpf_tracing_link_lops = {
3298 .release = bpf_tracing_link_release,
3299 .dealloc = bpf_tracing_link_dealloc,
3300 .show_fdinfo = bpf_tracing_link_show_fdinfo,
3301 .fill_link_info = bpf_tracing_link_fill_link_info,
3302};
3303
3304static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3305 int tgt_prog_fd,
3306 u32 btf_id,
3307 u64 bpf_cookie)
3308{
3309 struct bpf_link_primer link_primer;
3310 struct bpf_prog *tgt_prog = NULL;
3311 struct bpf_trampoline *tr = NULL;
3312 struct bpf_tracing_link *link;
3313 u64 key = 0;
3314 int err;
3315
3316 switch (prog->type) {
3317 case BPF_PROG_TYPE_TRACING:
3318 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3319 prog->expected_attach_type != BPF_TRACE_FEXIT &&
3320 prog->expected_attach_type != BPF_MODIFY_RETURN) {
3321 err = -EINVAL;
3322 goto out_put_prog;
3323 }
3324 break;
3325 case BPF_PROG_TYPE_EXT:
3326 if (prog->expected_attach_type != 0) {
3327 err = -EINVAL;
3328 goto out_put_prog;
3329 }
3330 break;
3331 case BPF_PROG_TYPE_LSM:
3332 if (prog->expected_attach_type != BPF_LSM_MAC) {
3333 err = -EINVAL;
3334 goto out_put_prog;
3335 }
3336 break;
3337 default:
3338 err = -EINVAL;
3339 goto out_put_prog;
3340 }
3341
3342 if (!!tgt_prog_fd != !!btf_id) {
3343 err = -EINVAL;
3344 goto out_put_prog;
3345 }
3346
3347 if (tgt_prog_fd) {
		/*
		 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If
		 * this is ever changed to also allow them for
		 * BPF_PROG_TYPE_TRACING, do not forget to update how the
		 * attach_tracing_prog flag is set.
		 */
3354 if (prog->type != BPF_PROG_TYPE_EXT) {
3355 err = -EINVAL;
3356 goto out_put_prog;
3357 }
3358
3359 tgt_prog = bpf_prog_get(tgt_prog_fd);
3360 if (IS_ERR(tgt_prog)) {
3361 err = PTR_ERR(tgt_prog);
3362 tgt_prog = NULL;
3363 goto out_put_prog;
3364 }
3365
3366 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3367 }
3368
3369 link = kzalloc(sizeof(*link), GFP_USER);
3370 if (!link) {
3371 err = -ENOMEM;
3372 goto out_put_prog;
3373 }
3374 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3375 &bpf_tracing_link_lops, prog);
3376 link->attach_type = prog->expected_attach_type;
3377 link->link.cookie = bpf_cookie;
3378
3379 mutex_lock(&prog->aux->dst_mutex);
3380
	/* There are a few possible cases here:
	 *
	 * - if prog->aux->dst_trampoline is set, the program was just loaded
	 *   and not yet attached to anything, so we can use the values stored
	 *   in prog->aux
	 *
	 * - if prog->aux->dst_trampoline is NULL, the program has already been
	 *   attached to a target and its initial target was cleared (below)
	 *
	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
	 *   target_btf_id using the link_create API
	 *
	 * - if tgt_prog == NULL, this function was called using the old
	 *   raw_tracepoint_open API, and we need a target from prog->aux
	 *
	 * - if both prog->aux->dst_trampoline and tgt_prog are NULL, the
	 *   program was detached and is being re-attached
	 *
	 * - if prog->aux->dst_trampoline is NULL and both tgt_prog and
	 *   prog->aux->attach_btf are NULL, the program was already attached
	 *   and the user did not provide tgt_prog_fd, so we have no way to
	 *   find or create a trampoline
	 */
3403 if (!prog->aux->dst_trampoline && !tgt_prog) {
		/*
		 * Allow re-attach for TRACING and LSM programs. If it's
		 * currently linked, bpf_trampoline_link_prog will fail.
		 * EXT programs need to specify tgt_prog_fd, so they
		 * re-attach via a separate code path.
		 */
3410 if (prog->type != BPF_PROG_TYPE_TRACING &&
3411 prog->type != BPF_PROG_TYPE_LSM) {
3412 err = -EINVAL;
3413 goto out_unlock;
3414 }
3415 /* We can allow re-attach only if we have valid attach_btf. */
3416 if (!prog->aux->attach_btf) {
3417 err = -EINVAL;
3418 goto out_unlock;
3419 }
3420 btf_id = prog->aux->attach_btf_id;
3421 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3422 }
3423
3424 if (!prog->aux->dst_trampoline ||
3425 (key && key != prog->aux->dst_trampoline->key)) {
3426 /* If there is no saved target, or the specified target is
3427 * different from the destination specified at load time, we
3428 * need a new trampoline and a check for compatibility
3429 */
3430 struct bpf_attach_target_info tgt_info = {};
3431
3432 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3433 &tgt_info);
3434 if (err)
3435 goto out_unlock;
3436
3437 if (tgt_info.tgt_mod) {
3438 module_put(prog->aux->mod);
3439 prog->aux->mod = tgt_info.tgt_mod;
3440 }
3441
3442 tr = bpf_trampoline_get(key, &tgt_info);
3443 if (!tr) {
3444 err = -ENOMEM;
3445 goto out_unlock;
3446 }
3447 } else {
3448 /* The caller didn't specify a target, or the target was the
3449 * same as the destination supplied during program load. This
3450 * means we can reuse the trampoline and reference from program
3451 * load time, and there is no need to allocate a new one. This
3452 * can only happen once for any program, as the saved values in
3453 * prog->aux are cleared below.
3454 */
3455 tr = prog->aux->dst_trampoline;
3456 tgt_prog = prog->aux->dst_prog;
3457 }
3458
3459 err = bpf_link_prime(&link->link.link, &link_primer);
3460 if (err)
3461 goto out_unlock;
3462
3463 err = bpf_trampoline_link_prog(&link->link, tr);
3464 if (err) {
3465 bpf_link_cleanup(&link_primer);
3466 link = NULL;
3467 goto out_unlock;
3468 }
3469
3470 link->tgt_prog = tgt_prog;
3471 link->trampoline = tr;
3472
3473 /* Always clear the trampoline and target prog from prog->aux to make
3474 * sure the original attach destination is not kept alive after a
3475 * program is (re-)attached to another target.
3476 */
3477 if (prog->aux->dst_prog &&
3478 (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3479 /* got extra prog ref from syscall, or attaching to different prog */
3480 bpf_prog_put(prog->aux->dst_prog);
3481 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3482 /* we allocated a new trampoline, so free the old one */
3483 bpf_trampoline_put(prog->aux->dst_trampoline);
3484
3485 prog->aux->dst_prog = NULL;
3486 prog->aux->dst_trampoline = NULL;
3487 mutex_unlock(&prog->aux->dst_mutex);
3488
3489 return bpf_link_settle(&link_primer);
3490out_unlock:
3491 if (tr && tr != prog->aux->dst_trampoline)
3492 bpf_trampoline_put(tr);
3493 mutex_unlock(&prog->aux->dst_mutex);
3494 kfree(link);
3495out_put_prog:
3496 if (tgt_prog_fd && tgt_prog)
3497 bpf_prog_put(tgt_prog);
3498 return err;
3499}
3500
3501struct bpf_raw_tp_link {
3502 struct bpf_link link;
3503 struct bpf_raw_event_map *btp;
3504};
3505
3506static void bpf_raw_tp_link_release(struct bpf_link *link)
3507{
3508 struct bpf_raw_tp_link *raw_tp =
3509 container_of(link, struct bpf_raw_tp_link, link);
3510
3511 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
3512 bpf_put_raw_tracepoint(raw_tp->btp);
3513}
3514
3515static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3516{
3517 struct bpf_raw_tp_link *raw_tp =
3518 container_of(link, struct bpf_raw_tp_link, link);
3519
3520 kfree(raw_tp);
3521}
3522
3523static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3524 struct seq_file *seq)
3525{
3526 struct bpf_raw_tp_link *raw_tp_link =
3527 container_of(link, struct bpf_raw_tp_link, link);
3528
3529 seq_printf(seq,
3530 "tp_name:\t%s\n",
3531 raw_tp_link->btp->tp->name);
3532}
3533
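/* Copy the NUL-terminated string @buf of length @len into the user buffer
 * @ubuf of size @ulen. The destination is always NUL-terminated; if the
 * buffer is too small, the string is truncated and -ENOSPC is returned so
 * the caller can report the size that would have been needed.
 */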
3534static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3535 u32 len)
3536{
3537 if (ulen >= len + 1) {
3538 if (copy_to_user(ubuf, buf, len + 1))
3539 return -EFAULT;
3540 } else {
3541 char zero = '\0';
3542
3543 if (copy_to_user(ubuf, buf, ulen - 1))
3544 return -EFAULT;
3545 if (put_user(zero, ubuf + ulen - 1))
3546 return -EFAULT;
3547 return -ENOSPC;
3548 }
3549
3550 return 0;
3551}
3552
3553static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3554 struct bpf_link_info *info)
3555{
3556 struct bpf_raw_tp_link *raw_tp_link =
3557 container_of(link, struct bpf_raw_tp_link, link);
3558 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3559 const char *tp_name = raw_tp_link->btp->tp->name;
3560 u32 ulen = info->raw_tracepoint.tp_name_len;
3561 size_t tp_len = strlen(tp_name);
3562
3563 if (!ulen ^ !ubuf)
3564 return -EINVAL;
3565
3566 info->raw_tracepoint.tp_name_len = tp_len + 1;
3567
3568 if (!ubuf)
3569 return 0;
3570
3571 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3572}
3573
3574static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3575 .release = bpf_raw_tp_link_release,
3576 .dealloc_deferred = bpf_raw_tp_link_dealloc,
3577 .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3578 .fill_link_info = bpf_raw_tp_link_fill_link_info,
3579};
3580
3581#ifdef CONFIG_PERF_EVENTS
3582struct bpf_perf_link {
3583 struct bpf_link link;
3584 struct file *perf_file;
3585};
3586
3587static void bpf_perf_link_release(struct bpf_link *link)
3588{
3589 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3590 struct perf_event *event = perf_link->perf_file->private_data;
3591
3592 perf_event_free_bpf_prog(event);
3593 fput(perf_link->perf_file);
3594}
3595
3596static void bpf_perf_link_dealloc(struct bpf_link *link)
3597{
3598 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3599
3600 kfree(perf_link);
3601}
3602
3603static int bpf_perf_link_fill_common(const struct perf_event *event,
3604 char __user *uname, u32 ulen,
3605 u64 *probe_offset, u64 *probe_addr,
3606 u32 *fd_type, unsigned long *missed)
3607{
3608 const char *buf;
3609 u32 prog_id;
3610 size_t len;
3611 int err;
3612
3613 if (!ulen ^ !uname)
3614 return -EINVAL;
3615
3616 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3617 probe_offset, probe_addr, missed);
3618 if (err)
3619 return err;
3620 if (!uname)
3621 return 0;
3622 if (buf) {
3623 len = strlen(buf);
3624 err = bpf_copy_to_user(uname, buf, ulen, len);
3625 if (err)
3626 return err;
3627 } else {
3628 char zero = '\0';
3629
3630 if (put_user(zero, uname))
3631 return -EFAULT;
3632 }
3633 return 0;
3634}
3635
3636#ifdef CONFIG_KPROBE_EVENTS
3637static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3638 struct bpf_link_info *info)
3639{
3640 unsigned long missed;
3641 char __user *uname;
3642 u64 addr, offset;
3643 u32 ulen, type;
3644 int err;
3645
3646 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3647 ulen = info->perf_event.kprobe.name_len;
3648 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3649 &type, &missed);
3650 if (err)
3651 return err;
3652 if (type == BPF_FD_TYPE_KRETPROBE)
3653 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3654 else
3655 info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3656
3657 info->perf_event.kprobe.offset = offset;
3658 info->perf_event.kprobe.missed = missed;
3659 if (!kallsyms_show_value(current_cred()))
3660 addr = 0;
3661 info->perf_event.kprobe.addr = addr;
3662 info->perf_event.kprobe.cookie = event->bpf_cookie;
3663 return 0;
3664}
3665#endif
3666
3667#ifdef CONFIG_UPROBE_EVENTS
3668static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3669 struct bpf_link_info *info)
3670{
3671 char __user *uname;
3672 u64 addr, offset;
3673 u32 ulen, type;
3674 int err;
3675
3676 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3677 ulen = info->perf_event.uprobe.name_len;
3678 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3679 &type, NULL);
3680 if (err)
3681 return err;
3682
3683 if (type == BPF_FD_TYPE_URETPROBE)
3684 info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3685 else
3686 info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3687 info->perf_event.uprobe.offset = offset;
3688 info->perf_event.uprobe.cookie = event->bpf_cookie;
3689 return 0;
3690}
3691#endif
3692
3693static int bpf_perf_link_fill_probe(const struct perf_event *event,
3694 struct bpf_link_info *info)
3695{
3696#ifdef CONFIG_KPROBE_EVENTS
3697 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3698 return bpf_perf_link_fill_kprobe(event, info);
3699#endif
3700#ifdef CONFIG_UPROBE_EVENTS
3701 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3702 return bpf_perf_link_fill_uprobe(event, info);
3703#endif
3704 return -EOPNOTSUPP;
3705}
3706
3707static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3708 struct bpf_link_info *info)
3709{
3710 char __user *uname;
3711 u32 ulen;
3712
3713 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3714 ulen = info->perf_event.tracepoint.name_len;
3715 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3716 info->perf_event.tracepoint.cookie = event->bpf_cookie;
3717 return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
3718}
3719
3720static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3721 struct bpf_link_info *info)
3722{
3723 info->perf_event.event.type = event->attr.type;
3724 info->perf_event.event.config = event->attr.config;
3725 info->perf_event.event.cookie = event->bpf_cookie;
3726 info->perf_event.type = BPF_PERF_EVENT_EVENT;
3727 return 0;
3728}
3729
3730static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3731 struct bpf_link_info *info)
3732{
3733 struct bpf_perf_link *perf_link;
3734 const struct perf_event *event;
3735
3736 perf_link = container_of(link, struct bpf_perf_link, link);
3737 event = perf_get_event(perf_link->perf_file);
3738 if (IS_ERR(event))
3739 return PTR_ERR(event);
3740
3741 switch (event->prog->type) {
3742 case BPF_PROG_TYPE_PERF_EVENT:
3743 return bpf_perf_link_fill_perf_event(event, info);
3744 case BPF_PROG_TYPE_TRACEPOINT:
3745 return bpf_perf_link_fill_tracepoint(event, info);
3746 case BPF_PROG_TYPE_KPROBE:
3747 return bpf_perf_link_fill_probe(event, info);
3748 default:
3749 return -EOPNOTSUPP;
3750 }
3751}
3752
3753static const struct bpf_link_ops bpf_perf_link_lops = {
3754 .release = bpf_perf_link_release,
3755 .dealloc = bpf_perf_link_dealloc,
3756 .fill_link_info = bpf_perf_link_fill_link_info,
3757};
3758
3759static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3760{
3761 struct bpf_link_primer link_primer;
3762 struct bpf_perf_link *link;
3763 struct perf_event *event;
3764 struct file *perf_file;
3765 int err;
3766
3767 if (attr->link_create.flags)
3768 return -EINVAL;
3769
3770 perf_file = perf_event_get(attr->link_create.target_fd);
3771 if (IS_ERR(perf_file))
3772 return PTR_ERR(perf_file);
3773
3774 link = kzalloc(sizeof(*link), GFP_USER);
3775 if (!link) {
3776 err = -ENOMEM;
3777 goto out_put_file;
3778 }
3779 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3780 link->perf_file = perf_file;
3781
3782 err = bpf_link_prime(&link->link, &link_primer);
3783 if (err) {
3784 kfree(link);
3785 goto out_put_file;
3786 }
3787
3788 event = perf_file->private_data;
3789 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3790 if (err) {
3791 bpf_link_cleanup(&link_primer);
3792 goto out_put_file;
3793 }
3794 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3795 bpf_prog_inc(prog);
3796
3797 return bpf_link_settle(&link_primer);
3798
3799out_put_file:
3800 fput(perf_file);
3801 return err;
3802}
3803#else
3804static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3805{
3806 return -EOPNOTSUPP;
3807}
3808#endif /* CONFIG_PERF_EVENTS */
3809
3810static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3811 const char __user *user_tp_name)
3812{
3813 struct bpf_link_primer link_primer;
3814 struct bpf_raw_tp_link *link;
3815 struct bpf_raw_event_map *btp;
3816 const char *tp_name;
3817 char buf[128];
3818 int err;
3819
3820 switch (prog->type) {
3821 case BPF_PROG_TYPE_TRACING:
3822 case BPF_PROG_TYPE_EXT:
3823 case BPF_PROG_TYPE_LSM:
3824 if (user_tp_name)
3825 /* The attach point for this category of programs
3826 * should be specified via btf_id during program load.
3827 */
3828 return -EINVAL;
3829 if (prog->type == BPF_PROG_TYPE_TRACING &&
3830 prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3831 tp_name = prog->aux->attach_func_name;
3832 break;
3833 }
3834 return bpf_tracing_prog_attach(prog, 0, 0, 0);
3835 case BPF_PROG_TYPE_RAW_TRACEPOINT:
3836 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3837 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3838 return -EFAULT;
3839 buf[sizeof(buf) - 1] = 0;
3840 tp_name = buf;
3841 break;
3842 default:
3843 return -EINVAL;
3844 }
3845
3846 btp = bpf_get_raw_tracepoint(tp_name);
3847 if (!btp)
3848 return -ENOENT;
3849
3850 link = kzalloc(sizeof(*link), GFP_USER);
3851 if (!link) {
3852 err = -ENOMEM;
3853 goto out_put_btp;
3854 }
3855 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3856 &bpf_raw_tp_link_lops, prog);
3857 link->btp = btp;
3858
3859 err = bpf_link_prime(&link->link, &link_primer);
3860 if (err) {
3861 kfree(link);
3862 goto out_put_btp;
3863 }
3864
3865 err = bpf_probe_register(link->btp, prog);
3866 if (err) {
3867 bpf_link_cleanup(&link_primer);
3868 goto out_put_btp;
3869 }
3870
3871 return bpf_link_settle(&link_primer);
3872
3873out_put_btp:
3874 bpf_put_raw_tracepoint(btp);
3875 return err;
3876}
3877
3878#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
3879
3880static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3881{
3882 struct bpf_prog *prog;
3883 int fd;
3884
3885 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
3886 return -EINVAL;
3887
3888 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
3889 if (IS_ERR(prog))
3890 return PTR_ERR(prog);
3891
3892 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
3893 if (fd < 0)
3894 bpf_prog_put(prog);
3895 return fd;
3896}
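
/*
 * Usage sketch (illustrative userspace): opening a raw tracepoint link
 * for a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program. The tracepoint
 * name below is just an example, and error handling is omitted.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */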
3897
3898static enum bpf_prog_type
3899attach_type_to_prog_type(enum bpf_attach_type attach_type)
3900{
3901 switch (attach_type) {
3902 case BPF_CGROUP_INET_INGRESS:
3903 case BPF_CGROUP_INET_EGRESS:
3904 return BPF_PROG_TYPE_CGROUP_SKB;
3905 case BPF_CGROUP_INET_SOCK_CREATE:
3906 case BPF_CGROUP_INET_SOCK_RELEASE:
3907 case BPF_CGROUP_INET4_POST_BIND:
3908 case BPF_CGROUP_INET6_POST_BIND:
3909 return BPF_PROG_TYPE_CGROUP_SOCK;
3910 case BPF_CGROUP_INET4_BIND:
3911 case BPF_CGROUP_INET6_BIND:
3912 case BPF_CGROUP_INET4_CONNECT:
3913 case BPF_CGROUP_INET6_CONNECT:
3914 case BPF_CGROUP_UNIX_CONNECT:
3915 case BPF_CGROUP_INET4_GETPEERNAME:
3916 case BPF_CGROUP_INET6_GETPEERNAME:
3917 case BPF_CGROUP_UNIX_GETPEERNAME:
3918 case BPF_CGROUP_INET4_GETSOCKNAME:
3919 case BPF_CGROUP_INET6_GETSOCKNAME:
3920 case BPF_CGROUP_UNIX_GETSOCKNAME:
3921 case BPF_CGROUP_UDP4_SENDMSG:
3922 case BPF_CGROUP_UDP6_SENDMSG:
3923 case BPF_CGROUP_UNIX_SENDMSG:
3924 case BPF_CGROUP_UDP4_RECVMSG:
3925 case BPF_CGROUP_UDP6_RECVMSG:
3926 case BPF_CGROUP_UNIX_RECVMSG:
3927 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3928 case BPF_CGROUP_SOCK_OPS:
3929 return BPF_PROG_TYPE_SOCK_OPS;
3930 case BPF_CGROUP_DEVICE:
3931 return BPF_PROG_TYPE_CGROUP_DEVICE;
3932 case BPF_SK_MSG_VERDICT:
3933 return BPF_PROG_TYPE_SK_MSG;
3934 case BPF_SK_SKB_STREAM_PARSER:
3935 case BPF_SK_SKB_STREAM_VERDICT:
3936 case BPF_SK_SKB_VERDICT:
3937 return BPF_PROG_TYPE_SK_SKB;
3938 case BPF_LIRC_MODE2:
3939 return BPF_PROG_TYPE_LIRC_MODE2;
3940 case BPF_FLOW_DISSECTOR:
3941 return BPF_PROG_TYPE_FLOW_DISSECTOR;
3942 case BPF_CGROUP_SYSCTL:
3943 return BPF_PROG_TYPE_CGROUP_SYSCTL;
3944 case BPF_CGROUP_GETSOCKOPT:
3945 case BPF_CGROUP_SETSOCKOPT:
3946 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3947 case BPF_TRACE_ITER:
3948 case BPF_TRACE_RAW_TP:
3949 case BPF_TRACE_FENTRY:
3950 case BPF_TRACE_FEXIT:
3951 case BPF_MODIFY_RETURN:
3952 return BPF_PROG_TYPE_TRACING;
3953 case BPF_LSM_MAC:
3954 return BPF_PROG_TYPE_LSM;
3955 case BPF_SK_LOOKUP:
3956 return BPF_PROG_TYPE_SK_LOOKUP;
3957 case BPF_XDP:
3958 return BPF_PROG_TYPE_XDP;
3959 case BPF_LSM_CGROUP:
3960 return BPF_PROG_TYPE_LSM;
3961 case BPF_TCX_INGRESS:
3962 case BPF_TCX_EGRESS:
3963 case BPF_NETKIT_PRIMARY:
3964 case BPF_NETKIT_PEER:
3965 return BPF_PROG_TYPE_SCHED_CLS;
3966 default:
3967 return BPF_PROG_TYPE_UNSPEC;
3968 }
3969}
3970
3971static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3972 enum bpf_attach_type attach_type)
3973{
3974 enum bpf_prog_type ptype;
3975
3976 switch (prog->type) {
3977 case BPF_PROG_TYPE_CGROUP_SOCK:
3978 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3979 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3980 case BPF_PROG_TYPE_SK_LOOKUP:
3981 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3982 case BPF_PROG_TYPE_CGROUP_SKB:
3983 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
3984 /* cg-skb progs can be loaded by an unprivileged user, so
3985 * permissions must be checked at attach time.
3986 */
3987 return -EPERM;
3988
3989 ptype = attach_type_to_prog_type(attach_type);
3990 if (prog->type != ptype)
3991 return -EINVAL;
3992
3993 return prog->enforce_expected_attach_type &&
3994 prog->expected_attach_type != attach_type ?
3995 -EINVAL : 0;
3996 case BPF_PROG_TYPE_EXT:
3997 return 0;
3998 case BPF_PROG_TYPE_NETFILTER:
3999 if (attach_type != BPF_NETFILTER)
4000 return -EINVAL;
4001 return 0;
4002 case BPF_PROG_TYPE_PERF_EVENT:
4003 case BPF_PROG_TYPE_TRACEPOINT:
4004 if (attach_type != BPF_PERF_EVENT)
4005 return -EINVAL;
4006 return 0;
4007 case BPF_PROG_TYPE_KPROBE:
4008 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
4009 attach_type != BPF_TRACE_KPROBE_MULTI)
4010 return -EINVAL;
4011 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
4012 attach_type != BPF_TRACE_UPROBE_MULTI)
4013 return -EINVAL;
4014 if (attach_type != BPF_PERF_EVENT &&
4015 attach_type != BPF_TRACE_KPROBE_MULTI &&
4016 attach_type != BPF_TRACE_UPROBE_MULTI)
4017 return -EINVAL;
4018 return 0;
4019 case BPF_PROG_TYPE_SCHED_CLS:
4020 if (attach_type != BPF_TCX_INGRESS &&
4021 attach_type != BPF_TCX_EGRESS &&
4022 attach_type != BPF_NETKIT_PRIMARY &&
4023 attach_type != BPF_NETKIT_PEER)
4024 return -EINVAL;
4025 return 0;
4026 default:
4027 ptype = attach_type_to_prog_type(attach_type);
4028 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
4029 return -EINVAL;
4030 return 0;
4031 }
4032}
4033
4034#define BPF_PROG_ATTACH_LAST_FIELD expected_revision
4035
4036#define BPF_F_ATTACH_MASK_BASE \
4037 (BPF_F_ALLOW_OVERRIDE | \
4038 BPF_F_ALLOW_MULTI | \
4039 BPF_F_REPLACE)
4040
4041#define BPF_F_ATTACH_MASK_MPROG \
4042 (BPF_F_REPLACE | \
4043 BPF_F_BEFORE | \
4044 BPF_F_AFTER | \
4045 BPF_F_ID | \
4046 BPF_F_LINK)
4047
4048static int bpf_prog_attach(const union bpf_attr *attr)
4049{
4050 enum bpf_prog_type ptype;
4051 struct bpf_prog *prog;
4052 int ret;
4053
4054 if (CHECK_ATTR(BPF_PROG_ATTACH))
4055 return -EINVAL;
4056
4057 ptype = attach_type_to_prog_type(attr->attach_type);
4058 if (ptype == BPF_PROG_TYPE_UNSPEC)
4059 return -EINVAL;
4060 if (bpf_mprog_supported(ptype)) {
4061 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4062 return -EINVAL;
4063 } else {
4064 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
4065 return -EINVAL;
4066 if (attr->relative_fd ||
4067 attr->expected_revision)
4068 return -EINVAL;
4069 }
4070
4071 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4072 if (IS_ERR(prog))
4073 return PTR_ERR(prog);
4074
4075 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
4076 bpf_prog_put(prog);
4077 return -EINVAL;
4078 }
4079
4080 switch (ptype) {
4081 case BPF_PROG_TYPE_SK_SKB:
4082 case BPF_PROG_TYPE_SK_MSG:
4083 ret = sock_map_get_from_fd(attr, prog);
4084 break;
4085 case BPF_PROG_TYPE_LIRC_MODE2:
4086 ret = lirc_prog_attach(attr, prog);
4087 break;
4088 case BPF_PROG_TYPE_FLOW_DISSECTOR:
4089 ret = netns_bpf_prog_attach(attr, prog);
4090 break;
4091 case BPF_PROG_TYPE_CGROUP_DEVICE:
4092 case BPF_PROG_TYPE_CGROUP_SKB:
4093 case BPF_PROG_TYPE_CGROUP_SOCK:
4094 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4095 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4096 case BPF_PROG_TYPE_CGROUP_SYSCTL:
4097 case BPF_PROG_TYPE_SOCK_OPS:
4098 case BPF_PROG_TYPE_LSM:
4099 if (ptype == BPF_PROG_TYPE_LSM &&
4100 prog->expected_attach_type != BPF_LSM_CGROUP)
4101 ret = -EINVAL;
4102 else
4103 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
4104 break;
4105 case BPF_PROG_TYPE_SCHED_CLS:
4106 if (attr->attach_type == BPF_TCX_INGRESS ||
4107 attr->attach_type == BPF_TCX_EGRESS)
4108 ret = tcx_prog_attach(attr, prog);
4109 else
4110 ret = netkit_prog_attach(attr, prog);
4111 break;
4112 default:
4113 ret = -EINVAL;
4114 }
4115
4116 if (ret)
4117 bpf_prog_put(prog);
4118 return ret;
4119}
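
/*
 * Usage sketch (illustrative userspace): the legacy fd-based attach of a
 * cgroup skb program, served by bpf_prog_attach() above; the link-based
 * BPF_LINK_CREATE is the modern alternative. cgroup_fd and prog_fd are
 * assumed to be valid; error handling is omitted.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */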
4120
4121#define BPF_PROG_DETACH_LAST_FIELD expected_revision
4122
4123static int bpf_prog_detach(const union bpf_attr *attr)
4124{
4125 struct bpf_prog *prog = NULL;
4126 enum bpf_prog_type ptype;
4127 int ret;
4128
4129 if (CHECK_ATTR(BPF_PROG_DETACH))
4130 return -EINVAL;
4131
4132 ptype = attach_type_to_prog_type(attr->attach_type);
4133 if (bpf_mprog_supported(ptype)) {
4134 if (ptype == BPF_PROG_TYPE_UNSPEC)
4135 return -EINVAL;
4136 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4137 return -EINVAL;
4138 if (attr->attach_bpf_fd) {
4139 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4140 if (IS_ERR(prog))
4141 return PTR_ERR(prog);
4142 }
4143 } else if (attr->attach_flags ||
4144 attr->relative_fd ||
4145 attr->expected_revision) {
4146 return -EINVAL;
4147 }
4148
4149 switch (ptype) {
4150 case BPF_PROG_TYPE_SK_MSG:
4151 case BPF_PROG_TYPE_SK_SKB:
4152 ret = sock_map_prog_detach(attr, ptype);
4153 break;
4154 case BPF_PROG_TYPE_LIRC_MODE2:
4155 ret = lirc_prog_detach(attr);
4156 break;
4157 case BPF_PROG_TYPE_FLOW_DISSECTOR:
4158 ret = netns_bpf_prog_detach(attr, ptype);
4159 break;
4160 case BPF_PROG_TYPE_CGROUP_DEVICE:
4161 case BPF_PROG_TYPE_CGROUP_SKB:
4162 case BPF_PROG_TYPE_CGROUP_SOCK:
4163 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4164 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4165 case BPF_PROG_TYPE_CGROUP_SYSCTL:
4166 case BPF_PROG_TYPE_SOCK_OPS:
4167 case BPF_PROG_TYPE_LSM:
4168 ret = cgroup_bpf_prog_detach(attr, ptype);
4169 break;
4170 case BPF_PROG_TYPE_SCHED_CLS:
4171 if (attr->attach_type == BPF_TCX_INGRESS ||
4172 attr->attach_type == BPF_TCX_EGRESS)
4173 ret = tcx_prog_detach(attr, prog);
4174 else
4175 ret = netkit_prog_detach(attr, prog);
4176 break;
4177 default:
4178 ret = -EINVAL;
4179 }
4180
4181 if (prog)
4182 bpf_prog_put(prog);
4183 return ret;
4184}
4185
4186#define BPF_PROG_QUERY_LAST_FIELD query.revision
4187
4188static int bpf_prog_query(const union bpf_attr *attr,
4189 union bpf_attr __user *uattr)
4190{
4191 if (!bpf_net_capable())
4192 return -EPERM;
4193 if (CHECK_ATTR(BPF_PROG_QUERY))
4194 return -EINVAL;
4195 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
4196 return -EINVAL;
4197
4198 switch (attr->query.attach_type) {
4199 case BPF_CGROUP_INET_INGRESS:
4200 case BPF_CGROUP_INET_EGRESS:
4201 case BPF_CGROUP_INET_SOCK_CREATE:
4202 case BPF_CGROUP_INET_SOCK_RELEASE:
4203 case BPF_CGROUP_INET4_BIND:
4204 case BPF_CGROUP_INET6_BIND:
4205 case BPF_CGROUP_INET4_POST_BIND:
4206 case BPF_CGROUP_INET6_POST_BIND:
4207 case BPF_CGROUP_INET4_CONNECT:
4208 case BPF_CGROUP_INET6_CONNECT:
4209 case BPF_CGROUP_UNIX_CONNECT:
4210 case BPF_CGROUP_INET4_GETPEERNAME:
4211 case BPF_CGROUP_INET6_GETPEERNAME:
4212 case BPF_CGROUP_UNIX_GETPEERNAME:
4213 case BPF_CGROUP_INET4_GETSOCKNAME:
4214 case BPF_CGROUP_INET6_GETSOCKNAME:
4215 case BPF_CGROUP_UNIX_GETSOCKNAME:
4216 case BPF_CGROUP_UDP4_SENDMSG:
4217 case BPF_CGROUP_UDP6_SENDMSG:
4218 case BPF_CGROUP_UNIX_SENDMSG:
4219 case BPF_CGROUP_UDP4_RECVMSG:
4220 case BPF_CGROUP_UDP6_RECVMSG:
4221 case BPF_CGROUP_UNIX_RECVMSG:
4222 case BPF_CGROUP_SOCK_OPS:
4223 case BPF_CGROUP_DEVICE:
4224 case BPF_CGROUP_SYSCTL:
4225 case BPF_CGROUP_GETSOCKOPT:
4226 case BPF_CGROUP_SETSOCKOPT:
4227 case BPF_LSM_CGROUP:
4228 return cgroup_bpf_prog_query(attr, uattr);
4229 case BPF_LIRC_MODE2:
4230 return lirc_prog_query(attr, uattr);
4231 case BPF_FLOW_DISSECTOR:
4232 case BPF_SK_LOOKUP:
4233 return netns_bpf_prog_query(attr, uattr);
4234 case BPF_SK_SKB_STREAM_PARSER:
4235 case BPF_SK_SKB_STREAM_VERDICT:
4236 case BPF_SK_MSG_VERDICT:
4237 case BPF_SK_SKB_VERDICT:
4238 return sock_map_bpf_prog_query(attr, uattr);
4239 case BPF_TCX_INGRESS:
4240 case BPF_TCX_EGRESS:
4241 return tcx_prog_query(attr, uattr);
4242 case BPF_NETKIT_PRIMARY:
4243 case BPF_NETKIT_PEER:
4244 return netkit_prog_query(attr, uattr);
4245 default:
4246 return -EINVAL;
4247 }
4248}
4249
4250#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
4251
4252static int bpf_prog_test_run(const union bpf_attr *attr,
4253 union bpf_attr __user *uattr)
4254{
4255 struct bpf_prog *prog;
4256 int ret = -ENOTSUPP;
4257
4258 if (CHECK_ATTR(BPF_PROG_TEST_RUN))
4259 return -EINVAL;
4260
4261 if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
4262 (!attr->test.ctx_size_in && attr->test.ctx_in))
4263 return -EINVAL;
4264
4265 if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
4266 (!attr->test.ctx_size_out && attr->test.ctx_out))
4267 return -EINVAL;
4268
4269 prog = bpf_prog_get(attr->test.prog_fd);
4270 if (IS_ERR(prog))
4271 return PTR_ERR(prog);
4272
4273 if (prog->aux->ops->test_run)
4274 ret = prog->aux->ops->test_run(prog, attr, uattr);
4275
4276 bpf_prog_put(prog);
4277 return ret;
4278}
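
/*
 * Usage sketch (illustrative userspace): running a program once against
 * a caller-supplied packet buffer via BPF_PROG_TEST_RUN. pkt/pkt_len are
 * assumptions; on success the kernel fills attr.test.retval and
 * attr.test.duration.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = pkt_len;
 *	attr.test.repeat = 1;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 */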
4279
4280#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
4281
4282static int bpf_obj_get_next_id(const union bpf_attr *attr,
4283 union bpf_attr __user *uattr,
4284 struct idr *idr,
4285 spinlock_t *lock)
4286{
4287 u32 next_id = attr->start_id;
4288 int err = 0;
4289
4290 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
4291 return -EINVAL;
4292
4293 if (!capable(CAP_SYS_ADMIN))
4294 return -EPERM;
4295
4296 next_id++;
4297 spin_lock_bh(lock);
4298 if (!idr_get_next(idr, &next_id))
4299 err = -ENOENT;
4300 spin_unlock_bh(lock);
4301
4302 if (!err)
4303 err = put_user(next_id, &uattr->next_id);
4304
4305 return err;
4306}
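
/*
 * Usage sketch (illustrative userspace): walking all program IDs with
 * BPF_PROG_GET_NEXT_ID; the call fails with -ENOENT once the IDR is
 * exhausted. The same pattern serves maps, BTF objects and links via
 * their respective *_GET_NEXT_ID commands.
 *
 *	__u32 id = 0;
 *	union bpf_attr attr = {};
 *
 *	for (;;) {
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			    sizeof(attr)))
 *			break;
 *		id = attr.next_id;
 *	}
 */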
4307
4308struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
4309{
4310 struct bpf_map *map;
4311
4312 spin_lock_bh(&map_idr_lock);
4313again:
4314 map = idr_get_next(&map_idr, id);
4315 if (map) {
4316 map = __bpf_map_inc_not_zero(map, false);
4317 if (IS_ERR(map)) {
4318 (*id)++;
4319 goto again;
4320 }
4321 }
4322 spin_unlock_bh(&map_idr_lock);
4323
4324 return map;
4325}
4326
4327struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
4328{
4329 struct bpf_prog *prog;
4330
4331 spin_lock_bh(&prog_idr_lock);
4332again:
4333 prog = idr_get_next(&prog_idr, id);
4334 if (prog) {
4335 prog = bpf_prog_inc_not_zero(prog);
4336 if (IS_ERR(prog)) {
4337 (*id)++;
4338 goto again;
4339 }
4340 }
4341 spin_unlock_bh(&prog_idr_lock);
4342
4343 return prog;
4344}
4345
4346#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
4347
4348struct bpf_prog *bpf_prog_by_id(u32 id)
4349{
4350 struct bpf_prog *prog;
4351
4352 if (!id)
4353 return ERR_PTR(-ENOENT);
4354
4355 spin_lock_bh(&prog_idr_lock);
4356 prog = idr_find(&prog_idr, id);
4357 if (prog)
4358 prog = bpf_prog_inc_not_zero(prog);
4359 else
4360 prog = ERR_PTR(-ENOENT);
4361 spin_unlock_bh(&prog_idr_lock);
4362 return prog;
4363}
4364
4365static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
4366{
4367 struct bpf_prog *prog;
4368 u32 id = attr->prog_id;
4369 int fd;
4370
4371 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
4372 return -EINVAL;
4373
4374 if (!capable(CAP_SYS_ADMIN))
4375 return -EPERM;
4376
4377 prog = bpf_prog_by_id(id);
4378 if (IS_ERR(prog))
4379 return PTR_ERR(prog);
4380
4381 fd = bpf_prog_new_fd(prog);
4382 if (fd < 0)
4383 bpf_prog_put(prog);
4384
4385 return fd;
4386}
4387
4388#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
4389
4390static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
4391{
4392 struct bpf_map *map;
4393 u32 id = attr->map_id;
4394 int f_flags;
4395 int fd;
4396
4397 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
4398 attr->open_flags & ~BPF_OBJ_FLAG_MASK)
4399 return -EINVAL;
4400
4401 if (!capable(CAP_SYS_ADMIN))
4402 return -EPERM;
4403
4404 f_flags = bpf_get_file_flag(attr->open_flags);
4405 if (f_flags < 0)
4406 return f_flags;
4407
4408 spin_lock_bh(&map_idr_lock);
4409 map = idr_find(&map_idr, id);
4410 if (map)
4411 map = __bpf_map_inc_not_zero(map, true);
4412 else
4413 map = ERR_PTR(-ENOENT);
4414 spin_unlock_bh(&map_idr_lock);
4415
4416 if (IS_ERR(map))
4417 return PTR_ERR(map);
4418
4419 fd = bpf_map_new_fd(map, f_flags);
4420 if (fd < 0)
4421 bpf_map_put_with_uref(map);
4422
4423 return fd;
4424}
4425
4426static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4427 unsigned long addr, u32 *off,
4428 u32 *type)
4429{
4430 const struct bpf_map *map;
4431 int i;
4432
4433 mutex_lock(&prog->aux->used_maps_mutex);
4434 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
4435 map = prog->aux->used_maps[i];
4436 if (map == (void *)addr) {
4437 *type = BPF_PSEUDO_MAP_FD;
4438 goto out;
4439 }
4440 if (!map->ops->map_direct_value_meta)
4441 continue;
4442 if (!map->ops->map_direct_value_meta(map, addr, off)) {
4443 *type = BPF_PSEUDO_MAP_VALUE;
4444 goto out;
4445 }
4446 }
4447 map = NULL;
4448
4449out:
4450 mutex_unlock(&prog->aux->used_maps_mutex);
4451 return map;
4452}
4453
4454static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4455 const struct cred *f_cred)
4456{
4457 const struct bpf_map *map;
4458 struct bpf_insn *insns;
4459 u32 off, type;
4460 u64 imm;
4461 u8 code;
4462 int i;
4463
4464 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
4465 GFP_USER);
4466 if (!insns)
4467 return insns;
4468
4469 for (i = 0; i < prog->len; i++) {
4470 code = insns[i].code;
4471
4472 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
4473 insns[i].code = BPF_JMP | BPF_CALL;
4474 insns[i].imm = BPF_FUNC_tail_call;
4475 /* fall-through */
4476 }
4477 if (code == (BPF_JMP | BPF_CALL) ||
4478 code == (BPF_JMP | BPF_CALL_ARGS)) {
4479 if (code == (BPF_JMP | BPF_CALL_ARGS))
4480 insns[i].code = BPF_JMP | BPF_CALL;
4481 if (!bpf_dump_raw_ok(f_cred))
4482 insns[i].imm = 0;
4483 continue;
4484 }
4485 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
4486 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
4487 continue;
4488 }
4489
4490 if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||
4491 BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {
4492 insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM;
4493 continue;
4494 }
4495
4496 if (code != (BPF_LD | BPF_IMM | BPF_DW))
4497 continue;
4498
4499 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
4500 map = bpf_map_from_imm(prog, imm, &off, &type);
4501 if (map) {
4502 insns[i].src_reg = type;
4503 insns[i].imm = map->id;
4504 insns[i + 1].imm = off;
4505 continue;
4506 }
4507 }
4508
4509 return insns;
4510}
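
/*
 * After the rewrite above, a dumped BPF_LD_IMM64 that referenced a map
 * carries BPF_PSEUDO_MAP_FD (or BPF_PSEUDO_MAP_VALUE) in src_reg, the
 * map ID in insns[i].imm and, for direct value access, the value offset
 * in insns[i + 1].imm. User space can thus correlate instructions with
 * bpf_prog_info.map_ids without ever seeing kernel pointers.
 */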
4511
4512static int set_info_rec_size(struct bpf_prog_info *info)
4513{
4514 /*
4515 * Ensure info.*_rec_size matches the record size the kernel expects,
4516 *
4517 * or
4518 *
4519 * allow a zero *_rec_size only if the corresponding _cnt is also
4520 * zero. In that case, the kernel writes the expected _rec_size
4521 * back into info.
4522 */
4523
4524 if ((info->nr_func_info || info->func_info_rec_size) &&
4525 info->func_info_rec_size != sizeof(struct bpf_func_info))
4526 return -EINVAL;
4527
4528 if ((info->nr_line_info || info->line_info_rec_size) &&
4529 info->line_info_rec_size != sizeof(struct bpf_line_info))
4530 return -EINVAL;
4531
4532 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
4533 info->jited_line_info_rec_size != sizeof(__u64))
4534 return -EINVAL;
4535
4536 info->func_info_rec_size = sizeof(struct bpf_func_info);
4537 info->line_info_rec_size = sizeof(struct bpf_line_info);
4538 info->jited_line_info_rec_size = sizeof(__u64);
4539
4540 return 0;
4541}
4542
4543static int bpf_prog_get_info_by_fd(struct file *file,
4544 struct bpf_prog *prog,
4545 const union bpf_attr *attr,
4546 union bpf_attr __user *uattr)
4547{
4548 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4549 struct btf *attach_btf = bpf_prog_get_target_btf(prog);
4550 struct bpf_prog_info info;
4551 u32 info_len = attr->info.info_len;
4552 struct bpf_prog_kstats stats;
4553 char __user *uinsns;
4554 u32 ulen;
4555 int err;
4556
4557 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4558 if (err)
4559 return err;
4560 info_len = min_t(u32, sizeof(info), info_len);
4561
4562 memset(&info, 0, sizeof(info));
4563 if (copy_from_user(&info, uinfo, info_len))
4564 return -EFAULT;
4565
4566 info.type = prog->type;
4567 info.id = prog->aux->id;
4568 info.load_time = prog->aux->load_time;
4569 info.created_by_uid = from_kuid_munged(current_user_ns(),
4570 prog->aux->user->uid);
4571 info.gpl_compatible = prog->gpl_compatible;
4572
4573 memcpy(info.tag, prog->tag, sizeof(prog->tag));
4574 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
4575
4576 mutex_lock(&prog->aux->used_maps_mutex);
4577 ulen = info.nr_map_ids;
4578 info.nr_map_ids = prog->aux->used_map_cnt;
4579 ulen = min_t(u32, info.nr_map_ids, ulen);
4580 if (ulen) {
4581 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
4582 u32 i;
4583
4584 for (i = 0; i < ulen; i++)
4585 if (put_user(prog->aux->used_maps[i]->id,
4586 &user_map_ids[i])) {
4587 mutex_unlock(&prog->aux->used_maps_mutex);
4588 return -EFAULT;
4589 }
4590 }
4591 mutex_unlock(&prog->aux->used_maps_mutex);
4592
4593 err = set_info_rec_size(&info);
4594 if (err)
4595 return err;
4596
4597 bpf_prog_get_stats(prog, &stats);
4598 info.run_time_ns = stats.nsecs;
4599 info.run_cnt = stats.cnt;
4600 info.recursion_misses = stats.misses;
4601
4602 info.verified_insns = prog->aux->verified_insns;
4603
4604 if (!bpf_capable()) {
4605 info.jited_prog_len = 0;
4606 info.xlated_prog_len = 0;
4607 info.nr_jited_ksyms = 0;
4608 info.nr_jited_func_lens = 0;
4609 info.nr_func_info = 0;
4610 info.nr_line_info = 0;
4611 info.nr_jited_line_info = 0;
4612 goto done;
4613 }
4614
4615 ulen = info.xlated_prog_len;
4616 info.xlated_prog_len = bpf_prog_insn_size(prog);
4617 if (info.xlated_prog_len && ulen) {
4618 struct bpf_insn *insns_sanitized;
4619 bool fault;
4620
4621 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4622 info.xlated_prog_insns = 0;
4623 goto done;
4624 }
4625 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4626 if (!insns_sanitized)
4627 return -ENOMEM;
4628 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4629 ulen = min_t(u32, info.xlated_prog_len, ulen);
4630 fault = copy_to_user(uinsns, insns_sanitized, ulen);
4631 kfree(insns_sanitized);
4632 if (fault)
4633 return -EFAULT;
4634 }
4635
4636 if (bpf_prog_is_offloaded(prog->aux)) {
4637 err = bpf_prog_offload_info_fill(&info, prog);
4638 if (err)
4639 return err;
4640 goto done;
4641 }
4642
4643 /* NOTE: the following code is supposed to be skipped for offload.
4644 * bpf_prog_offload_info_fill() is the place to fill similar fields
4645 * for offload.
4646 */
4647 ulen = info.jited_prog_len;
4648 if (prog->aux->func_cnt) {
4649 u32 i;
4650
4651 info.jited_prog_len = 0;
4652 for (i = 0; i < prog->aux->func_cnt; i++)
4653 info.jited_prog_len += prog->aux->func[i]->jited_len;
4654 } else {
4655 info.jited_prog_len = prog->jited_len;
4656 }
4657
4658 if (info.jited_prog_len && ulen) {
4659 if (bpf_dump_raw_ok(file->f_cred)) {
4660 uinsns = u64_to_user_ptr(info.jited_prog_insns);
4661 ulen = min_t(u32, info.jited_prog_len, ulen);
4662
4663 /* for multi-function programs, copy the JITed
4664 * instructions for all the functions
4665 */
4666 if (prog->aux->func_cnt) {
4667 u32 len, free, i;
4668 u8 *img;
4669
4670 free = ulen;
4671 for (i = 0; i < prog->aux->func_cnt; i++) {
4672 len = prog->aux->func[i]->jited_len;
4673 len = min_t(u32, len, free);
4674 img = (u8 *) prog->aux->func[i]->bpf_func;
4675 if (copy_to_user(uinsns, img, len))
4676 return -EFAULT;
4677 uinsns += len;
4678 free -= len;
4679 if (!free)
4680 break;
4681 }
4682 } else {
4683 if (copy_to_user(uinsns, prog->bpf_func, ulen))
4684 return -EFAULT;
4685 }
4686 } else {
4687 info.jited_prog_insns = 0;
4688 }
4689 }
4690
4691 ulen = info.nr_jited_ksyms;
4692 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4693 if (ulen) {
4694 if (bpf_dump_raw_ok(file->f_cred)) {
4695 unsigned long ksym_addr;
4696 u64 __user *user_ksyms;
4697 u32 i;
4698
4699 /* copy the address of the kernel symbol
4700 * corresponding to each function
4701 */
4702 ulen = min_t(u32, info.nr_jited_ksyms, ulen);
4703 user_ksyms = u64_to_user_ptr(info.jited_ksyms);
4704 if (prog->aux->func_cnt) {
4705 for (i = 0; i < ulen; i++) {
4706 ksym_addr = (unsigned long)
4707 prog->aux->func[i]->bpf_func;
4708 if (put_user((u64) ksym_addr,
4709 &user_ksyms[i]))
4710 return -EFAULT;
4711 }
4712 } else {
4713 ksym_addr = (unsigned long) prog->bpf_func;
4714 if (put_user((u64) ksym_addr, &user_ksyms[0]))
4715 return -EFAULT;
4716 }
4717 } else {
4718 info.jited_ksyms = 0;
4719 }
4720 }
4721
4722 ulen = info.nr_jited_func_lens;
4723 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4724 if (ulen) {
4725 if (bpf_dump_raw_ok(file->f_cred)) {
4726 u32 __user *user_lens;
4727 u32 func_len, i;
4728
4729 /* copy the JITed image lengths for each function */
4730 ulen = min_t(u32, info.nr_jited_func_lens, ulen);
4731 user_lens = u64_to_user_ptr(info.jited_func_lens);
4732 if (prog->aux->func_cnt) {
4733 for (i = 0; i < ulen; i++) {
4734 func_len =
4735 prog->aux->func[i]->jited_len;
4736 if (put_user(func_len, &user_lens[i]))
4737 return -EFAULT;
4738 }
4739 } else {
4740 func_len = prog->jited_len;
4741 if (put_user(func_len, &user_lens[0]))
4742 return -EFAULT;
4743 }
4744 } else {
4745 info.jited_func_lens = 0;
4746 }
4747 }
4748
4749 if (prog->aux->btf)
4750 info.btf_id = btf_obj_id(prog->aux->btf);
4751 info.attach_btf_id = prog->aux->attach_btf_id;
4752 if (attach_btf)
4753 info.attach_btf_obj_id = btf_obj_id(attach_btf);
4754
4755 ulen = info.nr_func_info;
4756 info.nr_func_info = prog->aux->func_info_cnt;
4757 if (info.nr_func_info && ulen) {
4758 char __user *user_finfo;
4759
4760 user_finfo = u64_to_user_ptr(info.func_info);
4761 ulen = min_t(u32, info.nr_func_info, ulen);
4762 if (copy_to_user(user_finfo, prog->aux->func_info,
4763 info.func_info_rec_size * ulen))
4764 return -EFAULT;
4765 }
4766
4767 ulen = info.nr_line_info;
4768 info.nr_line_info = prog->aux->nr_linfo;
4769 if (info.nr_line_info && ulen) {
4770 __u8 __user *user_linfo;
4771
4772 user_linfo = u64_to_user_ptr(info.line_info);
4773 ulen = min_t(u32, info.nr_line_info, ulen);
4774 if (copy_to_user(user_linfo, prog->aux->linfo,
4775 info.line_info_rec_size * ulen))
4776 return -EFAULT;
4777 }
4778
4779 ulen = info.nr_jited_line_info;
4780 if (prog->aux->jited_linfo)
4781 info.nr_jited_line_info = prog->aux->nr_linfo;
4782 else
4783 info.nr_jited_line_info = 0;
4784 if (info.nr_jited_line_info && ulen) {
4785 if (bpf_dump_raw_ok(file->f_cred)) {
4786 unsigned long line_addr;
4787 __u64 __user *user_linfo;
4788 u32 i;
4789
4790 user_linfo = u64_to_user_ptr(info.jited_line_info);
4791 ulen = min_t(u32, info.nr_jited_line_info, ulen);
4792 for (i = 0; i < ulen; i++) {
4793 line_addr = (unsigned long)prog->aux->jited_linfo[i];
4794 if (put_user((__u64)line_addr, &user_linfo[i]))
4795 return -EFAULT;
4796 }
4797 } else {
4798 info.jited_line_info = 0;
4799 }
4800 }
4801
4802 ulen = info.nr_prog_tags;
4803 info.nr_prog_tags = prog->aux->func_cnt ? : 1;
4804 if (ulen) {
4805 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
4806 u32 i;
4807
4808 user_prog_tags = u64_to_user_ptr(info.prog_tags);
4809 ulen = min_t(u32, info.nr_prog_tags, ulen);
4810 if (prog->aux->func_cnt) {
4811 for (i = 0; i < ulen; i++) {
4812 if (copy_to_user(user_prog_tags[i],
4813 prog->aux->func[i]->tag,
4814 BPF_TAG_SIZE))
4815 return -EFAULT;
4816 }
4817 } else {
4818 if (copy_to_user(user_prog_tags[0],
4819 prog->tag, BPF_TAG_SIZE))
4820 return -EFAULT;
4821 }
4822 }
4823
4824done:
4825 if (copy_to_user(uinfo, &info, info_len) ||
4826 put_user(info_len, &uattr->info.info_len))
4827 return -EFAULT;
4828
4829 return 0;
4830}
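
/*
 * Usage sketch (illustrative userspace): the two-call pattern for the
 * variable-sized fields above. The first call reports required sizes;
 * the caller then zeroes the fields it does not want (a non-zero count
 * with a NULL pointer would fault), allocates buffers and calls again.
 * Only the xlated image is shown; map_ids, jited_ksyms etc. work alike.
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *	__u32 xlated_len;
 *
 *	attr.info.bpf_fd = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *
 *	xlated_len = info.xlated_prog_len;
 *	memset(&info, 0, sizeof(info));
 *	info.xlated_prog_len = xlated_len;
 *	info.xlated_prog_insns = (__u64)(unsigned long)malloc(xlated_len);
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 */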
4831
4832static int bpf_map_get_info_by_fd(struct file *file,
4833 struct bpf_map *map,
4834 const union bpf_attr *attr,
4835 union bpf_attr __user *uattr)
4836{
4837 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4838 struct bpf_map_info info;
4839 u32 info_len = attr->info.info_len;
4840 int err;
4841
4842 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4843 if (err)
4844 return err;
4845 info_len = min_t(u32, sizeof(info), info_len);
4846
4847 memset(&info, 0, sizeof(info));
4848 info.type = map->map_type;
4849 info.id = map->id;
4850 info.key_size = map->key_size;
4851 info.value_size = map->value_size;
4852 info.max_entries = map->max_entries;
4853 info.map_flags = map->map_flags;
4854 info.map_extra = map->map_extra;
4855 memcpy(info.name, map->name, sizeof(map->name));
4856
4857 if (map->btf) {
4858 info.btf_id = btf_obj_id(map->btf);
4859 info.btf_key_type_id = map->btf_key_type_id;
4860 info.btf_value_type_id = map->btf_value_type_id;
4861 }
4862 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4863 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
4864 bpf_map_struct_ops_info_fill(&info, map);
4865
4866 if (bpf_map_is_offloaded(map)) {
4867 err = bpf_map_offload_info_fill(&info, map);
4868 if (err)
4869 return err;
4870 }
4871
4872 if (copy_to_user(uinfo, &info, info_len) ||
4873 put_user(info_len, &uattr->info.info_len))
4874 return -EFAULT;
4875
4876 return 0;
4877}
4878
4879static int bpf_btf_get_info_by_fd(struct file *file,
4880 struct btf *btf,
4881 const union bpf_attr *attr,
4882 union bpf_attr __user *uattr)
4883{
4884 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4885 u32 info_len = attr->info.info_len;
4886 int err;
4887
4888 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
4889 if (err)
4890 return err;
4891
4892 return btf_get_info_by_fd(btf, attr, uattr);
4893}
4894
4895static int bpf_link_get_info_by_fd(struct file *file,
4896 struct bpf_link *link,
4897 const union bpf_attr *attr,
4898 union bpf_attr __user *uattr)
4899{
4900 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4901 struct bpf_link_info info;
4902 u32 info_len = attr->info.info_len;
4903 int err;
4904
4905 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4906 if (err)
4907 return err;
4908 info_len = min_t(u32, sizeof(info), info_len);
4909
4910 memset(&info, 0, sizeof(info));
4911 if (copy_from_user(&info, uinfo, info_len))
4912 return -EFAULT;
4913
4914 info.type = link->type;
4915 info.id = link->id;
4916 if (link->prog)
4917 info.prog_id = link->prog->aux->id;
4918
4919 if (link->ops->fill_link_info) {
4920 err = link->ops->fill_link_info(link, &info);
4921 if (err)
4922 return err;
4923 }
4924
4925 if (copy_to_user(uinfo, &info, info_len) ||
4926 put_user(info_len, &uattr->info.info_len))
4927 return -EFAULT;
4928
4929 return 0;
4930}
4931
4933#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
4934
4935static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
4936 union bpf_attr __user *uattr)
4937{
4938 int ufd = attr->info.bpf_fd;
4939 struct fd f;
4940 int err;
4941
4942 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
4943 return -EINVAL;
4944
4945 f = fdget(ufd);
4946 if (!f.file)
4947 return -EBADFD;
4948
4949 if (f.file->f_op == &bpf_prog_fops)
4950 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
4951 uattr);
4952 else if (f.file->f_op == &bpf_map_fops)
4953 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
4954 uattr);
4955 else if (f.file->f_op == &btf_fops)
4956 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
4957 else if (f.file->f_op == &bpf_link_fops)
4958 err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
4959 attr, uattr);
4960 else
4961 err = -EINVAL;
4962
4963 fdput(f);
4964 return err;
4965}
4966
4967#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
4968
4969static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
4970{
4971 struct bpf_token *token = NULL;
4972
4973 if (CHECK_ATTR(BPF_BTF_LOAD))
4974 return -EINVAL;
4975
4976 if (attr->btf_flags & ~BPF_F_TOKEN_FD)
4977 return -EINVAL;
4978
4979 if (attr->btf_flags & BPF_F_TOKEN_FD) {
4980 token = bpf_token_get_from_fd(attr->btf_token_fd);
4981 if (IS_ERR(token))
4982 return PTR_ERR(token);
4983 if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
4984 bpf_token_put(token);
4985 token = NULL;
4986 }
4987 }
4988
4989 if (!bpf_token_capable(token, CAP_BPF)) {
4990 bpf_token_put(token);
4991 return -EPERM;
4992 }
4993
4994 bpf_token_put(token);
4995
4996 return btf_new_fd(attr, uattr, uattr_size);
4997}
4998
4999#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
5000
5001static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
5002{
5003 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
5004 return -EINVAL;
5005
5006 if (!capable(CAP_SYS_ADMIN))
5007 return -EPERM;
5008
5009 return btf_get_fd_by_id(attr->btf_id);
5010}
5011
5012static int bpf_task_fd_query_copy(const union bpf_attr *attr,
5013 union bpf_attr __user *uattr,
5014 u32 prog_id, u32 fd_type,
5015 const char *buf, u64 probe_offset,
5016 u64 probe_addr)
5017{
5018 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
5019 u32 len = buf ? strlen(buf) : 0, input_len;
5020 int err = 0;
5021
5022 if (put_user(len, &uattr->task_fd_query.buf_len))
5023 return -EFAULT;
5024 input_len = attr->task_fd_query.buf_len;
5025 if (input_len && ubuf) {
5026 if (!len) {
5027 /* nothing to copy, just make ubuf NULL terminated */
5028 char zero = '\0';
5029
5030 if (put_user(zero, ubuf))
5031 return -EFAULT;
5032 } else if (input_len >= len + 1) {
5033 /* ubuf can hold the string with NULL terminator */
5034 if (copy_to_user(ubuf, buf, len + 1))
5035 return -EFAULT;
5036 } else {
5037 /* ubuf cannot hold the whole string with its NULL terminator,
5038 * so do a partial copy that is still NULL terminated.
5039 */
5040 char zero = '\0';
5041
5042 err = -ENOSPC;
5043 if (copy_to_user(ubuf, buf, input_len - 1))
5044 return -EFAULT;
5045 if (put_user(zero, ubuf + input_len - 1))
5046 return -EFAULT;
5047 }
5048 }
5049
5050 if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
5051 put_user(fd_type, &uattr->task_fd_query.fd_type) ||
5052 put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
5053 put_user(probe_addr, &uattr->task_fd_query.probe_addr))
5054 return -EFAULT;
5055
5056 return err;
5057}
5058
5059#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
5060
5061static int bpf_task_fd_query(const union bpf_attr *attr,
5062 union bpf_attr __user *uattr)
5063{
5064 pid_t pid = attr->task_fd_query.pid;
5065 u32 fd = attr->task_fd_query.fd;
5066 const struct perf_event *event;
5067 struct task_struct *task;
5068 struct file *file;
5069 int err;
5070
5071 if (CHECK_ATTR(BPF_TASK_FD_QUERY))
5072 return -EINVAL;
5073
5074 if (!capable(CAP_SYS_ADMIN))
5075 return -EPERM;
5076
5077 if (attr->task_fd_query.flags != 0)
5078 return -EINVAL;
5079
5080 rcu_read_lock();
5081 task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
5082 rcu_read_unlock();
5083 if (!task)
5084 return -ENOENT;
5085
5086 err = 0;
5087 file = fget_task(task, fd);
5088 put_task_struct(task);
5089 if (!file)
5090 return -EBADF;
5091
5092 if (file->f_op == &bpf_link_fops) {
5093 struct bpf_link *link = file->private_data;
5094
5095 if (link->ops == &bpf_raw_tp_link_lops) {
5096 struct bpf_raw_tp_link *raw_tp =
5097 container_of(link, struct bpf_raw_tp_link, link);
5098 struct bpf_raw_event_map *btp = raw_tp->btp;
5099
5100 err = bpf_task_fd_query_copy(attr, uattr,
5101 raw_tp->link.prog->aux->id,
5102 BPF_FD_TYPE_RAW_TRACEPOINT,
5103 btp->tp->name, 0, 0);
5104 goto put_file;
5105 }
5106 goto out_not_supp;
5107 }
5108
5109 event = perf_get_event(file);
5110 if (!IS_ERR(event)) {
5111 u64 probe_offset, probe_addr;
5112 u32 prog_id, fd_type;
5113 const char *buf;
5114
5115 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
5116 &buf, &probe_offset,
5117 &probe_addr, NULL);
5118 if (!err)
5119 err = bpf_task_fd_query_copy(attr, uattr, prog_id,
5120 fd_type, buf,
5121 probe_offset,
5122 probe_addr);
5123 goto put_file;
5124 }
5125
5126out_not_supp:
5127 err = -ENOTSUPP;
5128put_file:
5129 fput(file);
5130 return err;
5131}
5132
5133#define BPF_MAP_BATCH_LAST_FIELD batch.flags
5134
5135#define BPF_DO_BATCH(fn, ...) \
5136 do { \
5137 if (!fn) { \
5138 err = -ENOTSUPP; \
5139 goto err_put; \
5140 } \
5141 err = fn(__VA_ARGS__); \
5142 } while (0)
5143
5144static int bpf_map_do_batch(const union bpf_attr *attr,
5145 union bpf_attr __user *uattr,
5146 int cmd)
5147{
5148 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
5149 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
5150 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
5151 struct bpf_map *map;
5152 int err, ufd;
5153 struct fd f;
5154
5155 if (CHECK_ATTR(BPF_MAP_BATCH))
5156 return -EINVAL;
5157
5158 ufd = attr->batch.map_fd;
5159 f = fdget(ufd);
5160 map = __bpf_map_get(f);
5161 if (IS_ERR(map))
5162 return PTR_ERR(map);
5163 if (has_write)
5164 bpf_map_write_active_inc(map);
5165 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
5166 err = -EPERM;
5167 goto err_put;
5168 }
5169 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
5170 err = -EPERM;
5171 goto err_put;
5172 }
5173
5174 if (cmd == BPF_MAP_LOOKUP_BATCH)
5175 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
5176 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
5177 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
5178 else if (cmd == BPF_MAP_UPDATE_BATCH)
5179 BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
5180 else
5181 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
5182err_put:
5183 if (has_write) {
5184 maybe_wait_bpf_programs(map);
5185 bpf_map_write_active_dec(map);
5186 }
5187 fdput(f);
5188 return err;
5189}
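
/*
 * Usage sketch (illustrative userspace): draining a map with
 * BPF_MAP_LOOKUP_BATCH. keys/values are assumed to point at caller
 * buffers sized for attr.batch.count entries of the map's key/value
 * size. On return attr.batch.count holds the number of elements copied,
 * out_batch is the cursor to feed back via in_batch on the next call,
 * and -ENOENT typically signals that the map has been fully walked.
 *
 *	__u64 batch;
 *	union bpf_attr attr = {};
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.out_batch = (__u64)(unsigned long)&batch;
 *	attr.batch.keys = (__u64)(unsigned long)keys;
 *	attr.batch.values = (__u64)(unsigned long)values;
 *	attr.batch.count = 64;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 */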
5190
5191#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
5192static int link_create(union bpf_attr *attr, bpfptr_t uattr)
5193{
5194 struct bpf_prog *prog;
5195 int ret;
5196
5197 if (CHECK_ATTR(BPF_LINK_CREATE))
5198 return -EINVAL;
5199
5200 if (attr->link_create.attach_type == BPF_STRUCT_OPS)
5201 return bpf_struct_ops_link_create(attr);
5202
5203 prog = bpf_prog_get(attr->link_create.prog_fd);
5204 if (IS_ERR(prog))
5205 return PTR_ERR(prog);
5206
5207 ret = bpf_prog_attach_check_attach_type(prog,
5208 attr->link_create.attach_type);
5209 if (ret)
5210 goto out;
5211
5212 switch (prog->type) {
5213 case BPF_PROG_TYPE_CGROUP_SKB:
5214 case BPF_PROG_TYPE_CGROUP_SOCK:
5215 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
5216 case BPF_PROG_TYPE_SOCK_OPS:
5217 case BPF_PROG_TYPE_CGROUP_DEVICE:
5218 case BPF_PROG_TYPE_CGROUP_SYSCTL:
5219 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5220 ret = cgroup_bpf_link_attach(attr, prog);
5221 break;
5222 case BPF_PROG_TYPE_EXT:
5223 ret = bpf_tracing_prog_attach(prog,
5224 attr->link_create.target_fd,
5225 attr->link_create.target_btf_id,
5226 attr->link_create.tracing.cookie);
5227 break;
5228 case BPF_PROG_TYPE_LSM:
5229 case BPF_PROG_TYPE_TRACING:
5230 if (attr->link_create.attach_type != prog->expected_attach_type) {
5231 ret = -EINVAL;
5232 goto out;
5233 }
5234 if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
5235 ret = bpf_raw_tp_link_attach(prog, NULL);
5236 else if (prog->expected_attach_type == BPF_TRACE_ITER)
5237 ret = bpf_iter_link_attach(attr, uattr, prog);
5238 else if (prog->expected_attach_type == BPF_LSM_CGROUP)
5239 ret = cgroup_bpf_link_attach(attr, prog);
5240 else
5241 ret = bpf_tracing_prog_attach(prog,
5242 attr->link_create.target_fd,
5243 attr->link_create.target_btf_id,
5244 attr->link_create.tracing.cookie);
5245 break;
5246 case BPF_PROG_TYPE_FLOW_DISSECTOR:
5247 case BPF_PROG_TYPE_SK_LOOKUP:
5248 ret = netns_bpf_link_create(attr, prog);
5249 break;
5250#ifdef CONFIG_NET
5251 case BPF_PROG_TYPE_XDP:
5252 ret = bpf_xdp_link_attach(attr, prog);
5253 break;
5254 case BPF_PROG_TYPE_SCHED_CLS:
5255 if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
5256 attr->link_create.attach_type == BPF_TCX_EGRESS)
5257 ret = tcx_link_attach(attr, prog);
5258 else
5259 ret = netkit_link_attach(attr, prog);
5260 break;
5261 case BPF_PROG_TYPE_NETFILTER:
5262 ret = bpf_nf_link_attach(attr, prog);
5263 break;
5264#endif
5265 case BPF_PROG_TYPE_PERF_EVENT:
5266 case BPF_PROG_TYPE_TRACEPOINT:
5267 ret = bpf_perf_link_attach(attr, prog);
5268 break;
5269 case BPF_PROG_TYPE_KPROBE:
5270 if (attr->link_create.attach_type == BPF_PERF_EVENT)
5271 ret = bpf_perf_link_attach(attr, prog);
5272 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI)
5273 ret = bpf_kprobe_multi_link_attach(attr, prog);
5274 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
5275 ret = bpf_uprobe_multi_link_attach(attr, prog);
5276 break;
5277 default:
5278 ret = -EINVAL;
5279 }
5280
5281out:
5282 if (ret < 0)
5283 bpf_prog_put(prog);
5284 return ret;
5285}
5286
5287static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
5288{
5289 struct bpf_map *new_map, *old_map = NULL;
5290 int ret;
5291
5292 new_map = bpf_map_get(attr->link_update.new_map_fd);
5293 if (IS_ERR(new_map))
5294 return PTR_ERR(new_map);
5295
5296 if (attr->link_update.flags & BPF_F_REPLACE) {
5297 old_map = bpf_map_get(attr->link_update.old_map_fd);
5298 if (IS_ERR(old_map)) {
5299 ret = PTR_ERR(old_map);
5300 goto out_put;
5301 }
5302 } else if (attr->link_update.old_map_fd) {
5303 ret = -EINVAL;
5304 goto out_put;
5305 }
5306
5307 ret = link->ops->update_map(link, new_map, old_map);
5308
5309 if (old_map)
5310 bpf_map_put(old_map);
5311out_put:
5312 bpf_map_put(new_map);
5313 return ret;
5314}
5315
5316#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
5317
5318static int link_update(union bpf_attr *attr)
5319{
5320 struct bpf_prog *old_prog = NULL, *new_prog;
5321 struct bpf_link *link;
5322 u32 flags;
5323 int ret;
5324
5325 if (CHECK_ATTR(BPF_LINK_UPDATE))
5326 return -EINVAL;
5327
5328 flags = attr->link_update.flags;
5329 if (flags & ~BPF_F_REPLACE)
5330 return -EINVAL;
5331
5332 link = bpf_link_get_from_fd(attr->link_update.link_fd);
5333 if (IS_ERR(link))
5334 return PTR_ERR(link);
5335
5336 if (link->ops->update_map) {
5337 ret = link_update_map(link, attr);
5338 goto out_put_link;
5339 }
5340
5341 new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
5342 if (IS_ERR(new_prog)) {
5343 ret = PTR_ERR(new_prog);
5344 goto out_put_link;
5345 }
5346
5347 if (flags & BPF_F_REPLACE) {
5348 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
5349 if (IS_ERR(old_prog)) {
5350 ret = PTR_ERR(old_prog);
5351 old_prog = NULL;
5352 goto out_put_progs;
5353 }
5354 } else if (attr->link_update.old_prog_fd) {
5355 ret = -EINVAL;
5356 goto out_put_progs;
5357 }
5358
5359 if (link->ops->update_prog)
5360 ret = link->ops->update_prog(link, new_prog, old_prog);
5361 else
5362 ret = -EINVAL;
5363
5364out_put_progs:
5365 if (old_prog)
5366 bpf_prog_put(old_prog);
5367 if (ret)
5368 bpf_prog_put(new_prog);
5369out_put_link:
5370 bpf_link_put_direct(link);
5371 return ret;
5372}
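
/*
 * Usage sketch (illustrative userspace): atomically swapping the program
 * behind a link, and only if old_prog_fd still matches, via
 * BPF_F_REPLACE. All three fds are assumed to be valid.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_update.link_fd = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	attr.link_update.flags = BPF_F_REPLACE;
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */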
5373
5374#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5375
5376static int link_detach(union bpf_attr *attr)
5377{
5378 struct bpf_link *link;
5379 int ret;
5380
5381 if (CHECK_ATTR(BPF_LINK_DETACH))
5382 return -EINVAL;
5383
5384 link = bpf_link_get_from_fd(attr->link_detach.link_fd);
5385 if (IS_ERR(link))
5386 return PTR_ERR(link);
5387
5388 if (link->ops->detach)
5389 ret = link->ops->detach(link);
5390 else
5391 ret = -EOPNOTSUPP;
5392
5393 bpf_link_put_direct(link);
5394 return ret;
5395}
5396
5397static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5398{
5399 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5400}
5401
5402struct bpf_link *bpf_link_by_id(u32 id)
5403{
5404 struct bpf_link *link;
5405
5406 if (!id)
5407 return ERR_PTR(-ENOENT);
5408
5409 spin_lock_bh(&link_idr_lock);
5410 /* before link is "settled", ID is 0, pretend it doesn't exist yet */
5411 link = idr_find(&link_idr, id);
5412 if (link) {
5413 if (link->id)
5414 link = bpf_link_inc_not_zero(link);
5415 else
5416 link = ERR_PTR(-EAGAIN);
5417 } else {
5418 link = ERR_PTR(-ENOENT);
5419 }
5420 spin_unlock_bh(&link_idr_lock);
5421 return link;
5422}
5423
5424struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
5425{
5426 struct bpf_link *link;
5427
5428 spin_lock_bh(&link_idr_lock);
5429again:
5430 link = idr_get_next(&link_idr, id);
5431 if (link) {
5432 link = bpf_link_inc_not_zero(link);
5433 if (IS_ERR(link)) {
5434 (*id)++;
5435 goto again;
5436 }
5437 }
5438 spin_unlock_bh(&link_idr_lock);
5439
5440 return link;
5441}
5442
5443#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
5444
5445static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
5446{
5447 struct bpf_link *link;
5448 u32 id = attr->link_id;
5449 int fd;
5450
5451 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
5452 return -EINVAL;
5453
5454 if (!capable(CAP_SYS_ADMIN))
5455 return -EPERM;
5456
5457 link = bpf_link_by_id(id);
5458 if (IS_ERR(link))
5459 return PTR_ERR(link);
5460
5461 fd = bpf_link_new_fd(link);
5462 if (fd < 0)
5463 bpf_link_put_direct(link);
5464
5465 return fd;
5466}
5467
5468DEFINE_MUTEX(bpf_stats_enabled_mutex);
5469
5470static int bpf_stats_release(struct inode *inode, struct file *file)
5471{
5472 mutex_lock(&bpf_stats_enabled_mutex);
5473 static_key_slow_dec(&bpf_stats_enabled_key.key);
5474 mutex_unlock(&bpf_stats_enabled_mutex);
5475 return 0;
5476}
5477
5478static const struct file_operations bpf_stats_fops = {
5479 .release = bpf_stats_release,
5480};
5481
5482static int bpf_enable_runtime_stats(void)
5483{
5484 int fd;
5485
5486 mutex_lock(&bpf_stats_enabled_mutex);
5487
5488 /* Set a very high limit to avoid overflow */
5489 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
5490 mutex_unlock(&bpf_stats_enabled_mutex);
5491 return -EBUSY;
5492 }
5493
5494 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
5495 if (fd >= 0)
5496 static_key_slow_inc(&bpf_stats_enabled_key.key);
5497
5498 mutex_unlock(&bpf_stats_enabled_mutex);
5499 return fd;
5500}
5501
5502#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
5503
5504static int bpf_enable_stats(union bpf_attr *attr)
5505{
5507 if (CHECK_ATTR(BPF_ENABLE_STATS))
5508 return -EINVAL;
5509
5510 if (!capable(CAP_SYS_ADMIN))
5511 return -EPERM;
5512
5513 switch (attr->enable_stats.type) {
5514 case BPF_STATS_RUN_TIME:
5515 return bpf_enable_runtime_stats();
5516 default:
5517 break;
5518 }
5519 return -EINVAL;
5520}
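
/*
 * Usage sketch (illustrative userspace): runtime stats stay enabled for
 * as long as at least one enabler exists, i.e. while the returned fd
 * (or the bpf_stats_enabled sysctl) is held open.
 *
 *	union bpf_attr attr = {};
 *	int stats_fd;
 *
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *	(... run_time_ns/run_cnt in bpf_prog_info now accumulate ...)
 *	close(stats_fd);
 */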
5521
5522#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
5523
5524static int bpf_iter_create(union bpf_attr *attr)
5525{
5526 struct bpf_link *link;
5527 int err;
5528
5529 if (CHECK_ATTR(BPF_ITER_CREATE))
5530 return -EINVAL;
5531
5532 if (attr->iter_create.flags)
5533 return -EINVAL;
5534
5535 link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5536 if (IS_ERR(link))
5537 return PTR_ERR(link);
5538
5539 err = bpf_iter_new_fd(link);
5540 bpf_link_put_direct(link);
5541
5542 return err;
5543}
5544
5545#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5546
5547static int bpf_prog_bind_map(union bpf_attr *attr)
5548{
5549 struct bpf_prog *prog;
5550 struct bpf_map *map;
5551 struct bpf_map **used_maps_old, **used_maps_new;
5552 int i, ret = 0;
5553
5554 if (CHECK_ATTR(BPF_PROG_BIND_MAP))
5555 return -EINVAL;
5556
5557 if (attr->prog_bind_map.flags)
5558 return -EINVAL;
5559
5560 prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
5561 if (IS_ERR(prog))
5562 return PTR_ERR(prog);
5563
5564 map = bpf_map_get(attr->prog_bind_map.map_fd);
5565 if (IS_ERR(map)) {
5566 ret = PTR_ERR(map);
5567 goto out_prog_put;
5568 }
5569
5570 mutex_lock(&prog->aux->used_maps_mutex);
5571
5572 used_maps_old = prog->aux->used_maps;
5573
5574 for (i = 0; i < prog->aux->used_map_cnt; i++)
5575 if (used_maps_old[i] == map) {
5576 bpf_map_put(map);
5577 goto out_unlock;
5578 }
5579
5580 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
5581 sizeof(used_maps_new[0]),
5582 GFP_KERNEL);
5583 if (!used_maps_new) {
5584 ret = -ENOMEM;
5585 goto out_unlock;
5586 }
5587
5588 /* The bpf program will not access the bpf map, but for the sake of
5589 * simplicity, increase sleepable_refcnt for sleepable programs as well.
5590 */
5591 if (prog->sleepable)
5592 atomic64_inc(&map->sleepable_refcnt);
5593 memcpy(used_maps_new, used_maps_old,
5594 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
5595 used_maps_new[prog->aux->used_map_cnt] = map;
5596
5597 prog->aux->used_map_cnt++;
5598 prog->aux->used_maps = used_maps_new;
5599
5600 kfree(used_maps_old);
5601
5602out_unlock:
5603 mutex_unlock(&prog->aux->used_maps_mutex);
5604
5605 if (ret)
5606 bpf_map_put(map);
5607out_prog_put:
5608 bpf_prog_put(prog);
5609 return ret;
5610}
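
/*
 * Usage sketch (illustrative userspace): tying a map's lifetime to a
 * program that never references it in its instructions, e.g. a map
 * holding metadata about the program.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_bind_map.prog_fd = prog_fd;
 *	attr.prog_bind_map.map_fd = map_fd;
 *	err = syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 */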
5611
5612#define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd
5613
5614static int token_create(union bpf_attr *attr)
5615{
5616 if (CHECK_ATTR(BPF_TOKEN_CREATE))
5617 return -EINVAL;
5618
5619 /* no flags are supported yet */
5620 if (attr->token_create.flags)
5621 return -EINVAL;
5622
5623 return bpf_token_create(attr);
5624}
5625
5626static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
5627{
5628 union bpf_attr attr;
5629 int err;
5630
5631 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5632 if (err)
5633 return err;
5634 size = min_t(u32, size, sizeof(attr));
5635
5636 /* copy attributes from user space, may be less than sizeof(bpf_attr) */
5637 memset(&attr, 0, sizeof(attr));
5638 if (copy_from_bpfptr(&attr, uattr, size) != 0)
5639 return -EFAULT;
5640
5641 err = security_bpf(cmd, &attr, size);
5642 if (err < 0)
5643 return err;
5644
5645 switch (cmd) {
5646 case BPF_MAP_CREATE:
5647 err = map_create(&attr);
5648 break;
5649 case BPF_MAP_LOOKUP_ELEM:
5650 err = map_lookup_elem(&attr);
5651 break;
5652 case BPF_MAP_UPDATE_ELEM:
5653 err = map_update_elem(&attr, uattr);
5654 break;
5655 case BPF_MAP_DELETE_ELEM:
5656 err = map_delete_elem(&attr, uattr);
5657 break;
5658 case BPF_MAP_GET_NEXT_KEY:
5659 err = map_get_next_key(&attr);
5660 break;
5661 case BPF_MAP_FREEZE:
5662 err = map_freeze(&attr);
5663 break;
5664 case BPF_PROG_LOAD:
5665 err = bpf_prog_load(&attr, uattr, size);
5666 break;
5667 case BPF_OBJ_PIN:
5668 err = bpf_obj_pin(&attr);
5669 break;
5670 case BPF_OBJ_GET:
5671 err = bpf_obj_get(&attr);
5672 break;
5673 case BPF_PROG_ATTACH:
5674 err = bpf_prog_attach(&attr);
5675 break;
5676 case BPF_PROG_DETACH:
5677 err = bpf_prog_detach(&attr);
5678 break;
5679 case BPF_PROG_QUERY:
5680 err = bpf_prog_query(&attr, uattr.user);
5681 break;
5682 case BPF_PROG_TEST_RUN:
5683 err = bpf_prog_test_run(&attr, uattr.user);
5684 break;
5685 case BPF_PROG_GET_NEXT_ID:
5686 err = bpf_obj_get_next_id(&attr, uattr.user,
5687 &prog_idr, &prog_idr_lock);
5688 break;
5689 case BPF_MAP_GET_NEXT_ID:
5690 err = bpf_obj_get_next_id(&attr, uattr.user,
5691 &map_idr, &map_idr_lock);
5692 break;
5693 case BPF_BTF_GET_NEXT_ID:
5694 err = bpf_obj_get_next_id(&attr, uattr.user,
5695 &btf_idr, &btf_idr_lock);
5696 break;
5697 case BPF_PROG_GET_FD_BY_ID:
5698 err = bpf_prog_get_fd_by_id(&attr);
5699 break;
5700 case BPF_MAP_GET_FD_BY_ID:
5701 err = bpf_map_get_fd_by_id(&attr);
5702 break;
5703 case BPF_OBJ_GET_INFO_BY_FD:
5704 err = bpf_obj_get_info_by_fd(&attr, uattr.user);
5705 break;
5706 case BPF_RAW_TRACEPOINT_OPEN:
5707 err = bpf_raw_tracepoint_open(&attr);
5708 break;
5709 case BPF_BTF_LOAD:
5710 err = bpf_btf_load(&attr, uattr, size);
5711 break;
5712 case BPF_BTF_GET_FD_BY_ID:
5713 err = bpf_btf_get_fd_by_id(&attr);
5714 break;
5715 case BPF_TASK_FD_QUERY:
5716 err = bpf_task_fd_query(&attr, uattr.user);
5717 break;
5718 case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
5719 err = map_lookup_and_delete_elem(&attr);
5720 break;
5721 case BPF_MAP_LOOKUP_BATCH:
5722 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
5723 break;
5724 case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
5725 err = bpf_map_do_batch(&attr, uattr.user,
5726 BPF_MAP_LOOKUP_AND_DELETE_BATCH);
5727 break;
5728 case BPF_MAP_UPDATE_BATCH:
5729 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
5730 break;
5731 case BPF_MAP_DELETE_BATCH:
5732 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
5733 break;
5734 case BPF_LINK_CREATE:
5735 err = link_create(&attr, uattr);
5736 break;
5737 case BPF_LINK_UPDATE:
5738 err = link_update(&attr);
5739 break;
5740 case BPF_LINK_GET_FD_BY_ID:
5741 err = bpf_link_get_fd_by_id(&attr);
5742 break;
5743 case BPF_LINK_GET_NEXT_ID:
5744 err = bpf_obj_get_next_id(&attr, uattr.user,
5745 &link_idr, &link_idr_lock);
5746 break;
5747 case BPF_ENABLE_STATS:
5748 err = bpf_enable_stats(&attr);
5749 break;
5750 case BPF_ITER_CREATE:
5751 err = bpf_iter_create(&attr);
5752 break;
5753 case BPF_LINK_DETACH:
5754 err = link_detach(&attr);
5755 break;
5756 case BPF_PROG_BIND_MAP:
5757 err = bpf_prog_bind_map(&attr);
5758 break;
5759 case BPF_TOKEN_CREATE:
5760 err = token_create(&attr);
5761 break;
5762 default:
5763 err = -EINVAL;
5764 break;
5765 }
5766
5767 return err;
5768}
5769
5770SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
5771{
5772 return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
5773}
5774
5775static bool syscall_prog_is_valid_access(int off, int size,
5776 enum bpf_access_type type,
5777 const struct bpf_prog *prog,
5778 struct bpf_insn_access_aux *info)
5779{
5780 if (off < 0 || off >= U16_MAX)
5781 return false;
5782 if (off % size != 0)
5783 return false;
5784 return true;
5785}
5786
5787BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
5788{
5789 switch (cmd) {
5790 case BPF_MAP_CREATE:
5791 case BPF_MAP_DELETE_ELEM:
5792 case BPF_MAP_UPDATE_ELEM:
5793 case BPF_MAP_FREEZE:
5794 case BPF_MAP_GET_FD_BY_ID:
5795 case BPF_PROG_LOAD:
5796 case BPF_BTF_LOAD:
5797 case BPF_LINK_CREATE:
5798 case BPF_RAW_TRACEPOINT_OPEN:
5799 break;
5800 default:
5801 return -EINVAL;
5802 }
5803 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
5804}
5805
5807/* To shut up -Wmissing-prototypes.
5808 * This function is used by the kernel light skeleton
5809 * to load bpf programs when modules are loaded or during kernel boot.
5810 * See tools/lib/bpf/skel_internal.h
5811 */
5812int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
5813
5814int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
5815{
5816 struct bpf_prog * __maybe_unused prog;
5817 struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5818
5819 switch (cmd) {
5820#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5821 case BPF_PROG_TEST_RUN:
5822 if (attr->test.data_in || attr->test.data_out ||
5823 attr->test.ctx_out || attr->test.duration ||
5824 attr->test.repeat || attr->test.flags)
5825 return -EINVAL;
5826
5827 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5828 if (IS_ERR(prog))
5829 return PTR_ERR(prog);
5830
5831 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5832 attr->test.ctx_size_in > U16_MAX) {
5833 bpf_prog_put(prog);
5834 return -EINVAL;
5835 }
5836
5837 run_ctx.bpf_cookie = 0;
5838 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
5839 /* recursion detected */
5840 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
5841 bpf_prog_put(prog);
5842 return -EBUSY;
5843 }
5844 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5845 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
5846 &run_ctx);
5847 bpf_prog_put(prog);
5848 return 0;
5849#endif
5850 default:
5851 return ____bpf_sys_bpf(cmd, attr, size);
5852 }
5853}
5854EXPORT_SYMBOL(kern_sys_bpf);
5855
5856static const struct bpf_func_proto bpf_sys_bpf_proto = {
5857 .func = bpf_sys_bpf,
5858 .gpl_only = false,
5859 .ret_type = RET_INTEGER,
5860 .arg1_type = ARG_ANYTHING,
5861 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
5862 .arg3_type = ARG_CONST_SIZE,
5863};
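
/*
 * Usage sketch (illustrative BPF side): a BPF_PROG_TYPE_SYSCALL program
 * issuing a nested bpf() through the helper above, as light skeletons
 * do. SEC() and the bpf_sys_bpf() declaration are assumed to come from
 * libbpf's bpf_helpers.h; BPF_MAP_CREATE is one of the commands the
 * helper whitelists.
 *
 *	SEC("syscall")
 *	int create_array(void *ctx)
 *	{
 *		union bpf_attr attr = {
 *			.map_type = BPF_MAP_TYPE_ARRAY,
 *			.key_size = 4,
 *			.value_size = 8,
 *			.max_entries = 1,
 *		};
 *
 *		return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */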
5864
5865const struct bpf_func_proto * __weak
5866tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5867{
5868 return bpf_base_func_proto(func_id, prog);
5869}
5870
5871BPF_CALL_1(bpf_sys_close, u32, fd)
5872{
5873 /* When a bpf program calls this helper, there must not be an
5874 * fdget() without a matching, completed fdput().
5875 * This helper is allowed only in the following callchain:
5876 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
5877 */
5878 return close_fd(fd);
5879}
5880
5881static const struct bpf_func_proto bpf_sys_close_proto = {
5882 .func = bpf_sys_close,
5883 .gpl_only = false,
5884 .ret_type = RET_INTEGER,
5885 .arg1_type = ARG_ANYTHING,
5886};
5887
5888BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
5889{
5890 if (flags)
5891 return -EINVAL;
5892
5893 if (name_sz <= 1 || name[name_sz - 1])
5894 return -EINVAL;
5895
5896 if (!bpf_dump_raw_ok(current_cred()))
5897 return -EPERM;
5898
5899 *res = kallsyms_lookup_name(name);
5900 return *res ? 0 : -ENOENT;
5901}
5902
5903static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
5904 .func = bpf_kallsyms_lookup_name,
5905 .gpl_only = false,
5906 .ret_type = RET_INTEGER,
5907 .arg1_type = ARG_PTR_TO_MEM,
5908 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
5909 .arg3_type = ARG_ANYTHING,
5910 .arg4_type = ARG_PTR_TO_LONG,
5911};
5912
5913static const struct bpf_func_proto *
5914syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5915{
5916 switch (func_id) {
5917 case BPF_FUNC_sys_bpf:
5918 return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
5919 ? NULL : &bpf_sys_bpf_proto;
5920 case BPF_FUNC_btf_find_by_name_kind:
5921 return &bpf_btf_find_by_name_kind_proto;
5922 case BPF_FUNC_sys_close:
5923 return &bpf_sys_close_proto;
5924 case BPF_FUNC_kallsyms_lookup_name:
5925 return &bpf_kallsyms_lookup_name_proto;
5926 default:
5927 return tracing_prog_func_proto(func_id, prog);
5928 }
5929}
5930
5931const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
5932 .get_func_proto = syscall_prog_func_proto,
5933 .is_valid_access = syscall_prog_is_valid_access,
5934};
5935
5936const struct bpf_prog_ops bpf_syscall_prog_ops = {
5937 .test_run = bpf_prog_test_run_syscall,
5938};
5939
5940#ifdef CONFIG_SYSCTL
5941static int bpf_stats_handler(struct ctl_table *table, int write,
5942 void *buffer, size_t *lenp, loff_t *ppos)
5943{
5944 struct static_key *key = (struct static_key *)table->data;
5945 static int saved_val;
5946 int val, ret;
5947 struct ctl_table tmp = {
5948 .data = &val,
5949 .maxlen = sizeof(val),
5950 .mode = table->mode,
5951 .extra1 = SYSCTL_ZERO,
5952 .extra2 = SYSCTL_ONE,
5953 };
5954
5955 if (write && !capable(CAP_SYS_ADMIN))
5956 return -EPERM;
5957
5958 mutex_lock(&bpf_stats_enabled_mutex);
5959 val = saved_val;
5960 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5961 if (write && !ret && val != saved_val) {
5962 if (val)
5963 static_key_slow_inc(key);
5964 else
5965 static_key_slow_dec(key);
5966 saved_val = val;
5967 }
5968 mutex_unlock(&bpf_stats_enabled_mutex);
5969 return ret;
5970}
5971
5972void __weak unpriv_ebpf_notify(int new_state)
5973{
5974}
5975
5976static int bpf_unpriv_handler(struct ctl_table *table, int write,
5977 void *buffer, size_t *lenp, loff_t *ppos)
5978{
5979 int ret, unpriv_enable = *(int *)table->data;
5980 bool locked_state = unpriv_enable == 1;
5981 struct ctl_table tmp = *table;
5982
5983 if (write && !capable(CAP_SYS_ADMIN))
5984 return -EPERM;
5985
5986 tmp.data = &unpriv_enable;
5987 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5988 if (write && !ret) {
5989 if (locked_state && unpriv_enable != 1)
5990 return -EPERM;
5991 *(int *)table->data = unpriv_enable;
5992 }
5993
5994 if (write)
5995 unpriv_ebpf_notify(unpriv_enable);
5996
5997 return ret;
5998}
5999
6000static struct ctl_table bpf_syscall_table[] = {
6001 {
6002 .procname = "unprivileged_bpf_disabled",
6003 .data = &sysctl_unprivileged_bpf_disabled,
6004 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
6005 .mode = 0644,
6006 .proc_handler = bpf_unpriv_handler,
6007 .extra1 = SYSCTL_ZERO,
6008 .extra2 = SYSCTL_TWO,
6009 },
6010 {
6011 .procname = "bpf_stats_enabled",
6012 .data = &bpf_stats_enabled_key.key,
6013 .mode = 0644,
6014 .proc_handler = bpf_stats_handler,
6015 },
6016 { }
6017};
6018
6019static int __init bpf_syscall_sysctl_init(void)
6020{
6021 register_sysctl_init("kernel", bpf_syscall_table);
6022 return 0;
6023}
6024late_initcall(bpf_syscall_sysctl_init);
6025#endif /* CONFIG_SYSCTL */