1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 * Copyright (c) 2016,2017 Facebook
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#include <linux/bpf.h>
14#include <linux/err.h>
15#include <linux/slab.h>
16#include <linux/mm.h>
17#include <linux/filter.h>
18#include <linux/perf_event.h>
19
20#include "map_in_map.h"
21
22#define ARRAY_CREATE_FLAG_MASK \
23 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
24
25static void bpf_array_free_percpu(struct bpf_array *array)
26{
27 int i;
28
29 for (i = 0; i < array->map.max_entries; i++) {
30 free_percpu(array->pptrs[i]);
31 cond_resched();
32 }
33}
34
35static int bpf_array_alloc_percpu(struct bpf_array *array)
36{
37 void __percpu *ptr;
38 int i;
39
40 for (i = 0; i < array->map.max_entries; i++) {
41 ptr = __alloc_percpu_gfp(array->elem_size, 8,
42 GFP_USER | __GFP_NOWARN);
43 if (!ptr) {
44 bpf_array_free_percpu(array);
45 return -ENOMEM;
46 }
47 array->pptrs[i] = ptr;
48 cond_resched();
49 }
50
51 return 0;
52}
53
54/* Called from syscall */
55static int array_map_alloc_check(union bpf_attr *attr)
56{
57 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
58 int numa_node = bpf_map_attr_numa_node(attr);
59
60 /* check sanity of attributes */
61 if (attr->max_entries == 0 || attr->key_size != 4 ||
62 attr->value_size == 0 ||
63 attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
64 (percpu && numa_node != NUMA_NO_NODE))
65 return -EINVAL;
66
67 if (attr->value_size > KMALLOC_MAX_SIZE)
68 /* if value_size is bigger, the user space won't be able to
69 * access the elements.
70 */
71 return -E2BIG;
72
73 return 0;
74}
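#if 0
/* Illustrative only, not part of this file: a minimal userspace sketch of
 * creating an array map through the raw bpf(2) syscall, matching the checks
 * in array_map_alloc_check() above (key_size must be 4, value_size and
 * max_entries must be non-zero). Helper name and error handling are ours.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_array_map(__u32 value_size, __u32 max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = 4;		/* enforced by array_map_alloc_check() */
	attr.value_size  = value_size;
	attr.max_entries = max_entries;

	/* returns a new map fd, or -1 with errno set (e.g. EINVAL, E2BIG) */
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
#endif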
75
76static struct bpf_map *array_map_alloc(union bpf_attr *attr)
77{
78 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
79 int ret, numa_node = bpf_map_attr_numa_node(attr);
80 u32 elem_size, index_mask, max_entries;
81 bool unpriv = !capable(CAP_SYS_ADMIN);
82 u64 cost, array_size, mask64;
83 struct bpf_array *array;
84
85 elem_size = round_up(attr->value_size, 8);
86
87 max_entries = attr->max_entries;
88
89 /* On 32 bit archs roundup_pow_of_two() with max_entries that has
90	 * uppermost bit set in u32 space is undefined behavior due to
91 * resulting 1U << 32, so do it manually here in u64 space.
92 */
93 mask64 = fls_long(max_entries - 1);
94 mask64 = 1ULL << mask64;
95 mask64 -= 1;
96
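	/* Worked example (illustrative): max_entries == 5 gives
	 * fls_long(4) == 3, so mask64 == (1ULL << 3) - 1 == 7. For an
	 * unprivileged map, max_entries is then rounded up to 8 below so
	 * that "index & index_mask" can never leave the allocated region.
	 */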
97 index_mask = mask64;
98 if (unpriv) {
99 /* round up array size to nearest power of 2,
100 * since cpu will speculate within index_mask limits
101 */
102 max_entries = index_mask + 1;
103 /* Check for overflows. */
104 if (max_entries < attr->max_entries)
105 return ERR_PTR(-E2BIG);
106 }
107
108 array_size = sizeof(*array);
109 if (percpu)
110 array_size += (u64) max_entries * sizeof(void *);
111 else
112 array_size += (u64) max_entries * elem_size;
113
114 /* make sure there is no u32 overflow later in round_up() */
115 cost = array_size;
116 if (cost >= U32_MAX - PAGE_SIZE)
117 return ERR_PTR(-ENOMEM);
118 if (percpu) {
119 cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
120 if (cost >= U32_MAX - PAGE_SIZE)
121 return ERR_PTR(-ENOMEM);
122 }
123 cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
124
125 ret = bpf_map_precharge_memlock(cost);
126 if (ret < 0)
127 return ERR_PTR(ret);
128
129 /* allocate all map elements and zero-initialize them */
130 array = bpf_map_area_alloc(array_size, numa_node);
131 if (!array)
132 return ERR_PTR(-ENOMEM);
133 array->index_mask = index_mask;
134 array->map.unpriv_array = unpriv;
135
136 /* copy mandatory map attributes */
137 bpf_map_init_from_attr(&array->map, attr);
138 array->map.pages = cost;
139 array->elem_size = elem_size;
140
141 if (percpu && bpf_array_alloc_percpu(array)) {
142 bpf_map_area_free(array);
143 return ERR_PTR(-ENOMEM);
144 }
145
146 return &array->map;
147}
148
149/* Called from syscall or from eBPF program */
150static void *array_map_lookup_elem(struct bpf_map *map, void *key)
151{
152 struct bpf_array *array = container_of(map, struct bpf_array, map);
153 u32 index = *(u32 *)key;
154
155 if (unlikely(index >= array->map.max_entries))
156 return NULL;
157
158 return array->value + array->elem_size * (index & array->index_mask);
159}
160
161/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
162static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
163{
164 struct bpf_array *array = container_of(map, struct bpf_array, map);
165 struct bpf_insn *insn = insn_buf;
166 u32 elem_size = round_up(map->value_size, 8);
167 const int ret = BPF_REG_0;
168 const int map_ptr = BPF_REG_1;
169 const int index = BPF_REG_2;
170
171 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
172 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
173 if (map->unpriv_array) {
174 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
175 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
176 } else {
177 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
178 }
179
180 if (is_power_of_2(elem_size)) {
181 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
182 } else {
183 *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
184 }
185 *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
186 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
187 *insn++ = BPF_MOV64_IMM(ret, 0);
188 return insn - insn_buf;
189}
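/* Rough C equivalent of the sequence emitted above (illustrative; register
 * names map to R0-R2 as assigned in the function):
 *
 *	base = &array->value;			// R1 += offsetof(bpf_array, value)
 *	idx  = *(u32 *)key;			// R0 = *(u32 *)(R2 + 0)
 *	if (idx >= max_entries)
 *		return NULL;			// R0 = 0 via the final MOV
 *	idx &= index_mask;			// only for unpriv_array maps
 *	return base + idx * elem_size;		// LSH when elem_size is a power of 2
 */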
190
191/* Called from eBPF program */
192static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
193{
194 struct bpf_array *array = container_of(map, struct bpf_array, map);
195 u32 index = *(u32 *)key;
196
197 if (unlikely(index >= array->map.max_entries))
198 return NULL;
199
200 return this_cpu_ptr(array->pptrs[index & array->index_mask]);
201}
202
203int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
204{
205 struct bpf_array *array = container_of(map, struct bpf_array, map);
206 u32 index = *(u32 *)key;
207 void __percpu *pptr;
208 int cpu, off = 0;
209 u32 size;
210
211 if (unlikely(index >= array->map.max_entries))
212 return -ENOENT;
213
214 /* per_cpu areas are zero-filled and bpf programs can only
215 * access 'value_size' of them, so copying rounded areas
216 * will not leak any kernel data
217 */
218 size = round_up(map->value_size, 8);
219 rcu_read_lock();
220 pptr = array->pptrs[index & array->index_mask];
221 for_each_possible_cpu(cpu) {
222 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
223 off += size;
224 }
225 rcu_read_unlock();
226 return 0;
227}
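#if 0
/* Illustrative only, not part of this file: a userspace-side sketch of the
 * buffer layout bpf_percpu_array_copy() fills. Assumes the libbpf helpers
 * bpf_map_lookup_elem() and libbpf_num_possible_cpus(); the local function
 * and variable names are ours.
 */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <stdlib.h>

static int read_percpu_elem(int map_fd, __u32 key, __u32 value_size)
{
	/* one round_up(value_size, 8) slot per possible CPU */
	size_t stride = (value_size + 7) & ~(size_t)7;
	int ncpus = libbpf_num_possible_cpus();
	void *buf;
	int err;

	if (ncpus < 0)
		return ncpus;
	buf = malloc(stride * ncpus);
	if (!buf)
		return -1;
	err = bpf_map_lookup_elem(map_fd, &key, buf);
	free(buf);
	return err;
}
#endif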
228
229/* Called from syscall */
230static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
231{
232 struct bpf_array *array = container_of(map, struct bpf_array, map);
233 u32 index = key ? *(u32 *)key : U32_MAX;
234 u32 *next = (u32 *)next_key;
235
236 if (index >= array->map.max_entries) {
237 *next = 0;
238 return 0;
239 }
240
241 if (index == array->map.max_entries - 1)
242 return -ENOENT;
243
244 *next = index + 1;
245 return 0;
246}
247
248/* Called from syscall or from eBPF program */
249static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
250 u64 map_flags)
251{
252 struct bpf_array *array = container_of(map, struct bpf_array, map);
253 u32 index = *(u32 *)key;
254
255 if (unlikely(map_flags > BPF_EXIST))
256 /* unknown flags */
257 return -EINVAL;
258
259 if (unlikely(index >= array->map.max_entries))
260 /* all elements were pre-allocated, cannot insert a new one */
261 return -E2BIG;
262
263 if (unlikely(map_flags == BPF_NOEXIST))
264 /* all elements already exist */
265 return -EEXIST;
266
267 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
268 memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
269 value, map->value_size);
270 else
271 memcpy(array->value +
272 array->elem_size * (index & array->index_mask),
273 value, map->value_size);
274 return 0;
275}
276
277int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
278 u64 map_flags)
279{
280 struct bpf_array *array = container_of(map, struct bpf_array, map);
281 u32 index = *(u32 *)key;
282 void __percpu *pptr;
283 int cpu, off = 0;
284 u32 size;
285
286 if (unlikely(map_flags > BPF_EXIST))
287 /* unknown flags */
288 return -EINVAL;
289
290 if (unlikely(index >= array->map.max_entries))
291 /* all elements were pre-allocated, cannot insert a new one */
292 return -E2BIG;
293
294 if (unlikely(map_flags == BPF_NOEXIST))
295 /* all elements already exist */
296 return -EEXIST;
297
298	/* user space will provide round_up(value_size, 8) bytes that
299	 * will be copied into the per-cpu area. bpf programs can only access
300	 * value_size bytes of it. During lookup the same extra bytes will be
301	 * returned, or zeros which were zero-filled by percpu_alloc,
302	 * so no kernel data leak is possible
303	 */
304 size = round_up(map->value_size, 8);
305 rcu_read_lock();
306 pptr = array->pptrs[index & array->index_mask];
307 for_each_possible_cpu(cpu) {
308 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
309 off += size;
310 }
311 rcu_read_unlock();
312 return 0;
313}
314
315/* Called from syscall or from eBPF program */
316static int array_map_delete_elem(struct bpf_map *map, void *key)
317{
318 return -EINVAL;
319}
320
321/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
322static void array_map_free(struct bpf_map *map)
323{
324 struct bpf_array *array = container_of(map, struct bpf_array, map);
325
326 /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
327 * so the programs (can be more than one that used this map) were
328 * disconnected from events. Wait for outstanding programs to complete
329 * and free the array
330 */
331 synchronize_rcu();
332
333 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
334 bpf_array_free_percpu(array);
335
336 bpf_map_area_free(array);
337}
338
339const struct bpf_map_ops array_map_ops = {
340 .map_alloc_check = array_map_alloc_check,
341 .map_alloc = array_map_alloc,
342 .map_free = array_map_free,
343 .map_get_next_key = array_map_get_next_key,
344 .map_lookup_elem = array_map_lookup_elem,
345 .map_update_elem = array_map_update_elem,
346 .map_delete_elem = array_map_delete_elem,
347 .map_gen_lookup = array_map_gen_lookup,
348};
349
350const struct bpf_map_ops percpu_array_map_ops = {
351 .map_alloc_check = array_map_alloc_check,
352 .map_alloc = array_map_alloc,
353 .map_free = array_map_free,
354 .map_get_next_key = array_map_get_next_key,
355 .map_lookup_elem = percpu_array_map_lookup_elem,
356 .map_update_elem = array_map_update_elem,
357 .map_delete_elem = array_map_delete_elem,
358};
359
360static int fd_array_map_alloc_check(union bpf_attr *attr)
361{
362 /* only file descriptors can be stored in this type of map */
363 if (attr->value_size != sizeof(u32))
364 return -EINVAL;
365 return array_map_alloc_check(attr);
366}
367
368static void fd_array_map_free(struct bpf_map *map)
369{
370 struct bpf_array *array = container_of(map, struct bpf_array, map);
371 int i;
372
373 synchronize_rcu();
374
375 /* make sure it's empty */
376 for (i = 0; i < array->map.max_entries; i++)
377 BUG_ON(array->ptrs[i] != NULL);
378
379 bpf_map_area_free(array);
380}
381
382static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
383{
384 return NULL;
385}
386
387/* only called from syscall */
388int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
389{
390 void **elem, *ptr;
391 int ret = 0;
392
393 if (!map->ops->map_fd_sys_lookup_elem)
394 return -ENOTSUPP;
395
396 rcu_read_lock();
397 elem = array_map_lookup_elem(map, key);
398 if (elem && (ptr = READ_ONCE(*elem)))
399 *value = map->ops->map_fd_sys_lookup_elem(ptr);
400 else
401 ret = -ENOENT;
402 rcu_read_unlock();
403
404 return ret;
405}
406
407/* only called from syscall */
408int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
409 void *key, void *value, u64 map_flags)
410{
411 struct bpf_array *array = container_of(map, struct bpf_array, map);
412 void *new_ptr, *old_ptr;
413 u32 index = *(u32 *)key, ufd;
414
415 if (map_flags != BPF_ANY)
416 return -EINVAL;
417
418 if (index >= array->map.max_entries)
419 return -E2BIG;
420
421 ufd = *(u32 *)value;
422 new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
423 if (IS_ERR(new_ptr))
424 return PTR_ERR(new_ptr);
425
426 old_ptr = xchg(array->ptrs + index, new_ptr);
427 if (old_ptr)
428 map->ops->map_fd_put_ptr(old_ptr);
429
430 return 0;
431}
432
433static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
434{
435 struct bpf_array *array = container_of(map, struct bpf_array, map);
436 void *old_ptr;
437 u32 index = *(u32 *)key;
438
439 if (index >= array->map.max_entries)
440 return -E2BIG;
441
442 old_ptr = xchg(array->ptrs + index, NULL);
443 if (old_ptr) {
444 map->ops->map_fd_put_ptr(old_ptr);
445 return 0;
446 } else {
447 return -ENOENT;
448 }
449}
450
451static void *prog_fd_array_get_ptr(struct bpf_map *map,
452 struct file *map_file, int fd)
453{
454 struct bpf_array *array = container_of(map, struct bpf_array, map);
455 struct bpf_prog *prog = bpf_prog_get(fd);
456
457 if (IS_ERR(prog))
458 return prog;
459
460 if (!bpf_prog_array_compatible(array, prog)) {
461 bpf_prog_put(prog);
462 return ERR_PTR(-EINVAL);
463 }
464
465 return prog;
466}
467
468static void prog_fd_array_put_ptr(void *ptr)
469{
470 bpf_prog_put(ptr);
471}
472
473static u32 prog_fd_array_sys_lookup_elem(void *ptr)
474{
475 return ((struct bpf_prog *)ptr)->aux->id;
476}
477
478/* decrement refcnt of all bpf_progs that are stored in this map */
479static void bpf_fd_array_map_clear(struct bpf_map *map)
480{
481 struct bpf_array *array = container_of(map, struct bpf_array, map);
482 int i;
483
484 for (i = 0; i < array->map.max_entries; i++)
485 fd_array_map_delete_elem(map, &i);
486}
487
488const struct bpf_map_ops prog_array_map_ops = {
489 .map_alloc_check = fd_array_map_alloc_check,
490 .map_alloc = array_map_alloc,
491 .map_free = fd_array_map_free,
492 .map_get_next_key = array_map_get_next_key,
493 .map_lookup_elem = fd_array_map_lookup_elem,
494 .map_delete_elem = fd_array_map_delete_elem,
495 .map_fd_get_ptr = prog_fd_array_get_ptr,
496 .map_fd_put_ptr = prog_fd_array_put_ptr,
497 .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
498 .map_release_uref = bpf_fd_array_map_clear,
499};
500
501static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
502 struct file *map_file)
503{
504 struct bpf_event_entry *ee;
505
506 ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
507 if (ee) {
508 ee->event = perf_file->private_data;
509 ee->perf_file = perf_file;
510 ee->map_file = map_file;
511 }
512
513 return ee;
514}
515
516static void __bpf_event_entry_free(struct rcu_head *rcu)
517{
518 struct bpf_event_entry *ee;
519
520 ee = container_of(rcu, struct bpf_event_entry, rcu);
521 fput(ee->perf_file);
522 kfree(ee);
523}
524
525static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
526{
527 call_rcu(&ee->rcu, __bpf_event_entry_free);
528}
529
530static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
531 struct file *map_file, int fd)
532{
533 struct bpf_event_entry *ee;
534 struct perf_event *event;
535 struct file *perf_file;
536 u64 value;
537
538 perf_file = perf_event_get(fd);
539 if (IS_ERR(perf_file))
540 return perf_file;
541
542 ee = ERR_PTR(-EOPNOTSUPP);
543 event = perf_file->private_data;
544 if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
545 goto err_out;
546
547 ee = bpf_event_entry_gen(perf_file, map_file);
548 if (ee)
549 return ee;
550 ee = ERR_PTR(-ENOMEM);
551err_out:
552 fput(perf_file);
553 return ee;
554}
555
556static void perf_event_fd_array_put_ptr(void *ptr)
557{
558 bpf_event_entry_free_rcu(ptr);
559}
560
561static void perf_event_fd_array_release(struct bpf_map *map,
562 struct file *map_file)
563{
564 struct bpf_array *array = container_of(map, struct bpf_array, map);
565 struct bpf_event_entry *ee;
566 int i;
567
568 rcu_read_lock();
569 for (i = 0; i < array->map.max_entries; i++) {
570 ee = READ_ONCE(array->ptrs[i]);
571 if (ee && ee->map_file == map_file)
572 fd_array_map_delete_elem(map, &i);
573 }
574 rcu_read_unlock();
575}
576
577const struct bpf_map_ops perf_event_array_map_ops = {
578 .map_alloc_check = fd_array_map_alloc_check,
579 .map_alloc = array_map_alloc,
580 .map_free = fd_array_map_free,
581 .map_get_next_key = array_map_get_next_key,
582 .map_lookup_elem = fd_array_map_lookup_elem,
583 .map_delete_elem = fd_array_map_delete_elem,
584 .map_fd_get_ptr = perf_event_fd_array_get_ptr,
585 .map_fd_put_ptr = perf_event_fd_array_put_ptr,
586 .map_release = perf_event_fd_array_release,
587};
588
589#ifdef CONFIG_CGROUPS
590static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
591 struct file *map_file /* not used */,
592 int fd)
593{
594 return cgroup_get_from_fd(fd);
595}
596
597static void cgroup_fd_array_put_ptr(void *ptr)
598{
599	/* cgroup_put() frees cgrp after an RCU grace period */
600 cgroup_put(ptr);
601}
602
603static void cgroup_fd_array_free(struct bpf_map *map)
604{
605 bpf_fd_array_map_clear(map);
606 fd_array_map_free(map);
607}
608
609const struct bpf_map_ops cgroup_array_map_ops = {
610 .map_alloc_check = fd_array_map_alloc_check,
611 .map_alloc = array_map_alloc,
612 .map_free = cgroup_fd_array_free,
613 .map_get_next_key = array_map_get_next_key,
614 .map_lookup_elem = fd_array_map_lookup_elem,
615 .map_delete_elem = fd_array_map_delete_elem,
616 .map_fd_get_ptr = cgroup_fd_array_get_ptr,
617 .map_fd_put_ptr = cgroup_fd_array_put_ptr,
618};
619#endif
620
621static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
622{
623 struct bpf_map *map, *inner_map_meta;
624
625 inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
626 if (IS_ERR(inner_map_meta))
627 return inner_map_meta;
628
629 map = array_map_alloc(attr);
630 if (IS_ERR(map)) {
631 bpf_map_meta_free(inner_map_meta);
632 return map;
633 }
634
635 map->inner_map_meta = inner_map_meta;
636
637 return map;
638}
639
640static void array_of_map_free(struct bpf_map *map)
641{
642 /* map->inner_map_meta is only accessed by syscall which
643 * is protected by fdget/fdput.
644 */
645 bpf_map_meta_free(map->inner_map_meta);
646 bpf_fd_array_map_clear(map);
647 fd_array_map_free(map);
648}
649
650static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
651{
652 struct bpf_map **inner_map = array_map_lookup_elem(map, key);
653
654 if (!inner_map)
655 return NULL;
656
657 return READ_ONCE(*inner_map);
658}
659
660static u32 array_of_map_gen_lookup(struct bpf_map *map,
661 struct bpf_insn *insn_buf)
662{
663 struct bpf_array *array = container_of(map, struct bpf_array, map);
664 u32 elem_size = round_up(map->value_size, 8);
665 struct bpf_insn *insn = insn_buf;
666 const int ret = BPF_REG_0;
667 const int map_ptr = BPF_REG_1;
668 const int index = BPF_REG_2;
669
670 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
671 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
672 if (map->unpriv_array) {
673 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
674 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
675 } else {
676 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
677 }
678 if (is_power_of_2(elem_size))
679 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
680 else
681 *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
682 *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
683 *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
684 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
685 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
686 *insn++ = BPF_MOV64_IMM(ret, 0);
687
688 return insn - insn_buf;
689}
690
691const struct bpf_map_ops array_of_maps_map_ops = {
692 .map_alloc_check = fd_array_map_alloc_check,
693 .map_alloc = array_of_map_alloc,
694 .map_free = array_of_map_free,
695 .map_get_next_key = array_map_get_next_key,
696 .map_lookup_elem = array_of_map_lookup_elem,
697 .map_delete_elem = fd_array_map_delete_elem,
698 .map_fd_get_ptr = bpf_map_fd_get_ptr,
699 .map_fd_put_ptr = bpf_map_fd_put_ptr,
700 .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
701 .map_gen_lookup = array_of_map_gen_lookup,
702};
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016,2017 Facebook
4 */
5#include <linux/bpf.h>
6#include <linux/btf.h>
7#include <linux/err.h>
8#include <linux/slab.h>
9#include <linux/mm.h>
10#include <linux/filter.h>
11#include <linux/perf_event.h>
12#include <uapi/linux/btf.h>
13
14#include "map_in_map.h"
15
16#define ARRAY_CREATE_FLAG_MASK \
17 (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)
18
19static void bpf_array_free_percpu(struct bpf_array *array)
20{
21 int i;
22
23 for (i = 0; i < array->map.max_entries; i++) {
24 free_percpu(array->pptrs[i]);
25 cond_resched();
26 }
27}
28
29static int bpf_array_alloc_percpu(struct bpf_array *array)
30{
31 void __percpu *ptr;
32 int i;
33
34 for (i = 0; i < array->map.max_entries; i++) {
35 ptr = __alloc_percpu_gfp(array->elem_size, 8,
36 GFP_USER | __GFP_NOWARN);
37 if (!ptr) {
38 bpf_array_free_percpu(array);
39 return -ENOMEM;
40 }
41 array->pptrs[i] = ptr;
42 cond_resched();
43 }
44
45 return 0;
46}
47
48/* Called from syscall */
49int array_map_alloc_check(union bpf_attr *attr)
50{
51 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
52 int numa_node = bpf_map_attr_numa_node(attr);
53
54 /* check sanity of attributes */
55 if (attr->max_entries == 0 || attr->key_size != 4 ||
56 attr->value_size == 0 ||
57 attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
58 !bpf_map_flags_access_ok(attr->map_flags) ||
59 (percpu && numa_node != NUMA_NO_NODE))
60 return -EINVAL;
61
62 if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
63 attr->map_flags & BPF_F_MMAPABLE)
64 return -EINVAL;
65
66 if (attr->value_size > KMALLOC_MAX_SIZE)
67 /* if value_size is bigger, the user space won't be able to
68 * access the elements.
69 */
70 return -E2BIG;
71
72 return 0;
73}
74
75static struct bpf_map *array_map_alloc(union bpf_attr *attr)
76{
77 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
78 int ret, numa_node = bpf_map_attr_numa_node(attr);
79 u32 elem_size, index_mask, max_entries;
80 bool bypass_spec_v1 = bpf_bypass_spec_v1();
81 u64 cost, array_size, mask64;
82 struct bpf_map_memory mem;
83 struct bpf_array *array;
84
85 elem_size = round_up(attr->value_size, 8);
86
87 max_entries = attr->max_entries;
88
89 /* On 32 bit archs roundup_pow_of_two() with max_entries that has
90	 * uppermost bit set in u32 space is undefined behavior due to
91 * resulting 1U << 32, so do it manually here in u64 space.
92 */
93 mask64 = fls_long(max_entries - 1);
94 mask64 = 1ULL << mask64;
95 mask64 -= 1;
96
97 index_mask = mask64;
98 if (!bypass_spec_v1) {
99 /* round up array size to nearest power of 2,
100 * since cpu will speculate within index_mask limits
101 */
102 max_entries = index_mask + 1;
103 /* Check for overflows. */
104 if (max_entries < attr->max_entries)
105 return ERR_PTR(-E2BIG);
106 }
107
108 array_size = sizeof(*array);
109 if (percpu) {
110 array_size += (u64) max_entries * sizeof(void *);
111 } else {
112 /* rely on vmalloc() to return page-aligned memory and
113 * ensure array->value is exactly page-aligned
114 */
115 if (attr->map_flags & BPF_F_MMAPABLE) {
116 array_size = PAGE_ALIGN(array_size);
117 array_size += PAGE_ALIGN((u64) max_entries * elem_size);
118 } else {
119 array_size += (u64) max_entries * elem_size;
120 }
121 }
122
123 /* make sure there is no u32 overflow later in round_up() */
124 cost = array_size;
125 if (percpu)
126 cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
127
128 ret = bpf_map_charge_init(&mem, cost);
129 if (ret < 0)
130 return ERR_PTR(ret);
131
132 /* allocate all map elements and zero-initialize them */
133 if (attr->map_flags & BPF_F_MMAPABLE) {
134 void *data;
135
136 /* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
137 data = bpf_map_area_mmapable_alloc(array_size, numa_node);
138 if (!data) {
139 bpf_map_charge_finish(&mem);
140 return ERR_PTR(-ENOMEM);
141 }
142 array = data + PAGE_ALIGN(sizeof(struct bpf_array))
143 - offsetof(struct bpf_array, value);
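		/* Layout note (illustrative): array->value now falls exactly
		 * PAGE_ALIGN(sizeof(struct bpf_array)) bytes past the vmalloc
		 * base returned above, i.e. on a page boundary, which is the
		 * region array_map_mmap() later exposes to user space.
		 */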
144 } else {
145 array = bpf_map_area_alloc(array_size, numa_node);
146 }
147 if (!array) {
148 bpf_map_charge_finish(&mem);
149 return ERR_PTR(-ENOMEM);
150 }
151 array->index_mask = index_mask;
152 array->map.bypass_spec_v1 = bypass_spec_v1;
153
154 /* copy mandatory map attributes */
155 bpf_map_init_from_attr(&array->map, attr);
156 bpf_map_charge_move(&array->map.memory, &mem);
157 array->elem_size = elem_size;
158
159 if (percpu && bpf_array_alloc_percpu(array)) {
160 bpf_map_charge_finish(&array->map.memory);
161 bpf_map_area_free(array);
162 return ERR_PTR(-ENOMEM);
163 }
164
165 return &array->map;
166}
167
168/* Called from syscall or from eBPF program */
169static void *array_map_lookup_elem(struct bpf_map *map, void *key)
170{
171 struct bpf_array *array = container_of(map, struct bpf_array, map);
172 u32 index = *(u32 *)key;
173
174 if (unlikely(index >= array->map.max_entries))
175 return NULL;
176
177 return array->value + array->elem_size * (index & array->index_mask);
178}
179
180static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
181 u32 off)
182{
183 struct bpf_array *array = container_of(map, struct bpf_array, map);
184
185 if (map->max_entries != 1)
186 return -ENOTSUPP;
187 if (off >= map->value_size)
188 return -EINVAL;
189
190 *imm = (unsigned long)array->value;
191 return 0;
192}
193
194static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
195 u32 *off)
196{
197 struct bpf_array *array = container_of(map, struct bpf_array, map);
198 u64 base = (unsigned long)array->value;
199 u64 range = array->elem_size;
200
201 if (map->max_entries != 1)
202 return -ENOTSUPP;
203 if (imm < base || imm >= base + range)
204 return -ENOENT;
205
206 *off = imm - base;
207 return 0;
208}
209
210/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
211static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
212{
213 struct bpf_array *array = container_of(map, struct bpf_array, map);
214 struct bpf_insn *insn = insn_buf;
215 u32 elem_size = round_up(map->value_size, 8);
216 const int ret = BPF_REG_0;
217 const int map_ptr = BPF_REG_1;
218 const int index = BPF_REG_2;
219
220 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
221 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
222 if (!map->bypass_spec_v1) {
223 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
224 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
225 } else {
226 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
227 }
228
229 if (is_power_of_2(elem_size)) {
230 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
231 } else {
232 *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
233 }
234 *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
235 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
236 *insn++ = BPF_MOV64_IMM(ret, 0);
237 return insn - insn_buf;
238}
239
240/* Called from eBPF program */
241static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
242{
243 struct bpf_array *array = container_of(map, struct bpf_array, map);
244 u32 index = *(u32 *)key;
245
246 if (unlikely(index >= array->map.max_entries))
247 return NULL;
248
249 return this_cpu_ptr(array->pptrs[index & array->index_mask]);
250}
251
252int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
253{
254 struct bpf_array *array = container_of(map, struct bpf_array, map);
255 u32 index = *(u32 *)key;
256 void __percpu *pptr;
257 int cpu, off = 0;
258 u32 size;
259
260 if (unlikely(index >= array->map.max_entries))
261 return -ENOENT;
262
263 /* per_cpu areas are zero-filled and bpf programs can only
264 * access 'value_size' of them, so copying rounded areas
265 * will not leak any kernel data
266 */
267 size = round_up(map->value_size, 8);
268 rcu_read_lock();
269 pptr = array->pptrs[index & array->index_mask];
270 for_each_possible_cpu(cpu) {
271 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
272 off += size;
273 }
274 rcu_read_unlock();
275 return 0;
276}
277
278/* Called from syscall */
279static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
280{
281 struct bpf_array *array = container_of(map, struct bpf_array, map);
282 u32 index = key ? *(u32 *)key : U32_MAX;
283 u32 *next = (u32 *)next_key;
284
285 if (index >= array->map.max_entries) {
286 *next = 0;
287 return 0;
288 }
289
290 if (index == array->map.max_entries - 1)
291 return -ENOENT;
292
293 *next = index + 1;
294 return 0;
295}
296
297/* Called from syscall or from eBPF program */
298static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
299 u64 map_flags)
300{
301 struct bpf_array *array = container_of(map, struct bpf_array, map);
302 u32 index = *(u32 *)key;
303 char *val;
304
305 if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
306 /* unknown flags */
307 return -EINVAL;
308
309 if (unlikely(index >= array->map.max_entries))
310 /* all elements were pre-allocated, cannot insert a new one */
311 return -E2BIG;
312
313 if (unlikely(map_flags & BPF_NOEXIST))
314 /* all elements already exist */
315 return -EEXIST;
316
317 if (unlikely((map_flags & BPF_F_LOCK) &&
318 !map_value_has_spin_lock(map)))
319 return -EINVAL;
320
321 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
322 memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
323 value, map->value_size);
324 } else {
325 val = array->value +
326 array->elem_size * (index & array->index_mask);
327 if (map_flags & BPF_F_LOCK)
328 copy_map_value_locked(map, val, value, false);
329 else
330 copy_map_value(map, val, value);
331 }
332 return 0;
333}
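#if 0
/* Illustrative only, not part of this file: a userspace-side sketch of an
 * update under BPF_F_LOCK as handled above. Assumes the map's value type
 * embeds a struct bpf_spin_lock and that BTF for it was supplied at map
 * creation; uses the libbpf wrapper bpf_map_update_elem().
 */
#include <bpf/bpf.h>

static int locked_update(int map_fd, __u32 key, const void *value)
{
	/* the kernel copies the value with the element's spin lock held */
	return bpf_map_update_elem(map_fd, &key, value, BPF_F_LOCK);
}
#endif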
334
335int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
336 u64 map_flags)
337{
338 struct bpf_array *array = container_of(map, struct bpf_array, map);
339 u32 index = *(u32 *)key;
340 void __percpu *pptr;
341 int cpu, off = 0;
342 u32 size;
343
344 if (unlikely(map_flags > BPF_EXIST))
345 /* unknown flags */
346 return -EINVAL;
347
348 if (unlikely(index >= array->map.max_entries))
349 /* all elements were pre-allocated, cannot insert a new one */
350 return -E2BIG;
351
352 if (unlikely(map_flags == BPF_NOEXIST))
353 /* all elements already exist */
354 return -EEXIST;
355
356	/* user space will provide round_up(value_size, 8) bytes that
357	 * will be copied into the per-cpu area. bpf programs can only access
358	 * value_size bytes of it. During lookup the same extra bytes will be
359	 * returned, or zeros which were zero-filled by percpu_alloc,
360	 * so no kernel data leak is possible
361	 */
362 size = round_up(map->value_size, 8);
363 rcu_read_lock();
364 pptr = array->pptrs[index & array->index_mask];
365 for_each_possible_cpu(cpu) {
366 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
367 off += size;
368 }
369 rcu_read_unlock();
370 return 0;
371}
372
373/* Called from syscall or from eBPF program */
374static int array_map_delete_elem(struct bpf_map *map, void *key)
375{
376 return -EINVAL;
377}
378
379static void *array_map_vmalloc_addr(struct bpf_array *array)
380{
381 return (void *)round_down((unsigned long)array, PAGE_SIZE);
382}
383
384/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
385static void array_map_free(struct bpf_map *map)
386{
387 struct bpf_array *array = container_of(map, struct bpf_array, map);
388
389 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
390 bpf_array_free_percpu(array);
391
392 if (array->map.map_flags & BPF_F_MMAPABLE)
393 bpf_map_area_free(array_map_vmalloc_addr(array));
394 else
395 bpf_map_area_free(array);
396}
397
398static void array_map_seq_show_elem(struct bpf_map *map, void *key,
399 struct seq_file *m)
400{
401 void *value;
402
403 rcu_read_lock();
404
405 value = array_map_lookup_elem(map, key);
406 if (!value) {
407 rcu_read_unlock();
408 return;
409 }
410
411 if (map->btf_key_type_id)
412 seq_printf(m, "%u: ", *(u32 *)key);
413 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
414 seq_puts(m, "\n");
415
416 rcu_read_unlock();
417}
418
419static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
420 struct seq_file *m)
421{
422 struct bpf_array *array = container_of(map, struct bpf_array, map);
423 u32 index = *(u32 *)key;
424 void __percpu *pptr;
425 int cpu;
426
427 rcu_read_lock();
428
429 seq_printf(m, "%u: {\n", *(u32 *)key);
430 pptr = array->pptrs[index & array->index_mask];
431 for_each_possible_cpu(cpu) {
432 seq_printf(m, "\tcpu%d: ", cpu);
433 btf_type_seq_show(map->btf, map->btf_value_type_id,
434 per_cpu_ptr(pptr, cpu), m);
435 seq_puts(m, "\n");
436 }
437 seq_puts(m, "}\n");
438
439 rcu_read_unlock();
440}
441
442static int array_map_check_btf(const struct bpf_map *map,
443 const struct btf *btf,
444 const struct btf_type *key_type,
445 const struct btf_type *value_type)
446{
447 u32 int_data;
448
449 /* One exception for keyless BTF: .bss/.data/.rodata map */
450 if (btf_type_is_void(key_type)) {
451 if (map->map_type != BPF_MAP_TYPE_ARRAY ||
452 map->max_entries != 1)
453 return -EINVAL;
454
455 if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
456 return -EINVAL;
457
458 return 0;
459 }
460
461 if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
462 return -EINVAL;
463
464 int_data = *(u32 *)(key_type + 1);
465 /* bpf array can only take a u32 key. This check makes sure
466 * that the btf matches the attr used during map_create.
467 */
468 if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
469 return -EINVAL;
470
471 return 0;
472}
473
474static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
475{
476 struct bpf_array *array = container_of(map, struct bpf_array, map);
477 pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
478
479 if (!(map->map_flags & BPF_F_MMAPABLE))
480 return -EINVAL;
481
482 if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
483 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
484 return -EINVAL;
485
486 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
487 vma->vm_pgoff + pgoff);
488}
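#if 0
/* Illustrative only, not part of this file: a userspace-side sketch of
 * mmap()ing a BPF_F_MMAPABLE array. Assumes a libbpf recent enough to
 * provide bpf_map_create(); sizes and names are ours.
 */
#include <bpf/bpf.h>
#include <sys/mman.h>

static void *mmap_array(__u32 value_size, __u32 max_entries)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_array",
				sizeof(__u32), value_size, max_entries, &opts);

	if (fd < 0)
		return NULL;
	/* offset 0 maps the start of the value area (page-aligned above) */
	return mmap(NULL, (size_t)value_size * max_entries,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
#endif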
489
490struct bpf_iter_seq_array_map_info {
491 struct bpf_map *map;
492 void *percpu_value_buf;
493 u32 index;
494};
495
496static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
497{
498 struct bpf_iter_seq_array_map_info *info = seq->private;
499 struct bpf_map *map = info->map;
500 struct bpf_array *array;
501 u32 index;
502
503 if (info->index >= map->max_entries)
504 return NULL;
505
506 if (*pos == 0)
507 ++*pos;
508 array = container_of(map, struct bpf_array, map);
509 index = info->index & array->index_mask;
510 if (info->percpu_value_buf)
511 return array->pptrs[index];
512 return array->value + array->elem_size * index;
513}
514
515static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
516{
517 struct bpf_iter_seq_array_map_info *info = seq->private;
518 struct bpf_map *map = info->map;
519 struct bpf_array *array;
520 u32 index;
521
522 ++*pos;
523 ++info->index;
524 if (info->index >= map->max_entries)
525 return NULL;
526
527 array = container_of(map, struct bpf_array, map);
528 index = info->index & array->index_mask;
529 if (info->percpu_value_buf)
530 return array->pptrs[index];
531 return array->value + array->elem_size * index;
532}
533
534static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
535{
536 struct bpf_iter_seq_array_map_info *info = seq->private;
537 struct bpf_iter__bpf_map_elem ctx = {};
538 struct bpf_map *map = info->map;
539 struct bpf_iter_meta meta;
540 struct bpf_prog *prog;
541 int off = 0, cpu = 0;
542 void __percpu **pptr;
543 u32 size;
544
545 meta.seq = seq;
546 prog = bpf_iter_get_info(&meta, v == NULL);
547 if (!prog)
548 return 0;
549
550 ctx.meta = &meta;
551 ctx.map = info->map;
552 if (v) {
553 ctx.key = &info->index;
554
555 if (!info->percpu_value_buf) {
556 ctx.value = v;
557 } else {
558 pptr = v;
559 size = round_up(map->value_size, 8);
560 for_each_possible_cpu(cpu) {
561 bpf_long_memcpy(info->percpu_value_buf + off,
562 per_cpu_ptr(pptr, cpu),
563 size);
564 off += size;
565 }
566 ctx.value = info->percpu_value_buf;
567 }
568 }
569
570 return bpf_iter_run_prog(prog, &ctx);
571}
572
573static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
574{
575 return __bpf_array_map_seq_show(seq, v);
576}
577
578static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
579{
580 if (!v)
581 (void)__bpf_array_map_seq_show(seq, NULL);
582}
583
584static int bpf_iter_init_array_map(void *priv_data,
585 struct bpf_iter_aux_info *aux)
586{
587 struct bpf_iter_seq_array_map_info *seq_info = priv_data;
588 struct bpf_map *map = aux->map;
589 void *value_buf;
590 u32 buf_size;
591
592 if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
593 buf_size = round_up(map->value_size, 8) * num_possible_cpus();
594 value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
595 if (!value_buf)
596 return -ENOMEM;
597
598 seq_info->percpu_value_buf = value_buf;
599 }
600
601 seq_info->map = map;
602 return 0;
603}
604
605static void bpf_iter_fini_array_map(void *priv_data)
606{
607 struct bpf_iter_seq_array_map_info *seq_info = priv_data;
608
609 kfree(seq_info->percpu_value_buf);
610}
611
612static const struct seq_operations bpf_array_map_seq_ops = {
613 .start = bpf_array_map_seq_start,
614 .next = bpf_array_map_seq_next,
615 .stop = bpf_array_map_seq_stop,
616 .show = bpf_array_map_seq_show,
617};
618
619static const struct bpf_iter_seq_info iter_seq_info = {
620 .seq_ops = &bpf_array_map_seq_ops,
621 .init_seq_private = bpf_iter_init_array_map,
622 .fini_seq_private = bpf_iter_fini_array_map,
623 .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
624};
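#if 0
/* Illustrative only, not part of this file: a BPF-side sketch of an
 * "iter/bpf_map_elem" program driven by the seq_file operations above.
 * Assumes a vmlinux.h providing struct bpf_iter__bpf_map_elem and a value
 * type of __u64; section name follows the bpf_iter convention.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/bpf_map_elem")
int dump_array_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	/* key/value are NULL on the post-processing pass (v == NULL above) */
	if (!key || !val)
		return 0;
	BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %llu\n", *key, *val);
	return 0;
}

char _license[] SEC("license") = "GPL";
#endif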
625
626static int array_map_btf_id;
627const struct bpf_map_ops array_map_ops = {
628 .map_alloc_check = array_map_alloc_check,
629 .map_alloc = array_map_alloc,
630 .map_free = array_map_free,
631 .map_get_next_key = array_map_get_next_key,
632 .map_lookup_elem = array_map_lookup_elem,
633 .map_update_elem = array_map_update_elem,
634 .map_delete_elem = array_map_delete_elem,
635 .map_gen_lookup = array_map_gen_lookup,
636 .map_direct_value_addr = array_map_direct_value_addr,
637 .map_direct_value_meta = array_map_direct_value_meta,
638 .map_mmap = array_map_mmap,
639 .map_seq_show_elem = array_map_seq_show_elem,
640 .map_check_btf = array_map_check_btf,
641 .map_lookup_batch = generic_map_lookup_batch,
642 .map_update_batch = generic_map_update_batch,
643 .map_btf_name = "bpf_array",
644 .map_btf_id = &array_map_btf_id,
645 .iter_seq_info = &iter_seq_info,
646};
647
648static int percpu_array_map_btf_id;
649const struct bpf_map_ops percpu_array_map_ops = {
650 .map_alloc_check = array_map_alloc_check,
651 .map_alloc = array_map_alloc,
652 .map_free = array_map_free,
653 .map_get_next_key = array_map_get_next_key,
654 .map_lookup_elem = percpu_array_map_lookup_elem,
655 .map_update_elem = array_map_update_elem,
656 .map_delete_elem = array_map_delete_elem,
657 .map_seq_show_elem = percpu_array_map_seq_show_elem,
658 .map_check_btf = array_map_check_btf,
659 .map_btf_name = "bpf_array",
660 .map_btf_id = &percpu_array_map_btf_id,
661 .iter_seq_info = &iter_seq_info,
662};
663
664static int fd_array_map_alloc_check(union bpf_attr *attr)
665{
666 /* only file descriptors can be stored in this type of map */
667 if (attr->value_size != sizeof(u32))
668 return -EINVAL;
669	/* Program read-only/write-only is not supported for special maps yet. */
670 if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
671 return -EINVAL;
672 return array_map_alloc_check(attr);
673}
674
675static void fd_array_map_free(struct bpf_map *map)
676{
677 struct bpf_array *array = container_of(map, struct bpf_array, map);
678 int i;
679
680 /* make sure it's empty */
681 for (i = 0; i < array->map.max_entries; i++)
682 BUG_ON(array->ptrs[i] != NULL);
683
684 bpf_map_area_free(array);
685}
686
687static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
688{
689 return ERR_PTR(-EOPNOTSUPP);
690}
691
692/* only called from syscall */
693int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
694{
695 void **elem, *ptr;
696 int ret = 0;
697
698 if (!map->ops->map_fd_sys_lookup_elem)
699 return -ENOTSUPP;
700
701 rcu_read_lock();
702 elem = array_map_lookup_elem(map, key);
703 if (elem && (ptr = READ_ONCE(*elem)))
704 *value = map->ops->map_fd_sys_lookup_elem(ptr);
705 else
706 ret = -ENOENT;
707 rcu_read_unlock();
708
709 return ret;
710}
711
712/* only called from syscall */
713int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
714 void *key, void *value, u64 map_flags)
715{
716 struct bpf_array *array = container_of(map, struct bpf_array, map);
717 void *new_ptr, *old_ptr;
718 u32 index = *(u32 *)key, ufd;
719
720 if (map_flags != BPF_ANY)
721 return -EINVAL;
722
723 if (index >= array->map.max_entries)
724 return -E2BIG;
725
726 ufd = *(u32 *)value;
727 new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
728 if (IS_ERR(new_ptr))
729 return PTR_ERR(new_ptr);
730
731 if (map->ops->map_poke_run) {
732 mutex_lock(&array->aux->poke_mutex);
733 old_ptr = xchg(array->ptrs + index, new_ptr);
734 map->ops->map_poke_run(map, index, old_ptr, new_ptr);
735 mutex_unlock(&array->aux->poke_mutex);
736 } else {
737 old_ptr = xchg(array->ptrs + index, new_ptr);
738 }
739
740 if (old_ptr)
741 map->ops->map_fd_put_ptr(old_ptr);
742 return 0;
743}
744
745static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
746{
747 struct bpf_array *array = container_of(map, struct bpf_array, map);
748 void *old_ptr;
749 u32 index = *(u32 *)key;
750
751 if (index >= array->map.max_entries)
752 return -E2BIG;
753
754 if (map->ops->map_poke_run) {
755 mutex_lock(&array->aux->poke_mutex);
756 old_ptr = xchg(array->ptrs + index, NULL);
757 map->ops->map_poke_run(map, index, old_ptr, NULL);
758 mutex_unlock(&array->aux->poke_mutex);
759 } else {
760 old_ptr = xchg(array->ptrs + index, NULL);
761 }
762
763 if (old_ptr) {
764 map->ops->map_fd_put_ptr(old_ptr);
765 return 0;
766 } else {
767 return -ENOENT;
768 }
769}
770
771static void *prog_fd_array_get_ptr(struct bpf_map *map,
772 struct file *map_file, int fd)
773{
774 struct bpf_array *array = container_of(map, struct bpf_array, map);
775 struct bpf_prog *prog = bpf_prog_get(fd);
776
777 if (IS_ERR(prog))
778 return prog;
779
780 if (!bpf_prog_array_compatible(array, prog)) {
781 bpf_prog_put(prog);
782 return ERR_PTR(-EINVAL);
783 }
784
785 return prog;
786}
787
788static void prog_fd_array_put_ptr(void *ptr)
789{
790 bpf_prog_put(ptr);
791}
792
793static u32 prog_fd_array_sys_lookup_elem(void *ptr)
794{
795 return ((struct bpf_prog *)ptr)->aux->id;
796}
797
798/* decrement refcnt of all bpf_progs that are stored in this map */
799static void bpf_fd_array_map_clear(struct bpf_map *map)
800{
801 struct bpf_array *array = container_of(map, struct bpf_array, map);
802 int i;
803
804 for (i = 0; i < array->map.max_entries; i++)
805 fd_array_map_delete_elem(map, &i);
806}
807
808static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
809 struct seq_file *m)
810{
811 void **elem, *ptr;
812 u32 prog_id;
813
814 rcu_read_lock();
815
816 elem = array_map_lookup_elem(map, key);
817 if (elem) {
818 ptr = READ_ONCE(*elem);
819 if (ptr) {
820 seq_printf(m, "%u: ", *(u32 *)key);
821 prog_id = prog_fd_array_sys_lookup_elem(ptr);
822 btf_type_seq_show(map->btf, map->btf_value_type_id,
823 &prog_id, m);
824 seq_puts(m, "\n");
825 }
826 }
827
828 rcu_read_unlock();
829}
830
831struct prog_poke_elem {
832 struct list_head list;
833 struct bpf_prog_aux *aux;
834};
835
836static int prog_array_map_poke_track(struct bpf_map *map,
837 struct bpf_prog_aux *prog_aux)
838{
839 struct prog_poke_elem *elem;
840 struct bpf_array_aux *aux;
841 int ret = 0;
842
843 aux = container_of(map, struct bpf_array, map)->aux;
844 mutex_lock(&aux->poke_mutex);
845 list_for_each_entry(elem, &aux->poke_progs, list) {
846 if (elem->aux == prog_aux)
847 goto out;
848 }
849
850 elem = kmalloc(sizeof(*elem), GFP_KERNEL);
851 if (!elem) {
852 ret = -ENOMEM;
853 goto out;
854 }
855
856 INIT_LIST_HEAD(&elem->list);
857 /* We must track the program's aux info at this point in time
858 * since the program pointer itself may not be stable yet, see
859 * also comment in prog_array_map_poke_run().
860 */
861 elem->aux = prog_aux;
862
863 list_add_tail(&elem->list, &aux->poke_progs);
864out:
865 mutex_unlock(&aux->poke_mutex);
866 return ret;
867}
868
869static void prog_array_map_poke_untrack(struct bpf_map *map,
870 struct bpf_prog_aux *prog_aux)
871{
872 struct prog_poke_elem *elem, *tmp;
873 struct bpf_array_aux *aux;
874
875 aux = container_of(map, struct bpf_array, map)->aux;
876 mutex_lock(&aux->poke_mutex);
877 list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
878 if (elem->aux == prog_aux) {
879 list_del_init(&elem->list);
880 kfree(elem);
881 break;
882 }
883 }
884 mutex_unlock(&aux->poke_mutex);
885}
886
887static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
888 struct bpf_prog *old,
889 struct bpf_prog *new)
890{
891 struct prog_poke_elem *elem;
892 struct bpf_array_aux *aux;
893
894 aux = container_of(map, struct bpf_array, map)->aux;
895 WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
896
897 list_for_each_entry(elem, &aux->poke_progs, list) {
898 struct bpf_jit_poke_descriptor *poke;
899 int i, ret;
900
901 for (i = 0; i < elem->aux->size_poke_tab; i++) {
902 poke = &elem->aux->poke_tab[i];
903
904 /* Few things to be aware of:
905 *
906 * 1) We can only ever access aux in this context, but
907 * not aux->prog since it might not be stable yet and
908 * there could be danger of use after free otherwise.
909 * 2) Initially when we start tracking aux, the program
910 * is not JITed yet and also does not have a kallsyms
911 * entry. We skip these as poke->ip_stable is not
912 * active yet. The JIT will do the final fixup before
913 * setting it stable. The various poke->ip_stable are
914 * successively activated, so tail call updates can
915 * arrive from here while JIT is still finishing its
916 * final fixup for non-activated poke entries.
917	 * 3) On program teardown, the program's kallsyms entry gets
918	 *    removed from within an RCU callback, but we can only untrack
919	 *    from sleepable context, therefore bpf_arch_text_poke()
920	 *    might not see that this is in BPF text section and
921	 *    bails out with -EINVAL. As these are unreachable since
922	 *    the RCU grace period already passed, we simply skip them.
923	 * 4) Programs reaching a refcount of zero while patching is in
924	 *    progress are also fine, since we're protected under
925	 *    poke_mutex and untrack the programs before the JIT
926	 *    buffer is freed. When we're still in the middle of
927	 *    patching and suddenly the kallsyms entry of the program
928	 *    gets evicted, we just skip the rest, which is fine due
929	 *    to point 3).
930	 * 5) Any other error happening below from bpf_arch_text_poke()
931	 *    is an unexpected bug.
932 */
933 if (!READ_ONCE(poke->ip_stable))
934 continue;
935 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
936 continue;
937 if (poke->tail_call.map != map ||
938 poke->tail_call.key != key)
939 continue;
940
941 ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
942 old ? (u8 *)old->bpf_func +
943 poke->adj_off : NULL,
944 new ? (u8 *)new->bpf_func +
945 poke->adj_off : NULL);
946 BUG_ON(ret < 0 && ret != -EINVAL);
947 }
948 }
949}
950
951static void prog_array_map_clear_deferred(struct work_struct *work)
952{
953 struct bpf_map *map = container_of(work, struct bpf_array_aux,
954 work)->map;
955 bpf_fd_array_map_clear(map);
956 bpf_map_put(map);
957}
958
959static void prog_array_map_clear(struct bpf_map *map)
960{
961 struct bpf_array_aux *aux = container_of(map, struct bpf_array,
962 map)->aux;
963 bpf_map_inc(map);
964 schedule_work(&aux->work);
965}
966
967static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
968{
969 struct bpf_array_aux *aux;
970 struct bpf_map *map;
971
972 aux = kzalloc(sizeof(*aux), GFP_KERNEL);
973 if (!aux)
974 return ERR_PTR(-ENOMEM);
975
976 INIT_WORK(&aux->work, prog_array_map_clear_deferred);
977 INIT_LIST_HEAD(&aux->poke_progs);
978 mutex_init(&aux->poke_mutex);
979
980 map = array_map_alloc(attr);
981 if (IS_ERR(map)) {
982 kfree(aux);
983 return map;
984 }
985
986 container_of(map, struct bpf_array, map)->aux = aux;
987 aux->map = map;
988
989 return map;
990}
991
992static void prog_array_map_free(struct bpf_map *map)
993{
994 struct prog_poke_elem *elem, *tmp;
995 struct bpf_array_aux *aux;
996
997 aux = container_of(map, struct bpf_array, map)->aux;
998 list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
999 list_del_init(&elem->list);
1000 kfree(elem);
1001 }
1002 kfree(aux);
1003 fd_array_map_free(map);
1004}
1005
1006static int prog_array_map_btf_id;
1007const struct bpf_map_ops prog_array_map_ops = {
1008 .map_alloc_check = fd_array_map_alloc_check,
1009 .map_alloc = prog_array_map_alloc,
1010 .map_free = prog_array_map_free,
1011 .map_poke_track = prog_array_map_poke_track,
1012 .map_poke_untrack = prog_array_map_poke_untrack,
1013 .map_poke_run = prog_array_map_poke_run,
1014 .map_get_next_key = array_map_get_next_key,
1015 .map_lookup_elem = fd_array_map_lookup_elem,
1016 .map_delete_elem = fd_array_map_delete_elem,
1017 .map_fd_get_ptr = prog_fd_array_get_ptr,
1018 .map_fd_put_ptr = prog_fd_array_put_ptr,
1019 .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1020 .map_release_uref = prog_array_map_clear,
1021 .map_seq_show_elem = prog_array_map_seq_show_elem,
1022 .map_btf_name = "bpf_array",
1023 .map_btf_id = &prog_array_map_btf_id,
1024};
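#if 0
/* Illustrative only, not part of this file: a BPF-side sketch of using a
 * prog array as a tail-call jump table, the main consumer of
 * prog_array_map_ops. Uses the libbpf BTF-defined map syntax; map and
 * section names are ours.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	/* jumps to jmp_table[1] if a program is installed there,
	 * otherwise execution falls through to XDP_PASS
	 */
	bpf_tail_call(ctx, &jmp_table, 1);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
#endif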
1025
1026static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1027 struct file *map_file)
1028{
1029 struct bpf_event_entry *ee;
1030
1031 ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
1032 if (ee) {
1033 ee->event = perf_file->private_data;
1034 ee->perf_file = perf_file;
1035 ee->map_file = map_file;
1036 }
1037
1038 return ee;
1039}
1040
1041static void __bpf_event_entry_free(struct rcu_head *rcu)
1042{
1043 struct bpf_event_entry *ee;
1044
1045 ee = container_of(rcu, struct bpf_event_entry, rcu);
1046 fput(ee->perf_file);
1047 kfree(ee);
1048}
1049
1050static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1051{
1052 call_rcu(&ee->rcu, __bpf_event_entry_free);
1053}
1054
1055static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1056 struct file *map_file, int fd)
1057{
1058 struct bpf_event_entry *ee;
1059 struct perf_event *event;
1060 struct file *perf_file;
1061 u64 value;
1062
1063 perf_file = perf_event_get(fd);
1064 if (IS_ERR(perf_file))
1065 return perf_file;
1066
1067 ee = ERR_PTR(-EOPNOTSUPP);
1068 event = perf_file->private_data;
1069 if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1070 goto err_out;
1071
1072 ee = bpf_event_entry_gen(perf_file, map_file);
1073 if (ee)
1074 return ee;
1075 ee = ERR_PTR(-ENOMEM);
1076err_out:
1077 fput(perf_file);
1078 return ee;
1079}
1080
1081static void perf_event_fd_array_put_ptr(void *ptr)
1082{
1083 bpf_event_entry_free_rcu(ptr);
1084}
1085
1086static void perf_event_fd_array_release(struct bpf_map *map,
1087 struct file *map_file)
1088{
1089 struct bpf_array *array = container_of(map, struct bpf_array, map);
1090 struct bpf_event_entry *ee;
1091 int i;
1092
1093 rcu_read_lock();
1094 for (i = 0; i < array->map.max_entries; i++) {
1095 ee = READ_ONCE(array->ptrs[i]);
1096 if (ee && ee->map_file == map_file)
1097 fd_array_map_delete_elem(map, &i);
1098 }
1099 rcu_read_unlock();
1100}
1101
1102static int perf_event_array_map_btf_id;
1103const struct bpf_map_ops perf_event_array_map_ops = {
1104 .map_alloc_check = fd_array_map_alloc_check,
1105 .map_alloc = array_map_alloc,
1106 .map_free = fd_array_map_free,
1107 .map_get_next_key = array_map_get_next_key,
1108 .map_lookup_elem = fd_array_map_lookup_elem,
1109 .map_delete_elem = fd_array_map_delete_elem,
1110 .map_fd_get_ptr = perf_event_fd_array_get_ptr,
1111 .map_fd_put_ptr = perf_event_fd_array_put_ptr,
1112 .map_release = perf_event_fd_array_release,
1113 .map_check_btf = map_check_no_btf,
1114 .map_btf_name = "bpf_array",
1115 .map_btf_id = &perf_event_array_map_btf_id,
1116};
1117
1118#ifdef CONFIG_CGROUPS
1119static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1120 struct file *map_file /* not used */,
1121 int fd)
1122{
1123 return cgroup_get_from_fd(fd);
1124}
1125
1126static void cgroup_fd_array_put_ptr(void *ptr)
1127{
1128	/* cgroup_put() frees cgrp after an RCU grace period */
1129 cgroup_put(ptr);
1130}
1131
1132static void cgroup_fd_array_free(struct bpf_map *map)
1133{
1134 bpf_fd_array_map_clear(map);
1135 fd_array_map_free(map);
1136}
1137
1138static int cgroup_array_map_btf_id;
1139const struct bpf_map_ops cgroup_array_map_ops = {
1140 .map_alloc_check = fd_array_map_alloc_check,
1141 .map_alloc = array_map_alloc,
1142 .map_free = cgroup_fd_array_free,
1143 .map_get_next_key = array_map_get_next_key,
1144 .map_lookup_elem = fd_array_map_lookup_elem,
1145 .map_delete_elem = fd_array_map_delete_elem,
1146 .map_fd_get_ptr = cgroup_fd_array_get_ptr,
1147 .map_fd_put_ptr = cgroup_fd_array_put_ptr,
1148 .map_check_btf = map_check_no_btf,
1149 .map_btf_name = "bpf_array",
1150 .map_btf_id = &cgroup_array_map_btf_id,
1151};
1152#endif
1153
1154static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1155{
1156 struct bpf_map *map, *inner_map_meta;
1157
1158 inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1159 if (IS_ERR(inner_map_meta))
1160 return inner_map_meta;
1161
1162 map = array_map_alloc(attr);
1163 if (IS_ERR(map)) {
1164 bpf_map_meta_free(inner_map_meta);
1165 return map;
1166 }
1167
1168 map->inner_map_meta = inner_map_meta;
1169
1170 return map;
1171}
1172
1173static void array_of_map_free(struct bpf_map *map)
1174{
1175 /* map->inner_map_meta is only accessed by syscall which
1176 * is protected by fdget/fdput.
1177 */
1178 bpf_map_meta_free(map->inner_map_meta);
1179 bpf_fd_array_map_clear(map);
1180 fd_array_map_free(map);
1181}
1182
1183static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1184{
1185 struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1186
1187 if (!inner_map)
1188 return NULL;
1189
1190 return READ_ONCE(*inner_map);
1191}
1192
1193static u32 array_of_map_gen_lookup(struct bpf_map *map,
1194 struct bpf_insn *insn_buf)
1195{
1196 struct bpf_array *array = container_of(map, struct bpf_array, map);
1197 u32 elem_size = round_up(map->value_size, 8);
1198 struct bpf_insn *insn = insn_buf;
1199 const int ret = BPF_REG_0;
1200 const int map_ptr = BPF_REG_1;
1201 const int index = BPF_REG_2;
1202
1203 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1204 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1205 if (!map->bypass_spec_v1) {
1206 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1207 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1208 } else {
1209 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1210 }
1211 if (is_power_of_2(elem_size))
1212 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1213 else
1214 *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1215 *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1216 *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1217 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1218 *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1219 *insn++ = BPF_MOV64_IMM(ret, 0);
1220
1221 return insn - insn_buf;
1222}
1223
1224static int array_of_maps_map_btf_id;
1225const struct bpf_map_ops array_of_maps_map_ops = {
1226 .map_alloc_check = fd_array_map_alloc_check,
1227 .map_alloc = array_of_map_alloc,
1228 .map_free = array_of_map_free,
1229 .map_get_next_key = array_map_get_next_key,
1230 .map_lookup_elem = array_of_map_lookup_elem,
1231 .map_delete_elem = fd_array_map_delete_elem,
1232 .map_fd_get_ptr = bpf_map_fd_get_ptr,
1233 .map_fd_put_ptr = bpf_map_fd_put_ptr,
1234 .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1235 .map_gen_lookup = array_of_map_gen_lookup,
1236 .map_check_btf = map_check_no_btf,
1237 .map_btf_name = "bpf_array",
1238 .map_btf_id = &array_of_maps_map_btf_id,
1239};
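#if 0
/* Illustrative only, not part of this file: a BPF-side sketch of declaring
 * an array-of-maps with the libbpf BTF map syntax. The inner map template
 * pins key/value sizes for every inner map, mirroring the metadata checks
 * done via bpf_map_meta_alloc() above; names are ours.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct inner_array {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 8);
	__type(key, __u32);
	__array(values, struct inner_array);
} outer_array SEC(".maps");
#endif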