v4.17
  1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  2 * Copyright (c) 2016,2017 Facebook
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of version 2 of the GNU General Public
  6 * License as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful, but
  9 * WITHOUT ANY WARRANTY; without even the implied warranty of
 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 11 * General Public License for more details.
 12 */
  13#include <linux/bpf.h>
 14#include <linux/err.h>
 15#include <linux/slab.h>
 16#include <linux/mm.h>
 17#include <linux/filter.h>
  18#include <linux/perf_event.h>
 19
 20#include "map_in_map.h"
 21
 22#define ARRAY_CREATE_FLAG_MASK \
 23	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 24
 25static void bpf_array_free_percpu(struct bpf_array *array)
 26{
 27	int i;
 28
 29	for (i = 0; i < array->map.max_entries; i++) {
 30		free_percpu(array->pptrs[i]);
 31		cond_resched();
 32	}
 33}
 34
 35static int bpf_array_alloc_percpu(struct bpf_array *array)
 36{
 37	void __percpu *ptr;
 38	int i;
 39
 40	for (i = 0; i < array->map.max_entries; i++) {
 41		ptr = __alloc_percpu_gfp(array->elem_size, 8,
 42					 GFP_USER | __GFP_NOWARN);
 43		if (!ptr) {
 44			bpf_array_free_percpu(array);
 45			return -ENOMEM;
 46		}
 47		array->pptrs[i] = ptr;
 48		cond_resched();
 49	}
 50
 51	return 0;
 52}
 53
 54/* Called from syscall */
 55static int array_map_alloc_check(union bpf_attr *attr)
 56{
 57	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 58	int numa_node = bpf_map_attr_numa_node(attr);
 59
 60	/* check sanity of attributes */
 61	if (attr->max_entries == 0 || attr->key_size != 4 ||
 62	    attr->value_size == 0 ||
  63	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
 64	    (percpu && numa_node != NUMA_NO_NODE))
 65		return -EINVAL;
 66
 67	if (attr->value_size > KMALLOC_MAX_SIZE)
 68		/* if value_size is bigger, the user space won't be able to
 69		 * access the elements.
 70		 */
 71		return -E2BIG;
 72
 73	return 0;
 74}
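The checks above translate directly into what a map-creating process must pass to the bpf(2) syscall. Below is an illustrative user-space sketch, not part of arraymap.c; the helper name create_array_map() is invented for the example.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Sketch: create a BPF_MAP_TYPE_ARRAY obeying array_map_alloc_check(). */
static int create_array_map(unsigned int max_entries, unsigned int value_size)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = 4;			/* anything but 4 returns -EINVAL */
	attr.value_size  = value_size;		/* must be > 0 and <= KMALLOC_MAX_SIZE */
	attr.max_entries = max_entries;		/* must be > 0 */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}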
 75
 76static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 77{
 78	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 79	int ret, numa_node = bpf_map_attr_numa_node(attr);
 80	u32 elem_size, index_mask, max_entries;
 81	bool unpriv = !capable(CAP_SYS_ADMIN);
  82	u64 cost, array_size, mask64;
 83	struct bpf_array *array;
 84
 85	elem_size = round_up(attr->value_size, 8);
 86
 87	max_entries = attr->max_entries;
 88
 89	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
 90	 * upper most bit set in u32 space is undefined behavior due to
 91	 * resulting 1U << 32, so do it manually here in u64 space.
 92	 */
 93	mask64 = fls_long(max_entries - 1);
 94	mask64 = 1ULL << mask64;
 95	mask64 -= 1;
 96
 97	index_mask = mask64;
 98	if (unpriv) {
 99		/* round up array size to nearest power of 2,
100		 * since cpu will speculate within index_mask limits
101		 */
102		max_entries = index_mask + 1;
103		/* Check for overflows. */
104		if (max_entries < attr->max_entries)
105			return ERR_PTR(-E2BIG);
106	}
107
108	array_size = sizeof(*array);
109	if (percpu)
110		array_size += (u64) max_entries * sizeof(void *);
111	else
112		array_size += (u64) max_entries * elem_size;
113
114	/* make sure there is no u32 overflow later in round_up() */
115	cost = array_size;
116	if (cost >= U32_MAX - PAGE_SIZE)
117		return ERR_PTR(-ENOMEM);
118	if (percpu) {
119		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
120		if (cost >= U32_MAX - PAGE_SIZE)
121			return ERR_PTR(-ENOMEM);
122	}
123	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
124
125	ret = bpf_map_precharge_memlock(cost);
126	if (ret < 0)
127		return ERR_PTR(ret);
128
129	/* allocate all map elements and zero-initialize them */
130	array = bpf_map_area_alloc(array_size, numa_node);
 131	if (!array)
 132		return ERR_PTR(-ENOMEM);
133	array->index_mask = index_mask;
134	array->map.unpriv_array = unpriv;
135
136	/* copy mandatory map attributes */
137	bpf_map_init_from_attr(&array->map, attr);
138	array->map.pages = cost;
139	array->elem_size = elem_size;
140
 141	if (percpu && bpf_array_alloc_percpu(array)) {
142		bpf_map_area_free(array);
143		return ERR_PTR(-ENOMEM);
144	}
145
146	return &array->map;
147}
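To make the index_mask arithmetic above concrete, here is a small stand-alone replica (illustrative only, with fls_long() approximated by a loop): for an unprivileged caller asking for max_entries = 5, the mask becomes 7 and the array is sized for 8 slots, so speculation past the real bound still lands inside the allocation.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the mask64/index_mask computation done in array_map_alloc(). */
static uint32_t array_index_mask(uint32_t max_entries)
{
	uint64_t v = max_entries - 1;
	unsigned int fls = 0;

	while (v) {		/* position of the most significant set bit, like fls_long() */
		fls++;
		v >>= 1;
	}
	return (uint32_t)((1ULL << fls) - 1);
}

int main(void)
{
	uint32_t mask = array_index_mask(5);

	/* prints "mask=7, unpriv max_entries rounded to 8" */
	printf("mask=%u, unpriv max_entries rounded to %u\n", mask, mask + 1);
	return 0;
}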
148
149/* Called from syscall or from eBPF program */
150static void *array_map_lookup_elem(struct bpf_map *map, void *key)
151{
152	struct bpf_array *array = container_of(map, struct bpf_array, map);
153	u32 index = *(u32 *)key;
154
155	if (unlikely(index >= array->map.max_entries))
156		return NULL;
157
158	return array->value + array->elem_size * (index & array->index_mask);
159}
 160
161/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
162static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
163{
164	struct bpf_array *array = container_of(map, struct bpf_array, map);
165	struct bpf_insn *insn = insn_buf;
166	u32 elem_size = round_up(map->value_size, 8);
167	const int ret = BPF_REG_0;
168	const int map_ptr = BPF_REG_1;
169	const int index = BPF_REG_2;
170
171	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
172	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
173	if (map->unpriv_array) {
174		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
175		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
176	} else {
177		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
178	}
179
180	if (is_power_of_2(elem_size)) {
181		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
182	} else {
183		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
184	}
185	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
186	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
187	*insn++ = BPF_MOV64_IMM(ret, 0);
188	return insn - insn_buf;
189}
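Read back as C, the sequence emitted above corresponds roughly to the following inline expansion (an illustrative sketch, not compilable kernel code). R1 holds the map pointer and R2 the pointer to the key, and the AND with index_mask is only present for unprivileged maps.

/* Sketch of what the patched-in instructions compute:
 *
 *	elem = NULL;
 *	index = *(u32 *)key;				// BPF_LDX_MEM(BPF_W, ...)
 *	if (index < map->max_entries) {			// BPF_JMP_IMM(BPF_JGE, ...)
 *		index &= array->index_mask;		// unpriv only (Spectre v1)
 *		elem = (void *)array->value +
 *		       index * round_up(map->value_size, 8);
 *	}
 *	return elem;					// R0 is NULL on a miss
 */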
190
191/* Called from eBPF program */
192static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
193{
194	struct bpf_array *array = container_of(map, struct bpf_array, map);
195	u32 index = *(u32 *)key;
196
197	if (unlikely(index >= array->map.max_entries))
198		return NULL;
199
200	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
201}
202
203int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
204{
205	struct bpf_array *array = container_of(map, struct bpf_array, map);
206	u32 index = *(u32 *)key;
207	void __percpu *pptr;
208	int cpu, off = 0;
209	u32 size;
210
211	if (unlikely(index >= array->map.max_entries))
212		return -ENOENT;
213
214	/* per_cpu areas are zero-filled and bpf programs can only
215	 * access 'value_size' of them, so copying rounded areas
216	 * will not leak any kernel data
217	 */
218	size = round_up(map->value_size, 8);
219	rcu_read_lock();
220	pptr = array->pptrs[index & array->index_mask];
221	for_each_possible_cpu(cpu) {
222		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
223		off += size;
224	}
225	rcu_read_unlock();
226	return 0;
227}
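From user space, the per-cpu layout described in the comment above means the lookup buffer must hold one rounded-up value per possible CPU. Here is a hedged sketch assuming a libbpf that provides bpf_map_lookup_elem() and libbpf_num_possible_cpus(); the printing assumes an 8-byte counter value.

#include <stdio.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Sketch: read one element of a BPF_MAP_TYPE_PERCPU_ARRAY from user space.
 * The kernel copies round_up(value_size, 8) bytes per possible CPU.
 */
static int dump_percpu_value(int map_fd, __u32 key, __u32 value_size)
{
	int ncpus = libbpf_num_possible_cpus();
	size_t stride = (value_size + 7) & ~7UL;	/* round_up(value_size, 8) */
	char *values;
	int err, cpu;

	if (ncpus < 0)
		return ncpus;

	values = calloc(ncpus, stride);
	if (!values)
		return -1;

	err = bpf_map_lookup_elem(map_fd, &key, values);
	if (!err)
		for (cpu = 0; cpu < ncpus; cpu++)
			printf("cpu%d: %llu\n", cpu,
			       *(unsigned long long *)(values + cpu * stride));
	free(values);
	return err;
}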
228
229/* Called from syscall */
230static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
231{
232	struct bpf_array *array = container_of(map, struct bpf_array, map);
233	u32 index = key ? *(u32 *)key : U32_MAX;
234	u32 *next = (u32 *)next_key;
235
236	if (index >= array->map.max_entries) {
237		*next = 0;
238		return 0;
239	}
240
241	if (index == array->map.max_entries - 1)
242		return -ENOENT;
243
244	*next = index + 1;
245	return 0;
246}
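The behaviour above (an out-of-range or absent key restarts at index 0, the last key returns -ENOENT) is what makes user-space iteration terminate. A hedged sketch using libbpf's bpf_map_get_next_key():

#include <stdio.h>
#include <bpf/bpf.h>

/* Sketch: walk all keys of an array map. Passing NULL as the current key
 * makes the kernel hand back the first index (0); -ENOENT ends the loop.
 */
static void iterate_array_keys(int map_fd)
{
	__u32 key, next_key;
	__u32 *cur = NULL;

	while (bpf_map_get_next_key(map_fd, cur, &next_key) == 0) {
		printf("key: %u\n", next_key);
		key = next_key;
		cur = &key;
	}
}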
247
248/* Called from syscall or from eBPF program */
249static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
250				 u64 map_flags)
251{
252	struct bpf_array *array = container_of(map, struct bpf_array, map);
253	u32 index = *(u32 *)key;
254
255	if (unlikely(map_flags > BPF_EXIST))
256		/* unknown flags */
257		return -EINVAL;
258
259	if (unlikely(index >= array->map.max_entries))
260		/* all elements were pre-allocated, cannot insert a new one */
261		return -E2BIG;
262
263	if (unlikely(map_flags == BPF_NOEXIST))
264		/* all elements already exist */
265		return -EEXIST;
266
267	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
268		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
269		       value, map->value_size);
270	else
271		memcpy(array->value +
272		       array->elem_size * (index & array->index_mask),
273		       value, map->value_size);
274	return 0;
275}
276
277int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
278			    u64 map_flags)
279{
280	struct bpf_array *array = container_of(map, struct bpf_array, map);
281	u32 index = *(u32 *)key;
282	void __percpu *pptr;
283	int cpu, off = 0;
284	u32 size;
285
286	if (unlikely(map_flags > BPF_EXIST))
287		/* unknown flags */
288		return -EINVAL;
289
290	if (unlikely(index >= array->map.max_entries))
291		/* all elements were pre-allocated, cannot insert a new one */
292		return -E2BIG;
293
294	if (unlikely(map_flags == BPF_NOEXIST))
295		/* all elements already exist */
296		return -EEXIST;
297
298	/* the user space will provide round_up(value_size, 8) bytes that
299	 * will be copied into per-cpu area. bpf programs can only access
300	 * value_size of it. During lookup the same extra bytes will be
301	 * returned or zeros which were zero-filled by percpu_alloc,
302	 * so no kernel data leaks possible
303	 */
304	size = round_up(map->value_size, 8);
305	rcu_read_lock();
306	pptr = array->pptrs[index & array->index_mask];
307	for_each_possible_cpu(cpu) {
308		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
309		off += size;
310	}
311	rcu_read_unlock();
312	return 0;
313}
314
315/* Called from syscall or from eBPF program */
316static int array_map_delete_elem(struct bpf_map *map, void *key)
317{
318	return -EINVAL;
319}
320
321/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
322static void array_map_free(struct bpf_map *map)
323{
324	struct bpf_array *array = container_of(map, struct bpf_array, map);
325
326	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
327	 * so the programs (can be more than one that used this map) were
328	 * disconnected from events. Wait for outstanding programs to complete
329	 * and free the array
330	 */
331	synchronize_rcu();
332
333	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
334		bpf_array_free_percpu(array);
335
336	bpf_map_area_free(array);
337}
 338
339const struct bpf_map_ops array_map_ops = {
340	.map_alloc_check = array_map_alloc_check,
341	.map_alloc = array_map_alloc,
342	.map_free = array_map_free,
343	.map_get_next_key = array_map_get_next_key,
344	.map_lookup_elem = array_map_lookup_elem,
345	.map_update_elem = array_map_update_elem,
346	.map_delete_elem = array_map_delete_elem,
347	.map_gen_lookup = array_map_gen_lookup,
348};
349
350const struct bpf_map_ops percpu_array_map_ops = {
351	.map_alloc_check = array_map_alloc_check,
352	.map_alloc = array_map_alloc,
353	.map_free = array_map_free,
354	.map_get_next_key = array_map_get_next_key,
355	.map_lookup_elem = percpu_array_map_lookup_elem,
356	.map_update_elem = array_map_update_elem,
357	.map_delete_elem = array_map_delete_elem,
358};
359
360static int fd_array_map_alloc_check(union bpf_attr *attr)
361{
362	/* only file descriptors can be stored in this type of map */
363	if (attr->value_size != sizeof(u32))
364		return -EINVAL;
365	return array_map_alloc_check(attr);
366}
367
368static void fd_array_map_free(struct bpf_map *map)
369{
370	struct bpf_array *array = container_of(map, struct bpf_array, map);
371	int i;
372
373	synchronize_rcu();
374
375	/* make sure it's empty */
376	for (i = 0; i < array->map.max_entries; i++)
377		BUG_ON(array->ptrs[i] != NULL);
378
379	bpf_map_area_free(array);
380}
381
382static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
383{
384	return NULL;
385}
386
387/* only called from syscall */
388int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
389{
390	void **elem, *ptr;
391	int ret =  0;
392
393	if (!map->ops->map_fd_sys_lookup_elem)
394		return -ENOTSUPP;
395
396	rcu_read_lock();
397	elem = array_map_lookup_elem(map, key);
398	if (elem && (ptr = READ_ONCE(*elem)))
399		*value = map->ops->map_fd_sys_lookup_elem(ptr);
400	else
401		ret = -ENOENT;
402	rcu_read_unlock();
403
404	return ret;
405}
406
407/* only called from syscall */
408int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
409				 void *key, void *value, u64 map_flags)
410{
411	struct bpf_array *array = container_of(map, struct bpf_array, map);
412	void *new_ptr, *old_ptr;
413	u32 index = *(u32 *)key, ufd;
414
415	if (map_flags != BPF_ANY)
416		return -EINVAL;
417
418	if (index >= array->map.max_entries)
419		return -E2BIG;
420
421	ufd = *(u32 *)value;
422	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
423	if (IS_ERR(new_ptr))
424		return PTR_ERR(new_ptr);
425
426	old_ptr = xchg(array->ptrs + index, new_ptr);
427	if (old_ptr)
428		map->ops->map_fd_put_ptr(old_ptr);
429
430	return 0;
431}
432
433static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
434{
435	struct bpf_array *array = container_of(map, struct bpf_array, map);
436	void *old_ptr;
437	u32 index = *(u32 *)key;
438
439	if (index >= array->map.max_entries)
440		return -E2BIG;
441
442	old_ptr = xchg(array->ptrs + index, NULL);
443	if (old_ptr) {
444		map->ops->map_fd_put_ptr(old_ptr);
445		return 0;
446	} else {
447		return -ENOENT;
448	}
449}
450
451static void *prog_fd_array_get_ptr(struct bpf_map *map,
452				   struct file *map_file, int fd)
453{
454	struct bpf_array *array = container_of(map, struct bpf_array, map);
455	struct bpf_prog *prog = bpf_prog_get(fd);
456
457	if (IS_ERR(prog))
458		return prog;
459
460	if (!bpf_prog_array_compatible(array, prog)) {
461		bpf_prog_put(prog);
462		return ERR_PTR(-EINVAL);
463	}
464
465	return prog;
466}
467
468static void prog_fd_array_put_ptr(void *ptr)
469{
470	bpf_prog_put(ptr);
471}
472
473static u32 prog_fd_array_sys_lookup_elem(void *ptr)
474{
475	return ((struct bpf_prog *)ptr)->aux->id;
476}
477
478/* decrement refcnt of all bpf_progs that are stored in this map */
479static void bpf_fd_array_map_clear(struct bpf_map *map)
480{
481	struct bpf_array *array = container_of(map, struct bpf_array, map);
482	int i;
483
484	for (i = 0; i < array->map.max_entries; i++)
485		fd_array_map_delete_elem(map, &i);
486}
 487
488const struct bpf_map_ops prog_array_map_ops = {
489	.map_alloc_check = fd_array_map_alloc_check,
490	.map_alloc = array_map_alloc,
491	.map_free = fd_array_map_free,
492	.map_get_next_key = array_map_get_next_key,
493	.map_lookup_elem = fd_array_map_lookup_elem,
494	.map_delete_elem = fd_array_map_delete_elem,
495	.map_fd_get_ptr = prog_fd_array_get_ptr,
496	.map_fd_put_ptr = prog_fd_array_put_ptr,
497	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
498	.map_release_uref = bpf_fd_array_map_clear,
499};
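For context (illustrative only, not part of this file): a prog array is normally populated from user space with program fds and consumed from BPF programs via the bpf_tail_call() helper. The SEC()/struct bpf_map_def conventions below are the ones used by the kernel samples and libbpf helper headers, assumed here for the sketch.

/* BPF program side (sketch, built with the usual bpf_helpers.h macros). */
struct bpf_map_def SEC("maps") jmp_table = {
	.type        = BPF_MAP_TYPE_PROG_ARRAY,
	.key_size    = sizeof(__u32),
	.value_size  = sizeof(__u32),	/* fd_array_map_alloc_check(): must be 4 */
	.max_entries = 8,
};

SEC("kprobe/sys_execve")
int dispatcher(struct pt_regs *ctx)
{
	/* Jumps to the program stored at index 1; execution falls through
	 * here if that slot is empty.
	 */
	bpf_tail_call(ctx, &jmp_table, 1);
	return 0;
}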
500
501static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
502						   struct file *map_file)
503{
504	struct bpf_event_entry *ee;
505
506	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
507	if (ee) {
508		ee->event = perf_file->private_data;
509		ee->perf_file = perf_file;
510		ee->map_file = map_file;
511	}
512
513	return ee;
514}
515
516static void __bpf_event_entry_free(struct rcu_head *rcu)
517{
518	struct bpf_event_entry *ee;
519
520	ee = container_of(rcu, struct bpf_event_entry, rcu);
521	fput(ee->perf_file);
522	kfree(ee);
523}
524
525static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
526{
527	call_rcu(&ee->rcu, __bpf_event_entry_free);
528}
529
530static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
531					 struct file *map_file, int fd)
532{
533	struct bpf_event_entry *ee;
534	struct perf_event *event;
535	struct file *perf_file;
536	u64 value;
537
538	perf_file = perf_event_get(fd);
539	if (IS_ERR(perf_file))
540		return perf_file;
541
542	ee = ERR_PTR(-EOPNOTSUPP);
543	event = perf_file->private_data;
544	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
545		goto err_out;
546
547	ee = bpf_event_entry_gen(perf_file, map_file);
548	if (ee)
549		return ee;
550	ee = ERR_PTR(-ENOMEM);
551err_out:
552	fput(perf_file);
553	return ee;
554}
555
556static void perf_event_fd_array_put_ptr(void *ptr)
557{
558	bpf_event_entry_free_rcu(ptr);
559}
560
561static void perf_event_fd_array_release(struct bpf_map *map,
562					struct file *map_file)
563{
564	struct bpf_array *array = container_of(map, struct bpf_array, map);
565	struct bpf_event_entry *ee;
566	int i;
567
568	rcu_read_lock();
569	for (i = 0; i < array->map.max_entries; i++) {
570		ee = READ_ONCE(array->ptrs[i]);
571		if (ee && ee->map_file == map_file)
572			fd_array_map_delete_elem(map, &i);
573	}
574	rcu_read_unlock();
575}
576
577const struct bpf_map_ops perf_event_array_map_ops = {
578	.map_alloc_check = fd_array_map_alloc_check,
579	.map_alloc = array_map_alloc,
580	.map_free = fd_array_map_free,
581	.map_get_next_key = array_map_get_next_key,
582	.map_lookup_elem = fd_array_map_lookup_elem,
583	.map_delete_elem = fd_array_map_delete_elem,
584	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
585	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
586	.map_release = perf_event_fd_array_release,
587};
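For context (illustrative sketch, not part of this file): perf event arrays are typically filled from user space with perf event fds and used from BPF programs through the bpf_perf_event_output() helper, which picks the entry for the current CPU when BPF_F_CURRENT_CPU is passed. The SEC()/struct bpf_map_def macros are the sample/libbpf conventions, assumed here.

/* BPF program side (sketch). */
struct bpf_map_def SEC("maps") events = {
	.type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size    = sizeof(int),
	.value_size  = sizeof(__u32),	/* holds perf event fds set from user space */
	.max_entries = 64,		/* usually >= number of possible CPUs */
};

SEC("kprobe/sys_write")
int push_sample(struct pt_regs *ctx)
{
	struct { __u32 pid; __u32 len; } data = {};

	data.pid = bpf_get_current_pid_tgid() >> 32;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &data, sizeof(data));
	return 0;
}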
588
589#ifdef CONFIG_CGROUPS
590static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
591				     struct file *map_file /* not used */,
592				     int fd)
593{
594	return cgroup_get_from_fd(fd);
595}
596
597static void cgroup_fd_array_put_ptr(void *ptr)
598{
599	/* cgroup_put() frees the cgrp after an RCU grace period */
600	cgroup_put(ptr);
601}
602
603static void cgroup_fd_array_free(struct bpf_map *map)
604{
605	bpf_fd_array_map_clear(map);
606	fd_array_map_free(map);
607}
608
609const struct bpf_map_ops cgroup_array_map_ops = {
610	.map_alloc_check = fd_array_map_alloc_check,
611	.map_alloc = array_map_alloc,
612	.map_free = cgroup_fd_array_free,
613	.map_get_next_key = array_map_get_next_key,
614	.map_lookup_elem = fd_array_map_lookup_elem,
615	.map_delete_elem = fd_array_map_delete_elem,
616	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
617	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
618};
619#endif
620
621static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
622{
623	struct bpf_map *map, *inner_map_meta;
624
625	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
626	if (IS_ERR(inner_map_meta))
627		return inner_map_meta;
628
629	map = array_map_alloc(attr);
630	if (IS_ERR(map)) {
631		bpf_map_meta_free(inner_map_meta);
632		return map;
633	}
634
635	map->inner_map_meta = inner_map_meta;
636
637	return map;
638}
639
640static void array_of_map_free(struct bpf_map *map)
641{
642	/* map->inner_map_meta is only accessed by syscall which
643	 * is protected by fdget/fdput.
644	 */
645	bpf_map_meta_free(map->inner_map_meta);
646	bpf_fd_array_map_clear(map);
647	fd_array_map_free(map);
648}
649
650static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
651{
652	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
653
654	if (!inner_map)
655		return NULL;
656
657	return READ_ONCE(*inner_map);
658}
659
660static u32 array_of_map_gen_lookup(struct bpf_map *map,
661				   struct bpf_insn *insn_buf)
662{
663	struct bpf_array *array = container_of(map, struct bpf_array, map);
664	u32 elem_size = round_up(map->value_size, 8);
665	struct bpf_insn *insn = insn_buf;
666	const int ret = BPF_REG_0;
667	const int map_ptr = BPF_REG_1;
668	const int index = BPF_REG_2;
669
670	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
671	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
672	if (map->unpriv_array) {
673		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
674		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
675	} else {
676		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
677	}
678	if (is_power_of_2(elem_size))
679		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
680	else
681		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
682	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
683	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
684	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
685	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
686	*insn++ = BPF_MOV64_IMM(ret, 0);
687
688	return insn - insn_buf;
689}
690
691const struct bpf_map_ops array_of_maps_map_ops = {
692	.map_alloc_check = fd_array_map_alloc_check,
693	.map_alloc = array_of_map_alloc,
694	.map_free = array_of_map_free,
695	.map_get_next_key = array_map_get_next_key,
696	.map_lookup_elem = array_of_map_lookup_elem,
697	.map_delete_elem = fd_array_map_delete_elem,
698	.map_fd_get_ptr = bpf_map_fd_get_ptr,
699	.map_fd_put_ptr = bpf_map_fd_put_ptr,
700	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
701	.map_gen_lookup = array_of_map_gen_lookup,
702};
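For context (illustrative sketch): an array-of-maps is looked up in two steps from a BPF program. The outer lookup returns the inner map pointer (or NULL), which is then used for an ordinary element lookup; the outer map itself is created from user space with the template map fd in attr->inner_map_fd. The outer_map definition is assumed to exist elsewhere in the program object.

/* BPF program side (sketch). 'outer_map' is assumed to have been created as
 * BPF_MAP_TYPE_ARRAY_OF_MAPS with an inner BPF_MAP_TYPE_ARRAY.
 */
SEC("socket")
int lookup_via_outer(struct __sk_buff *skb)
{
	__u32 outer_key = 0, inner_key = 0;
	void *inner_map;
	__u64 *value;

	inner_map = bpf_map_lookup_elem(&outer_map, &outer_key);
	if (!inner_map)
		return 0;

	value = bpf_map_lookup_elem(inner_map, &inner_key);
	if (value)
		__sync_fetch_and_add(value, 1);
	return 0;
}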
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  3 * Copyright (c) 2016,2017 Facebook
  4 */
  5#include <linux/bpf.h>
  6#include <linux/btf.h>
  7#include <linux/err.h>
  8#include <linux/slab.h>
  9#include <linux/mm.h>
 10#include <linux/filter.h>
 11#include <linux/perf_event.h>
 12#include <uapi/linux/btf.h>
 13
 14#include "map_in_map.h"
 15
 16#define ARRAY_CREATE_FLAG_MASK \
 17	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
 18
 19static void bpf_array_free_percpu(struct bpf_array *array)
 20{
 21	int i;
 22
 23	for (i = 0; i < array->map.max_entries; i++) {
 24		free_percpu(array->pptrs[i]);
 25		cond_resched();
 26	}
 27}
 28
 29static int bpf_array_alloc_percpu(struct bpf_array *array)
 30{
 31	void __percpu *ptr;
 32	int i;
 33
 34	for (i = 0; i < array->map.max_entries; i++) {
 35		ptr = __alloc_percpu_gfp(array->elem_size, 8,
 36					 GFP_USER | __GFP_NOWARN);
 37		if (!ptr) {
 38			bpf_array_free_percpu(array);
 39			return -ENOMEM;
 40		}
 41		array->pptrs[i] = ptr;
 42		cond_resched();
 43	}
 44
 45	return 0;
 46}
 47
 48/* Called from syscall */
 49int array_map_alloc_check(union bpf_attr *attr)
 50{
 51	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 52	int numa_node = bpf_map_attr_numa_node(attr);
 53
 54	/* check sanity of attributes */
 55	if (attr->max_entries == 0 || attr->key_size != 4 ||
 56	    attr->value_size == 0 ||
 57	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
 58	    !bpf_map_flags_access_ok(attr->map_flags) ||
 59	    (percpu && numa_node != NUMA_NO_NODE))
 60		return -EINVAL;
 61
 62	if (attr->value_size > KMALLOC_MAX_SIZE)
 63		/* if value_size is bigger, the user space won't be able to
 64		 * access the elements.
 65		 */
 66		return -E2BIG;
 67
 68	return 0;
 69}
 70
 71static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 72{
 73	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 74	int ret, numa_node = bpf_map_attr_numa_node(attr);
 75	u32 elem_size, index_mask, max_entries;
 76	bool unpriv = !capable(CAP_SYS_ADMIN);
 77	u64 cost, array_size, mask64;
 78	struct bpf_map_memory mem;
 79	struct bpf_array *array;
 80
 81	elem_size = round_up(attr->value_size, 8);
 82
 83	max_entries = attr->max_entries;
 84
 85	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
 86	 * upper most bit set in u32 space is undefined behavior due to
 87	 * resulting 1U << 32, so do it manually here in u64 space.
 88	 */
 89	mask64 = fls_long(max_entries - 1);
 90	mask64 = 1ULL << mask64;
 91	mask64 -= 1;
 92
 93	index_mask = mask64;
 94	if (unpriv) {
 95		/* round up array size to nearest power of 2,
 96		 * since cpu will speculate within index_mask limits
 97		 */
 98		max_entries = index_mask + 1;
 99		/* Check for overflows. */
100		if (max_entries < attr->max_entries)
101			return ERR_PTR(-E2BIG);
102	}
103
104	array_size = sizeof(*array);
105	if (percpu)
106		array_size += (u64) max_entries * sizeof(void *);
107	else
108		array_size += (u64) max_entries * elem_size;
109
110	/* make sure there is no u32 overflow later in round_up() */
111	cost = array_size;
112	if (percpu)
113		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
114
115	ret = bpf_map_charge_init(&mem, cost);
116	if (ret < 0)
117		return ERR_PTR(ret);
118
119	/* allocate all map elements and zero-initialize them */
120	array = bpf_map_area_alloc(array_size, numa_node);
121	if (!array) {
122		bpf_map_charge_finish(&mem);
123		return ERR_PTR(-ENOMEM);
124	}
125	array->index_mask = index_mask;
126	array->map.unpriv_array = unpriv;
127
128	/* copy mandatory map attributes */
129	bpf_map_init_from_attr(&array->map, attr);
130	bpf_map_charge_move(&array->map.memory, &mem);
131	array->elem_size = elem_size;
132
133	if (percpu && bpf_array_alloc_percpu(array)) {
134		bpf_map_charge_finish(&array->map.memory);
135		bpf_map_area_free(array);
136		return ERR_PTR(-ENOMEM);
137	}
138
139	return &array->map;
140}
141
142/* Called from syscall or from eBPF program */
143static void *array_map_lookup_elem(struct bpf_map *map, void *key)
144{
145	struct bpf_array *array = container_of(map, struct bpf_array, map);
146	u32 index = *(u32 *)key;
147
148	if (unlikely(index >= array->map.max_entries))
149		return NULL;
150
151	return array->value + array->elem_size * (index & array->index_mask);
152}
153
154static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
155				       u32 off)
156{
157	struct bpf_array *array = container_of(map, struct bpf_array, map);
158
159	if (map->max_entries != 1)
160		return -ENOTSUPP;
161	if (off >= map->value_size)
162		return -EINVAL;
163
164	*imm = (unsigned long)array->value;
165	return 0;
166}
167
168static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
169				       u32 *off)
170{
171	struct bpf_array *array = container_of(map, struct bpf_array, map);
172	u64 base = (unsigned long)array->value;
173	u64 range = array->elem_size;
174
175	if (map->max_entries != 1)
176		return -ENOTSUPP;
177	if (imm < base || imm >= base + range)
178		return -ENOENT;
179
180	*off = imm - base;
181	return 0;
182}
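These two direct-value callbacks are what let single-entry array maps back global data: libbpf places .data/.bss/.rodata symbols into one-element array maps, and the verifier resolves loads and stores through map_direct_value_addr()/map_direct_value_meta(). An illustrative sketch of the BPF C that relies on this, assuming the usual bpf_helpers.h macros:

/* Sketch: a global variable in BPF C ends up in a one-element array map
 * (.bss here); reads and writes need no bpf_map_lookup_elem() call.
 */
__u64 packet_count = 0;			/* lives in the .bss array, max_entries == 1 */

SEC("xdp")
int count_packets(struct xdp_md *ctx)
{
	__sync_fetch_and_add(&packet_count, 1);
	return XDP_PASS;
}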
183
184/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
185static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
186{
187	struct bpf_array *array = container_of(map, struct bpf_array, map);
188	struct bpf_insn *insn = insn_buf;
189	u32 elem_size = round_up(map->value_size, 8);
190	const int ret = BPF_REG_0;
191	const int map_ptr = BPF_REG_1;
192	const int index = BPF_REG_2;
193
194	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
195	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
196	if (map->unpriv_array) {
197		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
198		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
199	} else {
200		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
201	}
202
203	if (is_power_of_2(elem_size)) {
204		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
205	} else {
206		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
207	}
208	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
209	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
210	*insn++ = BPF_MOV64_IMM(ret, 0);
211	return insn - insn_buf;
212}
213
214/* Called from eBPF program */
215static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
216{
217	struct bpf_array *array = container_of(map, struct bpf_array, map);
218	u32 index = *(u32 *)key;
219
220	if (unlikely(index >= array->map.max_entries))
221		return NULL;
222
223	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
224}
225
226int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
227{
228	struct bpf_array *array = container_of(map, struct bpf_array, map);
229	u32 index = *(u32 *)key;
230	void __percpu *pptr;
231	int cpu, off = 0;
232	u32 size;
233
234	if (unlikely(index >= array->map.max_entries))
235		return -ENOENT;
236
237	/* per_cpu areas are zero-filled and bpf programs can only
238	 * access 'value_size' of them, so copying rounded areas
239	 * will not leak any kernel data
240	 */
241	size = round_up(map->value_size, 8);
242	rcu_read_lock();
243	pptr = array->pptrs[index & array->index_mask];
244	for_each_possible_cpu(cpu) {
245		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
246		off += size;
247	}
248	rcu_read_unlock();
249	return 0;
250}
251
252/* Called from syscall */
253static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
254{
255	struct bpf_array *array = container_of(map, struct bpf_array, map);
256	u32 index = key ? *(u32 *)key : U32_MAX;
257	u32 *next = (u32 *)next_key;
258
259	if (index >= array->map.max_entries) {
260		*next = 0;
261		return 0;
262	}
263
264	if (index == array->map.max_entries - 1)
265		return -ENOENT;
266
267	*next = index + 1;
268	return 0;
269}
270
271/* Called from syscall or from eBPF program */
272static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
273				 u64 map_flags)
274{
275	struct bpf_array *array = container_of(map, struct bpf_array, map);
276	u32 index = *(u32 *)key;
277	char *val;
278
279	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
280		/* unknown flags */
281		return -EINVAL;
282
283	if (unlikely(index >= array->map.max_entries))
284		/* all elements were pre-allocated, cannot insert a new one */
285		return -E2BIG;
286
287	if (unlikely(map_flags & BPF_NOEXIST))
288		/* all elements already exist */
289		return -EEXIST;
290
291	if (unlikely((map_flags & BPF_F_LOCK) &&
292		     !map_value_has_spin_lock(map)))
293		return -EINVAL;
294
295	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
296		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
297		       value, map->value_size);
298	} else {
299		val = array->value +
300			array->elem_size * (index & array->index_mask);
301		if (map_flags & BPF_F_LOCK)
302			copy_map_value_locked(map, val, value, false);
303		else
304			copy_map_value(map, val, value);
305	}
306	return 0;
307}
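The BPF_F_LOCK path above requires the map value to embed a struct bpf_spin_lock (declared in the map's BTF value type); the copy then excludes the lock word and is serialized against concurrent updates. A hedged user-space sketch using libbpf; the struct layout and helper name are invented for the example.

#include <bpf/bpf.h>

struct counters {
	struct bpf_spin_lock lock;	/* must appear in the map's BTF value type */
	__u64 packets;
	__u64 bytes;
};

/* Sketch: update an element under the in-value spin lock. */
static int update_locked(int map_fd, __u32 key, __u64 packets, __u64 bytes)
{
	struct counters val = {
		.packets = packets,
		.bytes   = bytes,
	};

	return bpf_map_update_elem(map_fd, &key, &val, BPF_F_LOCK);
}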
308
309int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
310			    u64 map_flags)
311{
312	struct bpf_array *array = container_of(map, struct bpf_array, map);
313	u32 index = *(u32 *)key;
314	void __percpu *pptr;
315	int cpu, off = 0;
316	u32 size;
317
318	if (unlikely(map_flags > BPF_EXIST))
319		/* unknown flags */
320		return -EINVAL;
321
322	if (unlikely(index >= array->map.max_entries))
323		/* all elements were pre-allocated, cannot insert a new one */
324		return -E2BIG;
325
326	if (unlikely(map_flags == BPF_NOEXIST))
327		/* all elements already exist */
328		return -EEXIST;
329
330	/* the user space will provide round_up(value_size, 8) bytes that
331	 * will be copied into per-cpu area. bpf programs can only access
332	 * value_size of it. During lookup the same extra bytes will be
333	 * returned or zeros which were zero-filled by percpu_alloc,
334	 * so no kernel data leaks possible
335	 */
336	size = round_up(map->value_size, 8);
337	rcu_read_lock();
338	pptr = array->pptrs[index & array->index_mask];
339	for_each_possible_cpu(cpu) {
340		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
341		off += size;
342	}
343	rcu_read_unlock();
344	return 0;
345}
346
347/* Called from syscall or from eBPF program */
348static int array_map_delete_elem(struct bpf_map *map, void *key)
349{
350	return -EINVAL;
351}
352
353/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
354static void array_map_free(struct bpf_map *map)
355{
356	struct bpf_array *array = container_of(map, struct bpf_array, map);
357
358	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
359	 * so the programs (can be more than one that used this map) were
360	 * disconnected from events. Wait for outstanding programs to complete
361	 * and free the array
362	 */
363	synchronize_rcu();
364
365	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
366		bpf_array_free_percpu(array);
367
368	bpf_map_area_free(array);
369}
370
371static void array_map_seq_show_elem(struct bpf_map *map, void *key,
372				    struct seq_file *m)
373{
374	void *value;
375
376	rcu_read_lock();
377
378	value = array_map_lookup_elem(map, key);
379	if (!value) {
380		rcu_read_unlock();
381		return;
382	}
383
384	if (map->btf_key_type_id)
385		seq_printf(m, "%u: ", *(u32 *)key);
386	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
387	seq_puts(m, "\n");
388
389	rcu_read_unlock();
390}
391
392static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
393					   struct seq_file *m)
394{
395	struct bpf_array *array = container_of(map, struct bpf_array, map);
396	u32 index = *(u32 *)key;
397	void __percpu *pptr;
398	int cpu;
399
400	rcu_read_lock();
401
402	seq_printf(m, "%u: {\n", *(u32 *)key);
403	pptr = array->pptrs[index & array->index_mask];
404	for_each_possible_cpu(cpu) {
405		seq_printf(m, "\tcpu%d: ", cpu);
406		btf_type_seq_show(map->btf, map->btf_value_type_id,
407				  per_cpu_ptr(pptr, cpu), m);
408		seq_puts(m, "\n");
409	}
410	seq_puts(m, "}\n");
411
412	rcu_read_unlock();
413}
414
415static int array_map_check_btf(const struct bpf_map *map,
416			       const struct btf *btf,
417			       const struct btf_type *key_type,
418			       const struct btf_type *value_type)
419{
420	u32 int_data;
421
422	/* One exception for keyless BTF: .bss/.data/.rodata map */
423	if (btf_type_is_void(key_type)) {
424		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
425		    map->max_entries != 1)
426			return -EINVAL;
427
428		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
429			return -EINVAL;
430
431		return 0;
432	}
433
434	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
435		return -EINVAL;
436
437	int_data = *(u32 *)(key_type + 1);
438	/* bpf array can only take a u32 key. This check makes sure
439	 * that the btf matches the attr used during map_create.
440	 */
441	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
442		return -EINVAL;
443
444	return 0;
445}
446
447const struct bpf_map_ops array_map_ops = {
448	.map_alloc_check = array_map_alloc_check,
449	.map_alloc = array_map_alloc,
450	.map_free = array_map_free,
451	.map_get_next_key = array_map_get_next_key,
452	.map_lookup_elem = array_map_lookup_elem,
453	.map_update_elem = array_map_update_elem,
454	.map_delete_elem = array_map_delete_elem,
455	.map_gen_lookup = array_map_gen_lookup,
456	.map_direct_value_addr = array_map_direct_value_addr,
457	.map_direct_value_meta = array_map_direct_value_meta,
458	.map_seq_show_elem = array_map_seq_show_elem,
459	.map_check_btf = array_map_check_btf,
460};
461
462const struct bpf_map_ops percpu_array_map_ops = {
463	.map_alloc_check = array_map_alloc_check,
464	.map_alloc = array_map_alloc,
465	.map_free = array_map_free,
466	.map_get_next_key = array_map_get_next_key,
467	.map_lookup_elem = percpu_array_map_lookup_elem,
468	.map_update_elem = array_map_update_elem,
469	.map_delete_elem = array_map_delete_elem,
470	.map_seq_show_elem = percpu_array_map_seq_show_elem,
471	.map_check_btf = array_map_check_btf,
472};
473
474static int fd_array_map_alloc_check(union bpf_attr *attr)
475{
476	/* only file descriptors can be stored in this type of map */
477	if (attr->value_size != sizeof(u32))
478		return -EINVAL;
479	/* Program read-only/write-only not supported for special maps yet. */
480	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
481		return -EINVAL;
482	return array_map_alloc_check(attr);
483}
484
485static void fd_array_map_free(struct bpf_map *map)
486{
487	struct bpf_array *array = container_of(map, struct bpf_array, map);
488	int i;
489
490	synchronize_rcu();
491
492	/* make sure it's empty */
493	for (i = 0; i < array->map.max_entries; i++)
494		BUG_ON(array->ptrs[i] != NULL);
495
496	bpf_map_area_free(array);
497}
498
499static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
500{
501	return ERR_PTR(-EOPNOTSUPP);
502}
503
504/* only called from syscall */
505int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
506{
507	void **elem, *ptr;
508	int ret =  0;
509
510	if (!map->ops->map_fd_sys_lookup_elem)
511		return -ENOTSUPP;
512
513	rcu_read_lock();
514	elem = array_map_lookup_elem(map, key);
515	if (elem && (ptr = READ_ONCE(*elem)))
516		*value = map->ops->map_fd_sys_lookup_elem(ptr);
517	else
518		ret = -ENOENT;
519	rcu_read_unlock();
520
521	return ret;
522}
523
524/* only called from syscall */
525int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
526				 void *key, void *value, u64 map_flags)
527{
528	struct bpf_array *array = container_of(map, struct bpf_array, map);
529	void *new_ptr, *old_ptr;
530	u32 index = *(u32 *)key, ufd;
531
532	if (map_flags != BPF_ANY)
533		return -EINVAL;
534
535	if (index >= array->map.max_entries)
536		return -E2BIG;
537
538	ufd = *(u32 *)value;
539	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
540	if (IS_ERR(new_ptr))
541		return PTR_ERR(new_ptr);
542
543	old_ptr = xchg(array->ptrs + index, new_ptr);
544	if (old_ptr)
545		map->ops->map_fd_put_ptr(old_ptr);
546
547	return 0;
548}
549
550static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
551{
552	struct bpf_array *array = container_of(map, struct bpf_array, map);
553	void *old_ptr;
554	u32 index = *(u32 *)key;
555
556	if (index >= array->map.max_entries)
557		return -E2BIG;
558
559	old_ptr = xchg(array->ptrs + index, NULL);
560	if (old_ptr) {
561		map->ops->map_fd_put_ptr(old_ptr);
562		return 0;
563	} else {
564		return -ENOENT;
565	}
566}
567
568static void *prog_fd_array_get_ptr(struct bpf_map *map,
569				   struct file *map_file, int fd)
570{
571	struct bpf_array *array = container_of(map, struct bpf_array, map);
572	struct bpf_prog *prog = bpf_prog_get(fd);
573
574	if (IS_ERR(prog))
575		return prog;
576
577	if (!bpf_prog_array_compatible(array, prog)) {
578		bpf_prog_put(prog);
579		return ERR_PTR(-EINVAL);
580	}
581
582	return prog;
583}
584
585static void prog_fd_array_put_ptr(void *ptr)
586{
587	bpf_prog_put(ptr);
588}
589
590static u32 prog_fd_array_sys_lookup_elem(void *ptr)
591{
592	return ((struct bpf_prog *)ptr)->aux->id;
593}
594
595/* decrement refcnt of all bpf_progs that are stored in this map */
596static void bpf_fd_array_map_clear(struct bpf_map *map)
597{
598	struct bpf_array *array = container_of(map, struct bpf_array, map);
599	int i;
600
601	for (i = 0; i < array->map.max_entries; i++)
602		fd_array_map_delete_elem(map, &i);
603}
604
605static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
606					 struct seq_file *m)
607{
608	void **elem, *ptr;
609	u32 prog_id;
610
611	rcu_read_lock();
612
613	elem = array_map_lookup_elem(map, key);
614	if (elem) {
615		ptr = READ_ONCE(*elem);
616		if (ptr) {
617			seq_printf(m, "%u: ", *(u32 *)key);
618			prog_id = prog_fd_array_sys_lookup_elem(ptr);
619			btf_type_seq_show(map->btf, map->btf_value_type_id,
620					  &prog_id, m);
621			seq_puts(m, "\n");
622		}
623	}
624
625	rcu_read_unlock();
626}
627
628const struct bpf_map_ops prog_array_map_ops = {
629	.map_alloc_check = fd_array_map_alloc_check,
630	.map_alloc = array_map_alloc,
631	.map_free = fd_array_map_free,
632	.map_get_next_key = array_map_get_next_key,
633	.map_lookup_elem = fd_array_map_lookup_elem,
634	.map_delete_elem = fd_array_map_delete_elem,
635	.map_fd_get_ptr = prog_fd_array_get_ptr,
636	.map_fd_put_ptr = prog_fd_array_put_ptr,
637	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
638	.map_release_uref = bpf_fd_array_map_clear,
639	.map_seq_show_elem = prog_array_map_seq_show_elem,
640};
641
642static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
643						   struct file *map_file)
644{
645	struct bpf_event_entry *ee;
646
647	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
648	if (ee) {
649		ee->event = perf_file->private_data;
650		ee->perf_file = perf_file;
651		ee->map_file = map_file;
652	}
653
654	return ee;
655}
656
657static void __bpf_event_entry_free(struct rcu_head *rcu)
658{
659	struct bpf_event_entry *ee;
660
661	ee = container_of(rcu, struct bpf_event_entry, rcu);
662	fput(ee->perf_file);
663	kfree(ee);
664}
665
666static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
667{
668	call_rcu(&ee->rcu, __bpf_event_entry_free);
669}
670
671static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
672					 struct file *map_file, int fd)
673{
674	struct bpf_event_entry *ee;
675	struct perf_event *event;
676	struct file *perf_file;
677	u64 value;
678
679	perf_file = perf_event_get(fd);
680	if (IS_ERR(perf_file))
681		return perf_file;
682
683	ee = ERR_PTR(-EOPNOTSUPP);
684	event = perf_file->private_data;
685	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
686		goto err_out;
687
688	ee = bpf_event_entry_gen(perf_file, map_file);
689	if (ee)
690		return ee;
691	ee = ERR_PTR(-ENOMEM);
692err_out:
693	fput(perf_file);
694	return ee;
695}
696
697static void perf_event_fd_array_put_ptr(void *ptr)
698{
699	bpf_event_entry_free_rcu(ptr);
700}
701
702static void perf_event_fd_array_release(struct bpf_map *map,
703					struct file *map_file)
704{
705	struct bpf_array *array = container_of(map, struct bpf_array, map);
706	struct bpf_event_entry *ee;
707	int i;
708
709	rcu_read_lock();
710	for (i = 0; i < array->map.max_entries; i++) {
711		ee = READ_ONCE(array->ptrs[i]);
712		if (ee && ee->map_file == map_file)
713			fd_array_map_delete_elem(map, &i);
714	}
715	rcu_read_unlock();
716}
717
718const struct bpf_map_ops perf_event_array_map_ops = {
719	.map_alloc_check = fd_array_map_alloc_check,
720	.map_alloc = array_map_alloc,
721	.map_free = fd_array_map_free,
722	.map_get_next_key = array_map_get_next_key,
723	.map_lookup_elem = fd_array_map_lookup_elem,
724	.map_delete_elem = fd_array_map_delete_elem,
725	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
726	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
727	.map_release = perf_event_fd_array_release,
728	.map_check_btf = map_check_no_btf,
729};
730
731#ifdef CONFIG_CGROUPS
732static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
733				     struct file *map_file /* not used */,
734				     int fd)
735{
736	return cgroup_get_from_fd(fd);
737}
738
739static void cgroup_fd_array_put_ptr(void *ptr)
740{
741	/* cgroup_put() frees the cgrp after an RCU grace period */
742	cgroup_put(ptr);
743}
744
745static void cgroup_fd_array_free(struct bpf_map *map)
746{
747	bpf_fd_array_map_clear(map);
748	fd_array_map_free(map);
749}
750
751const struct bpf_map_ops cgroup_array_map_ops = {
752	.map_alloc_check = fd_array_map_alloc_check,
753	.map_alloc = array_map_alloc,
754	.map_free = cgroup_fd_array_free,
755	.map_get_next_key = array_map_get_next_key,
756	.map_lookup_elem = fd_array_map_lookup_elem,
757	.map_delete_elem = fd_array_map_delete_elem,
758	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
759	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
760	.map_check_btf = map_check_no_btf,
761};
762#endif
763
764static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
765{
766	struct bpf_map *map, *inner_map_meta;
767
768	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
769	if (IS_ERR(inner_map_meta))
770		return inner_map_meta;
771
772	map = array_map_alloc(attr);
773	if (IS_ERR(map)) {
774		bpf_map_meta_free(inner_map_meta);
775		return map;
776	}
777
778	map->inner_map_meta = inner_map_meta;
779
780	return map;
781}
782
783static void array_of_map_free(struct bpf_map *map)
784{
785	/* map->inner_map_meta is only accessed by syscall which
786	 * is protected by fdget/fdput.
787	 */
788	bpf_map_meta_free(map->inner_map_meta);
789	bpf_fd_array_map_clear(map);
790	fd_array_map_free(map);
791}
792
793static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
794{
795	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
796
797	if (!inner_map)
798		return NULL;
799
800	return READ_ONCE(*inner_map);
801}
802
803static u32 array_of_map_gen_lookup(struct bpf_map *map,
804				   struct bpf_insn *insn_buf)
805{
806	struct bpf_array *array = container_of(map, struct bpf_array, map);
807	u32 elem_size = round_up(map->value_size, 8);
808	struct bpf_insn *insn = insn_buf;
809	const int ret = BPF_REG_0;
810	const int map_ptr = BPF_REG_1;
811	const int index = BPF_REG_2;
812
813	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
814	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
815	if (map->unpriv_array) {
816		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
817		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
818	} else {
819		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
820	}
821	if (is_power_of_2(elem_size))
822		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
823	else
824		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
825	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
826	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
827	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
828	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
829	*insn++ = BPF_MOV64_IMM(ret, 0);
830
831	return insn - insn_buf;
832}
833
834const struct bpf_map_ops array_of_maps_map_ops = {
835	.map_alloc_check = fd_array_map_alloc_check,
836	.map_alloc = array_of_map_alloc,
837	.map_free = array_of_map_free,
838	.map_get_next_key = array_map_get_next_key,
839	.map_lookup_elem = array_of_map_lookup_elem,
840	.map_delete_elem = fd_array_map_delete_elem,
841	.map_fd_get_ptr = bpf_map_fd_get_ptr,
842	.map_fd_put_ptr = bpf_map_fd_put_ptr,
843	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
844	.map_gen_lookup = array_of_map_gen_lookup,
845	.map_check_btf = map_check_no_btf,
846};