v4.6: kernel/bpf/arraymap.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
	if (!array) {
		array = vzalloc(array_size);
		if (!array)
			return ERR_PTR(-ENOMEM);
	}

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
		kvfree(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}

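/* A worked example with hypothetical numbers: value_size = 12 and
 * max_entries = 256 give elem_size = round_up(12, 8) = 16, so the non-percpu
 * allocation above is sizeof(struct bpf_array) + 256 * 16 bytes, and
 * map.pages charges that total rounded up to whole pages.
 */
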
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}

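/* A minimal sketch of the eBPF-program side of this lookup (assumes a
 * libbpf-style program and a map named "my_array" declared elsewhere; both
 * are hypothetical, not part of this file). For BPF_MAP_TYPE_ARRAY the
 * bpf_map_lookup_elem() helper resolves to array_map_lookup_elem() above:
 *
 *	u32 key = 0;
 *	u64 *val = bpf_map_lookup_elem(&my_array, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 */
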
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

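/* Example with hypothetical sizes: for value_size = 12 on a machine with 4
 * possible CPUs, userspace must supply round_up(12, 8) * 4 = 64 bytes to
 * BPF_MAP_LOOKUP_ELEM on a percpu array; the value of CPU n is copied to
 * offset n * 16 of that buffer.
 */
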
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

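/* Sketch of the usual userspace iteration over an array map (assumes a
 * bpf_map_get_next_key() wrapper around the BPF_MAP_GET_NEXT_KEY command;
 * the wrapper name is illustrative). Any out-of-range start key restarts
 * the walk at index 0:
 *
 *	__u32 key = -1, next;
 *	while (bpf_map_get_next_key(map_fd, &key, &next) == 0) {
 *		... look up 'next' ...
 *		key = next;
 *	}
 */
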
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	kvfree(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list array_type __read_mostly = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static const struct bpf_map_ops percpu_array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list percpu_array_type __read_mostly = {
	.ops = &percpu_array_ops,
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	bpf_register_map_type(&percpu_array_type);
	return 0;
}
late_initcall(register_array_map);

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);
	kvfree(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
static int fd_array_map_update_elem(struct bpf_map *map, void *key,
				    void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

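/* Sketch of what this update enables for BPF_MAP_TYPE_PROG_ARRAY (the fds
 * and map names are hypothetical): userspace stores a program fd at an
 * index, and an eBPF program can then tail-call through that slot:
 *
 *	__u32 idx = 1;
 *	bpf_map_update_elem(jmp_table_fd, &idx, &prog_fd, BPF_ANY);
 *
 * and on the eBPF side:
 *
 *	bpf_tail_call(ctx, &jmp_table, 1);
 */
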
static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);
	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}
	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	struct bpf_prog *prog = ptr;

	bpf_prog_put_rcu(prog);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_update_elem = fd_array_map_update_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __read_mostly = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);

static void perf_event_array_map_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
{
	struct perf_event *event;
	const struct perf_event_attr *attr;
	struct file *file;

	file = perf_event_get(fd);
	if (IS_ERR(file))
		return file;

	event = file->private_data;

	attr = perf_event_attrs(event);
	if (IS_ERR(attr))
		goto err;

	if (attr->inherit)
		goto err;

	if (attr->type == PERF_TYPE_RAW)
		return file;

	if (attr->type == PERF_TYPE_HARDWARE)
		return file;

	if (attr->type == PERF_TYPE_SOFTWARE &&
	    attr->config == PERF_COUNT_SW_BPF_OUTPUT)
		return file;
err:
	fput(file);
	return ERR_PTR(-EINVAL);
}

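/* Typical usage sketch (hypothetical fds): userspace opens one perf event
 * per CPU with perf_event_open() and stores the fds here so that programs
 * can call bpf_perf_event_read()/bpf_perf_event_output() against this map:
 *
 *	bpf_map_update_elem(events_fd, &cpu, &perf_fd, BPF_ANY);
 */
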
static void perf_event_fd_array_put_ptr(void *ptr)
{
	fput((struct file *)ptr);
}

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = perf_event_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_update_elem = fd_array_map_update_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
};

static struct bpf_map_type_list perf_event_array_type __read_mostly = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);
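
/* End of the v4.6 version. A minimal userspace sketch (raw bpf(2) syscall,
 * error handling omitted) of creating the plain array map that
 * array_map_alloc() above services:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
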
v4.17: kernel/bpf/arraymap.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
static int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

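/* Worked example of the unprivileged rounding above (numbers hypothetical):
 * a request for max_entries = 200 gives fls_long(199) = 8, so
 * index_mask = (1ULL << 8) - 1 = 255 and max_entries is rounded up to 256;
 * any index the CPU speculates within index_mask then stays inside the
 * allocation.
 */
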
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

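/* Roughly, the instructions emitted above inline the following C (a sketch;
 * 'elems' stands for map_ptr + offsetof(struct bpf_array, value)):
 *
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	(unprivileged maps only)
 *	return elems + index * elem_size;
 */
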
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

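/* Note: for fd-based arrays the syscall lookup does not expose the stored
 * kernel pointer; map_fd_sys_lookup_elem() translates it to a stable u32
 * instead (the bpf_prog id for prog arrays, see
 * prog_fd_array_sys_lookup_elem() below).
 */
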
/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};
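
/* A minimal userspace sketch of the map-in-map flow these ops implement
 * (fds are hypothetical, error handling omitted): the outer array is
 * created with inner_map_fd describing the inner maps' shape, then inner
 * map fds are stored like ordinary values:
 *
 *	union bpf_attr attr = {
 *		.map_type     = BPF_MAP_TYPE_ARRAY_OF_MAPS,
 *		.key_size     = 4,
 *		.value_size   = 4,		(an fd)
 *		.max_entries  = 8,
 *		.inner_map_fd = inner_fd,
 *	};
 *	int outer_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 *	__u32 idx = 0;
 *	bpf_map_update_elem(outer_fd, &idx, &inner_fd, BPF_ANY);
 */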