v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}
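/*
 * Note: head/tail form a classic ring buffer that keeps one slot unused
 * so that "full" and "empty" are distinguishable: head == tail means
 * empty, while (head + 1) mod size == tail means full.  That is why
 * qs->size is allocated as max_entries + 1.  With max_entries = 2
 * (size = 3), for example, head == tail holds 0 elements, and the map is
 * full with 2 elements, when advancing head once more would make it
 * collide with tail.
 */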

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
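/*
 * Example (not part of this kernel source): creating a queue map from
 * user space.  A minimal sketch, assuming libbpf >= 0.7 for
 * bpf_map_create(); note that key_size must be 0 and value_size and
 * max_entries must be non-zero, or the check above returns -EINVAL:
 *
 *   #include <bpf/bpf.h>
 *
 *   int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "example_q",
 *                           0,              // key_size: must be 0
 *                           sizeof(__u32),  // value_size: must be non-zero
 *                           64,             // max_entries: must be non-zero
 *                           NULL);          // default options
 */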

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}
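/*
 * Note: the struct is not cleared here; bpf_map_area_alloc() is expected
 * to return zeroed memory in this kernel version, so head, tail and the
 * element array all start at zero (the v5.14.15 listing below still does
 * an explicit memset instead).
 */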

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
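/*
 * Example (not part of this kernel source): from user space the peek and
 * pop paths are reached through the BPF_MAP_LOOKUP_ELEM and
 * BPF_MAP_LOOKUP_AND_DELETE_ELEM commands with a NULL key.  A hedged
 * sketch using libbpf on a queue map fd:
 *
 *   __u32 v;
 *
 *   // peek the oldest element: __queue_map_get(map, &v, false)
 *   if (!bpf_map_lookup_elem(fd, NULL, &v))
 *       ;  // v holds the oldest element; -ENOENT means empty
 *
 *   // pop it: __queue_map_get(map, &v, true)
 *   if (!bpf_map_lookup_and_delete_elem(fd, NULL, &v))
 *       ;  // v holds the oldest element, now removed from the map
 */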

static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

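	/*
	 * head is the next free slot, so the most recent element lives at
	 * head - 1.  The subtraction is on an unsigned u32: when head is 0
	 * it wraps to 0xffffffff, which is >= size, and the index is then
	 * folded back to size - 1, the last slot of the ring.
	 */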
	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}
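/*
 * Example (not part of this kernel source): from a BPF program, the same
 * operations go through the bpf_map_push_elem(), bpf_map_pop_elem() and
 * bpf_map_peek_elem() helpers, which land in the ops below.  A hedged
 * sketch, assuming a libbpf recent enough to accept a BTF-defined queue
 * map (the tracepoint chosen here is arbitrary):
 *
 *   #include <linux/bpf.h>
 *   #include <bpf/bpf_helpers.h>
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_QUEUE);
 *       __uint(max_entries, 64);
 *       __type(value, __u32);
 *   } q SEC(".maps");
 *
 *   SEC("tracepoint/syscalls/sys_enter_getpid")
 *   int handler(void *ctx)
 *   {
 *       __u32 v = 42;
 *
 *       bpf_map_push_elem(&q, &v, 0);   // -> queue_stack_map_push_elem()
 *       bpf_map_pop_elem(&q, &v);       // -> queue_map_pop_elem()
 *       return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */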

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}
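/*
 * Example (not part of this kernel source): from user space a push is a
 * BPF_MAP_UPDATE_ELEM with a NULL key.  A hedged sketch on a full map:
 *
 *   __u32 v = 1;
 *
 *   // ordinary push: fails with -E2BIG once the map is full
 *   bpf_map_update_elem(fd, NULL, &v, 0);
 *
 *   // forced push: BPF_EXIST drops the oldest element to make room
 *   bpf_map_update_elem(fd, NULL, &v, BPF_EXIST);
 */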

/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}
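/*
 * The stubs above reject the keyed map interface: queue and stack maps
 * have no keys, so lookup from a BPF program yields NULL, and update,
 * delete and key iteration all fail with -EINVAL.  Push, pop and peek
 * are the only supported operations.
 */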

BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};

const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};
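/*
 * Example (not part of this kernel source): a self-contained user-space
 * sketch contrasting the two map types, assuming libbpf >= 0.7 and
 * CAP_BPF (or root).  Pushing 1, 2, 3 and then popping drains a queue in
 * FIFO order but a stack in LIFO order:
 *
 *   #include <stdio.h>
 *   #include <bpf/bpf.h>
 *
 *   static void demo(enum bpf_map_type type, const char *name)
 *   {
 *       // key_size 0, value_size 4, max_entries 8
 *       int fd = bpf_map_create(type, name, 0, sizeof(__u32), 8, NULL);
 *       __u32 v;
 *
 *       if (fd < 0)
 *           return;
 *       for (v = 1; v <= 3; v++)
 *           bpf_map_update_elem(fd, NULL, &v, 0);           // push
 *       while (!bpf_map_lookup_and_delete_elem(fd, NULL, &v)) // pop
 *           printf("%s: %u\n", name, v);
 *   }
 *
 *   int main(void)
 *   {
 *       demo(BPF_MAP_TYPE_QUEUE, "fifo");   // prints 1, 2, 3
 *       demo(BPF_MAP_TYPE_STACK, "lifo");   // prints 3, 2, 1
 *       return 0;
 *   }
 */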
v5.14.15
// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

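	/*
	 * Unlike the v6.2 listing above, this version zeroes the freshly
	 * allocated region explicitly; the memset was dropped later once
	 * bpf_map_area_alloc() was relied on to return zeroed memory.
	 */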
	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

static int queue_map_btf_id;
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &queue_map_btf_id,
};

static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &stack_map_btf_id,
};