kernel/bpf/local_storage.c, v5.4
  1//SPDX-License-Identifier: GPL-2.0
  2#include <linux/bpf-cgroup.h>
  3#include <linux/bpf.h>
  4#include <linux/btf.h>
  5#include <linux/bug.h>
  6#include <linux/filter.h>
  7#include <linux/mm.h>
  8#include <linux/rbtree.h>
  9#include <linux/slab.h>
 10#include <uapi/linux/btf.h>
 11
 12DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
 13
 14#ifdef CONFIG_CGROUP_BPF
 15
 16#define LOCAL_STORAGE_CREATE_FLAG_MASK					\
 17	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
 18
 19struct bpf_cgroup_storage_map {
 20	struct bpf_map map;
 21
 22	spinlock_t lock;
 23	struct bpf_prog *prog;
 24	struct rb_root root;
 25	struct list_head list;
 26};
 27
 28static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
 29{
 30	return container_of(map, struct bpf_cgroup_storage_map, map);
 31}
 32
 33static int bpf_cgroup_storage_key_cmp(
 34	const struct bpf_cgroup_storage_key *key1,
 35	const struct bpf_cgroup_storage_key *key2)
 36{
 37	if (key1->cgroup_inode_id < key2->cgroup_inode_id)
 38		return -1;
 39	else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
 40		return 1;
 41	else if (key1->attach_type < key2->attach_type)
 42		return -1;
 43	else if (key1->attach_type > key2->attach_type)
 44		return 1;
 45	return 0;
 46}
 47
 48static struct bpf_cgroup_storage *cgroup_storage_lookup(
 49	struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key,
 50	bool locked)
 51{
 52	struct rb_root *root = &map->root;
 53	struct rb_node *node;
 54
 55	if (!locked)
 56		spin_lock_bh(&map->lock);
 57
 58	node = root->rb_node;
 59	while (node) {
 60		struct bpf_cgroup_storage *storage;
 61
 62		storage = container_of(node, struct bpf_cgroup_storage, node);
 63
 64		switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
 65		case -1:
 66			node = node->rb_left;
 67			break;
 68		case 1:
 69			node = node->rb_right;
 70			break;
 71		default:
 72			if (!locked)
 73				spin_unlock_bh(&map->lock);
 74			return storage;
 75		}
 76	}
 77
 78	if (!locked)
 79		spin_unlock_bh(&map->lock);
 80
 81	return NULL;
 82}
 83
 84static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
 85				 struct bpf_cgroup_storage *storage)
 86{
 87	struct rb_root *root = &map->root;
 88	struct rb_node **new = &(root->rb_node), *parent = NULL;
 89
 90	while (*new) {
 91		struct bpf_cgroup_storage *this;
 92
 93		this = container_of(*new, struct bpf_cgroup_storage, node);
 94
 95		parent = *new;
 96		switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
 97		case -1:
 98			new = &((*new)->rb_left);
 99			break;
100		case 1:
101			new = &((*new)->rb_right);
102			break;
103		default:
104			return -EEXIST;
105		}
106	}
107
108	rb_link_node(&storage->node, parent, new);
109	rb_insert_color(&storage->node, root);
110
111	return 0;
112}
113
114static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key)
115{
116	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
117	struct bpf_cgroup_storage_key *key = _key;
118	struct bpf_cgroup_storage *storage;
119
120	storage = cgroup_storage_lookup(map, key, false);
121	if (!storage)
122		return NULL;
123
124	return &READ_ONCE(storage->buf)->data[0];
125}
126
127static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
128				      void *value, u64 flags)
129{
130	struct bpf_cgroup_storage_key *key = _key;
131	struct bpf_cgroup_storage *storage;
132	struct bpf_storage_buffer *new;
133
134	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST | BPF_NOEXIST)))
135		return -EINVAL;
136
137	if (unlikely(flags & BPF_NOEXIST))
138		return -EINVAL;
139
140	if (unlikely((flags & BPF_F_LOCK) &&
141		     !map_value_has_spin_lock(map)))
142		return -EINVAL;
143
144	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
145					key, false);
146	if (!storage)
147		return -ENOENT;
148
149	if (flags & BPF_F_LOCK) {
150		copy_map_value_locked(map, storage->buf->data, value, false);
151		return 0;
152	}
153
154	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
155			   map->value_size,
156			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
157			   map->numa_node);
158	if (!new)
159		return -ENOMEM;
160
161	memcpy(&new->data[0], value, map->value_size);
162	check_and_init_map_lock(map, new->data);
163
164	new = xchg(&storage->buf, new);
165	kfree_rcu(new, rcu);
166
167	return 0;
168}
169
170int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key,
171				   void *value)
172{
173	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
174	struct bpf_cgroup_storage_key *key = _key;
175	struct bpf_cgroup_storage *storage;
176	int cpu, off = 0;
177	u32 size;
178
179	rcu_read_lock();
180	storage = cgroup_storage_lookup(map, key, false);
181	if (!storage) {
182		rcu_read_unlock();
183		return -ENOENT;
184	}
185
186	/* per_cpu areas are zero-filled and bpf programs can only
187	 * access 'value_size' of them, so copying rounded areas
188	 * will not leak any kernel data
189	 */
190	size = round_up(_map->value_size, 8);
191	for_each_possible_cpu(cpu) {
192		bpf_long_memcpy(value + off,
193				per_cpu_ptr(storage->percpu_buf, cpu), size);
194		off += size;
195	}
196	rcu_read_unlock();
197	return 0;
198}
199
200int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key,
201				     void *value, u64 map_flags)
202{
203	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
204	struct bpf_cgroup_storage_key *key = _key;
205	struct bpf_cgroup_storage *storage;
206	int cpu, off = 0;
207	u32 size;
208
209	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
210		return -EINVAL;
211
212	rcu_read_lock();
213	storage = cgroup_storage_lookup(map, key, false);
214	if (!storage) {
215		rcu_read_unlock();
216		return -ENOENT;
217	}
218
219	/* the user space will provide round_up(value_size, 8) bytes that
220	 * will be copied into per-cpu area. bpf programs can only access
221	 * value_size of it. During lookup the same extra bytes will be
222	 * returned or zeros which were zero-filled by percpu_alloc,
223	 * so no kernel data leaks possible
224	 */
225	size = round_up(_map->value_size, 8);
226	for_each_possible_cpu(cpu) {
227		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
228				value + off, size);
229		off += size;
230	}
231	rcu_read_unlock();
232	return 0;
233}
234
235static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
236				       void *_next_key)
237{
238	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
239	struct bpf_cgroup_storage_key *key = _key;
240	struct bpf_cgroup_storage_key *next = _next_key;
241	struct bpf_cgroup_storage *storage;
242
243	spin_lock_bh(&map->lock);
244
245	if (list_empty(&map->list))
246		goto enoent;
247
248	if (key) {
249		storage = cgroup_storage_lookup(map, key, true);
250		if (!storage)
251			goto enoent;
252
253		storage = list_next_entry(storage, list);
254		if (!storage)
255			goto enoent;
256	} else {
257		storage = list_first_entry(&map->list,
258					 struct bpf_cgroup_storage, list);
259	}
260
261	spin_unlock_bh(&map->lock);
262	next->attach_type = storage->key.attach_type;
263	next->cgroup_inode_id = storage->key.cgroup_inode_id;
264	return 0;
265
266enoent:
267	spin_unlock_bh(&map->lock);
268	return -ENOENT;
269}
270
271static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
272{
273	int numa_node = bpf_map_attr_numa_node(attr);
274	struct bpf_cgroup_storage_map *map;
275	struct bpf_map_memory mem;
276	int ret;
277
278	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
279		return ERR_PTR(-EINVAL);
280
281	if (attr->value_size == 0)
282		return ERR_PTR(-EINVAL);
283
284	if (attr->value_size > PAGE_SIZE)
285		return ERR_PTR(-E2BIG);
286
287	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
288	    !bpf_map_flags_access_ok(attr->map_flags))
289		return ERR_PTR(-EINVAL);
290
291	if (attr->max_entries)
292		/* max_entries is not used and enforced to be 0 */
293		return ERR_PTR(-EINVAL);
294
295	ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
296	if (ret < 0)
297		return ERR_PTR(ret);
298
299	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
300			   __GFP_ZERO | GFP_USER, numa_node);
301	if (!map) {
302		bpf_map_charge_finish(&mem);
303		return ERR_PTR(-ENOMEM);
304	}
305
306	bpf_map_charge_move(&map->map.memory, &mem);
307
308	/* copy mandatory map attributes */
309	bpf_map_init_from_attr(&map->map, attr);
310
311	spin_lock_init(&map->lock);
312	map->root = RB_ROOT;
313	INIT_LIST_HEAD(&map->list);
314
315	return &map->map;
316}
317
318static void cgroup_storage_map_free(struct bpf_map *_map)
319{
320	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
321
322	WARN_ON(!RB_EMPTY_ROOT(&map->root));
323	WARN_ON(!list_empty(&map->list));
324
325	kfree(map);
326}
327
328static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
329{
330	return -EINVAL;
331}
332
333static int cgroup_storage_check_btf(const struct bpf_map *map,
334				    const struct btf *btf,
335				    const struct btf_type *key_type,
336				    const struct btf_type *value_type)
337{
338	struct btf_member *m;
339	u32 offset, size;
340
341	/* Key is expected to be of struct bpf_cgroup_storage_key type,
342	 * which is:
343	 * struct bpf_cgroup_storage_key {
344	 *	__u64	cgroup_inode_id;
345	 *	__u32	attach_type;
346	 * };
347	 */
348
349	/*
350	 * Key_type must be a structure with two fields.
351	 */
352	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
353	    BTF_INFO_VLEN(key_type->info) != 2)
354		return -EINVAL;
355
356	/*
357	 * The first field must be a 64 bit integer at 0 offset.
358	 */
359	m = (struct btf_member *)(key_type + 1);
360	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
361	if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
362		return -EINVAL;
363
364	/*
365	 * The second field must be a 32 bit integer at 64 bit offset.
366	 */
367	m++;
368	offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
369	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
370	if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
371		return -EINVAL;
372
373	return 0;
374}
375
376static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
377					 struct seq_file *m)
378{
379	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
380	struct bpf_cgroup_storage_key *key = _key;
381	struct bpf_cgroup_storage *storage;
382	int cpu;
383
384	rcu_read_lock();
385	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
386	if (!storage) {
387		rcu_read_unlock();
388		return;
389	}
390
391	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
392	stype = cgroup_storage_type(map);
393	if (stype == BPF_CGROUP_STORAGE_SHARED) {
394		seq_puts(m, ": ");
395		btf_type_seq_show(map->btf, map->btf_value_type_id,
396				  &READ_ONCE(storage->buf)->data[0], m);
397		seq_puts(m, "\n");
398	} else {
399		seq_puts(m, ": {\n");
400		for_each_possible_cpu(cpu) {
401			seq_printf(m, "\tcpu%d: ", cpu);
402			btf_type_seq_show(map->btf, map->btf_value_type_id,
403					  per_cpu_ptr(storage->percpu_buf, cpu),
404					  m);
405			seq_puts(m, "\n");
406		}
407		seq_puts(m, "}\n");
408	}
409	rcu_read_unlock();
410}
411
412const struct bpf_map_ops cgroup_storage_map_ops = {
413	.map_alloc = cgroup_storage_map_alloc,
414	.map_free = cgroup_storage_map_free,
415	.map_get_next_key = cgroup_storage_get_next_key,
416	.map_lookup_elem = cgroup_storage_lookup_elem,
417	.map_update_elem = cgroup_storage_update_elem,
418	.map_delete_elem = cgroup_storage_delete_elem,
419	.map_check_btf = cgroup_storage_check_btf,
420	.map_seq_show_elem = cgroup_storage_seq_show_elem,
421};
422
423int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
424{
425	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
426	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
427	int ret = -EBUSY;
428
429	spin_lock_bh(&map->lock);
430
431	if (map->prog && map->prog != prog)
432		goto unlock;
433	if (prog->aux->cgroup_storage[stype] &&
434	    prog->aux->cgroup_storage[stype] != _map)
435		goto unlock;
436
437	map->prog = prog;
438	prog->aux->cgroup_storage[stype] = _map;
439	ret = 0;
440unlock:
441	spin_unlock_bh(&map->lock);
442
443	return ret;
444}
445
446void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
447{
448	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
449	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
450
451	spin_lock_bh(&map->lock);
452	if (map->prog == prog) {
453		WARN_ON(prog->aux->cgroup_storage[stype] != _map);
454		map->prog = NULL;
455		prog->aux->cgroup_storage[stype] = NULL;
456	}
457	spin_unlock_bh(&map->lock);
458}
459
460static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
461{
462	size_t size;
463
464	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
465		size = sizeof(struct bpf_storage_buffer) + map->value_size;
466		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
467				  PAGE_SIZE) >> PAGE_SHIFT;
468	} else {
469		size = map->value_size;
470		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
471				  PAGE_SIZE) >> PAGE_SHIFT;
472	}
473
474	return size;
475}
476
477struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
478					enum bpf_cgroup_storage_type stype)
479{
480	struct bpf_cgroup_storage *storage;
481	struct bpf_map *map;
482	gfp_t flags;
483	size_t size;
484	u32 pages;
485
486	map = prog->aux->cgroup_storage[stype];
487	if (!map)
488		return NULL;
489
490	size = bpf_cgroup_storage_calculate_size(map, &pages);
491
492	if (bpf_map_charge_memlock(map, pages))
493		return ERR_PTR(-EPERM);
494
495	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
496			       __GFP_ZERO | GFP_USER, map->numa_node);
497	if (!storage)
498		goto enomem;
499
500	flags = __GFP_ZERO | GFP_USER;
501
502	if (stype == BPF_CGROUP_STORAGE_SHARED) {
503		storage->buf = kmalloc_node(size, flags, map->numa_node);
504		if (!storage->buf)
505			goto enomem;
506		check_and_init_map_lock(map, storage->buf->data);
507	} else {
508		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
509		if (!storage->percpu_buf)
510			goto enomem;
511	}
512
513	storage->map = (struct bpf_cgroup_storage_map *)map;
514
515	return storage;
516
517enomem:
518	bpf_map_uncharge_memlock(map, pages);
519	kfree(storage);
520	return ERR_PTR(-ENOMEM);
521}
522
523static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
524{
525	struct bpf_cgroup_storage *storage =
526		container_of(rcu, struct bpf_cgroup_storage, rcu);
527
528	kfree(storage->buf);
529	kfree(storage);
530}
531
532static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
533{
534	struct bpf_cgroup_storage *storage =
535		container_of(rcu, struct bpf_cgroup_storage, rcu);
536
537	free_percpu(storage->percpu_buf);
538	kfree(storage);
539}
540
541void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
542{
543	enum bpf_cgroup_storage_type stype;
544	struct bpf_map *map;
545	u32 pages;
546
547	if (!storage)
548		return;
549
550	map = &storage->map->map;
551
552	bpf_cgroup_storage_calculate_size(map, &pages);
553	bpf_map_uncharge_memlock(map, pages);
554
555	stype = cgroup_storage_type(map);
556	if (stype == BPF_CGROUP_STORAGE_SHARED)
557		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
558	else
559		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
560}
561
562void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
563			     struct cgroup *cgroup,
564			     enum bpf_attach_type type)
565{
566	struct bpf_cgroup_storage_map *map;
567
568	if (!storage)
569		return;
570
571	storage->key.attach_type = type;
572	storage->key.cgroup_inode_id = cgroup->kn->id.id;
573
574	map = storage->map;
575
576	spin_lock_bh(&map->lock);
577	WARN_ON(cgroup_storage_insert(map, storage));
578	list_add(&storage->list, &map->list);
579	spin_unlock_bh(&map->lock);
580}
581
582void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
583{
584	struct bpf_cgroup_storage_map *map;
585	struct rb_root *root;
586
587	if (!storage)
588		return;
589
590	map = storage->map;
591
592	spin_lock_bh(&map->lock);
593	root = &map->root;
594	rb_erase(&storage->node, root);
595
596	list_del(&storage->list);
597	spin_unlock_bh(&map->lock);
598}
599
600#endif
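
The map ops above implement BPF_MAP_TYPE_CGROUP_STORAGE (and its per-CPU variant): user space cannot create or delete elements, the element for a given (cgroup, attach type) pair is allocated by bpf_cgroup_storage_alloc() when a program is attached and linked into the map by bpf_cgroup_storage_link(). BPF programs reach that element through the bpf_get_local_storage() helper, which reads the per-cpu bpf_cgroup_storage pointer defined at the top of the file. A minimal sketch of the program side, assuming a libbpf-style build: only the map type, the key type and the helper come from the code above, the map name, value layout and section are illustrative.

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch, assuming a libbpf build environment; names and the
 * counter layout are illustrative, not taken from the listing above. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} egress_bytes SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	/* bpf_get_local_storage() returns the buffer that was allocated at
	 * attach time for the current cgroup and attach type; no lookup by
	 * key happens at run time. */
	__u64 *bytes = bpf_get_local_storage(&egress_bytes, 0);

	__sync_fetch_and_add(bytes, skb->len);
	return 1;
}

char _license[] SEC("license") = "GPL";
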
kernel/bpf/local_storage.c, v5.9
  1//SPDX-License-Identifier: GPL-2.0
  2#include <linux/bpf-cgroup.h>
  3#include <linux/bpf.h>
  4#include <linux/btf.h>
  5#include <linux/bug.h>
  6#include <linux/filter.h>
  7#include <linux/mm.h>
  8#include <linux/rbtree.h>
  9#include <linux/slab.h>
 10#include <uapi/linux/btf.h>
 11
 12DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
 13
 14#ifdef CONFIG_CGROUP_BPF
 15
 16#include "../cgroup/cgroup-internal.h"
 17
 18#define LOCAL_STORAGE_CREATE_FLAG_MASK					\
 19	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
 20
 21struct bpf_cgroup_storage_map {
 22	struct bpf_map map;
 23
 24	spinlock_t lock;
 25	struct rb_root root;
 26	struct list_head list;
 27};
 28
 29static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
 30{
 31	return container_of(map, struct bpf_cgroup_storage_map, map);
 32}
 33
 34static bool attach_type_isolated(const struct bpf_map *map)
 35{
 36	return map->key_size == sizeof(struct bpf_cgroup_storage_key);
 37}
 38
 39static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map,
 40				      const void *_key1, const void *_key2)
 41{
 42	if (attach_type_isolated(&map->map)) {
 43		const struct bpf_cgroup_storage_key *key1 = _key1;
 44		const struct bpf_cgroup_storage_key *key2 = _key2;
 45
 46		if (key1->cgroup_inode_id < key2->cgroup_inode_id)
 47			return -1;
 48		else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
 49			return 1;
 50		else if (key1->attach_type < key2->attach_type)
 51			return -1;
 52		else if (key1->attach_type > key2->attach_type)
 53			return 1;
 54	} else {
 55		const __u64 *cgroup_inode_id1 = _key1;
 56		const __u64 *cgroup_inode_id2 = _key2;
 57
 58		if (*cgroup_inode_id1 < *cgroup_inode_id2)
 59			return -1;
 60		else if (*cgroup_inode_id1 > *cgroup_inode_id2)
 61			return 1;
 62	}
 63	return 0;
 64}
 65
 66struct bpf_cgroup_storage *
 67cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
 68		      void *key, bool locked)
 69{
 70	struct rb_root *root = &map->root;
 71	struct rb_node *node;
 72
 73	if (!locked)
 74		spin_lock_bh(&map->lock);
 75
 76	node = root->rb_node;
 77	while (node) {
 78		struct bpf_cgroup_storage *storage;
 79
 80		storage = container_of(node, struct bpf_cgroup_storage, node);
 81
 82		switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
 83		case -1:
 84			node = node->rb_left;
 85			break;
 86		case 1:
 87			node = node->rb_right;
 88			break;
 89		default:
 90			if (!locked)
 91				spin_unlock_bh(&map->lock);
 92			return storage;
 93		}
 94	}
 95
 96	if (!locked)
 97		spin_unlock_bh(&map->lock);
 98
 99	return NULL;
100}
101
102static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
103				 struct bpf_cgroup_storage *storage)
104{
105	struct rb_root *root = &map->root;
106	struct rb_node **new = &(root->rb_node), *parent = NULL;
107
108	while (*new) {
109		struct bpf_cgroup_storage *this;
110
111		this = container_of(*new, struct bpf_cgroup_storage, node);
112
113		parent = *new;
114		switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
115		case -1:
116			new = &((*new)->rb_left);
117			break;
118		case 1:
119			new = &((*new)->rb_right);
120			break;
121		default:
122			return -EEXIST;
123		}
124	}
125
126	rb_link_node(&storage->node, parent, new);
127	rb_insert_color(&storage->node, root);
128
129	return 0;
130}
131
132static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
133{
134	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
135	struct bpf_cgroup_storage *storage;
136
137	storage = cgroup_storage_lookup(map, key, false);
138	if (!storage)
139		return NULL;
140
141	return &READ_ONCE(storage->buf)->data[0];
142}
143
144static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
145				      void *value, u64 flags)
146{
147	struct bpf_cgroup_storage *storage;
148	struct bpf_storage_buffer *new;
149
150	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST)))
151		return -EINVAL;
152
153	if (unlikely((flags & BPF_F_LOCK) &&
154		     !map_value_has_spin_lock(map)))
155		return -EINVAL;
156
157	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
158					key, false);
159	if (!storage)
160		return -ENOENT;
161
162	if (flags & BPF_F_LOCK) {
163		copy_map_value_locked(map, storage->buf->data, value, false);
164		return 0;
165	}
166
167	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
168			   map->value_size,
169			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
170			   map->numa_node);
171	if (!new)
172		return -ENOMEM;
173
174	memcpy(&new->data[0], value, map->value_size);
175	check_and_init_map_lock(map, new->data);
176
177	new = xchg(&storage->buf, new);
178	kfree_rcu(new, rcu);
179
180	return 0;
181}
182
183int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
184				   void *value)
185{
186	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
187	struct bpf_cgroup_storage *storage;
188	int cpu, off = 0;
189	u32 size;
190
191	rcu_read_lock();
192	storage = cgroup_storage_lookup(map, key, false);
193	if (!storage) {
194		rcu_read_unlock();
195		return -ENOENT;
196	}
197
198	/* per_cpu areas are zero-filled and bpf programs can only
199	 * access 'value_size' of them, so copying rounded areas
200	 * will not leak any kernel data
201	 */
202	size = round_up(_map->value_size, 8);
203	for_each_possible_cpu(cpu) {
204		bpf_long_memcpy(value + off,
205				per_cpu_ptr(storage->percpu_buf, cpu), size);
206		off += size;
207	}
208	rcu_read_unlock();
209	return 0;
210}
211
212int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
213				     void *value, u64 map_flags)
214{
215	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
216	struct bpf_cgroup_storage *storage;
217	int cpu, off = 0;
218	u32 size;
219
220	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
221		return -EINVAL;
222
223	rcu_read_lock();
224	storage = cgroup_storage_lookup(map, key, false);
225	if (!storage) {
226		rcu_read_unlock();
227		return -ENOENT;
228	}
229
230	/* the user space will provide round_up(value_size, 8) bytes that
231	 * will be copied into per-cpu area. bpf programs can only access
232	 * value_size of it. During lookup the same extra bytes will be
233	 * returned or zeros which were zero-filled by percpu_alloc,
234	 * so no kernel data leaks possible
235	 */
236	size = round_up(_map->value_size, 8);
237	for_each_possible_cpu(cpu) {
238		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
239				value + off, size);
240		off += size;
241	}
242	rcu_read_unlock();
243	return 0;
244}
245
246static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
247				       void *_next_key)
248{
249	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
250	struct bpf_cgroup_storage *storage;
251
252	spin_lock_bh(&map->lock);
253
254	if (list_empty(&map->list))
255		goto enoent;
256
257	if (key) {
258		storage = cgroup_storage_lookup(map, key, true);
259		if (!storage)
260			goto enoent;
261
262		storage = list_next_entry(storage, list_map);
263		if (!storage)
264			goto enoent;
265	} else {
266		storage = list_first_entry(&map->list,
267					 struct bpf_cgroup_storage, list_map);
268	}
269
270	spin_unlock_bh(&map->lock);
271
272	if (attach_type_isolated(&map->map)) {
273		struct bpf_cgroup_storage_key *next = _next_key;
274		*next = storage->key;
275	} else {
276		__u64 *next = _next_key;
277		*next = storage->key.cgroup_inode_id;
278	}
279	return 0;
280
281enoent:
282	spin_unlock_bh(&map->lock);
283	return -ENOENT;
284}
285
286static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
287{
288	int numa_node = bpf_map_attr_numa_node(attr);
289	struct bpf_cgroup_storage_map *map;
290	struct bpf_map_memory mem;
291	int ret;
292
293	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
294	    attr->key_size != sizeof(__u64))
295		return ERR_PTR(-EINVAL);
296
297	if (attr->value_size == 0)
298		return ERR_PTR(-EINVAL);
299
300	if (attr->value_size > PAGE_SIZE)
301		return ERR_PTR(-E2BIG);
302
303	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
304	    !bpf_map_flags_access_ok(attr->map_flags))
305		return ERR_PTR(-EINVAL);
306
307	if (attr->max_entries)
308		/* max_entries is not used and enforced to be 0 */
309		return ERR_PTR(-EINVAL);
310
311	ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
312	if (ret < 0)
313		return ERR_PTR(ret);
314
315	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
316			   __GFP_ZERO | GFP_USER, numa_node);
317	if (!map) {
318		bpf_map_charge_finish(&mem);
319		return ERR_PTR(-ENOMEM);
320	}
321
322	bpf_map_charge_move(&map->map.memory, &mem);
323
324	/* copy mandatory map attributes */
325	bpf_map_init_from_attr(&map->map, attr);
326
327	spin_lock_init(&map->lock);
328	map->root = RB_ROOT;
329	INIT_LIST_HEAD(&map->list);
330
331	return &map->map;
332}
333
334static void cgroup_storage_map_free(struct bpf_map *_map)
335{
336	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
337	struct list_head *storages = &map->list;
338	struct bpf_cgroup_storage *storage, *stmp;
339
340	mutex_lock(&cgroup_mutex);
341
342	list_for_each_entry_safe(storage, stmp, storages, list_map) {
343		bpf_cgroup_storage_unlink(storage);
344		bpf_cgroup_storage_free(storage);
345	}
346
347	mutex_unlock(&cgroup_mutex);
348
349	WARN_ON(!RB_EMPTY_ROOT(&map->root));
350	WARN_ON(!list_empty(&map->list));
351
352	kfree(map);
353}
354
355static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
356{
357	return -EINVAL;
358}
359
360static int cgroup_storage_check_btf(const struct bpf_map *map,
361				    const struct btf *btf,
362				    const struct btf_type *key_type,
363				    const struct btf_type *value_type)
364{
365	if (attach_type_isolated(map)) {
366		struct btf_member *m;
367		u32 offset, size;
368
369		/* Key is expected to be of struct bpf_cgroup_storage_key type,
370		 * which is:
371		 * struct bpf_cgroup_storage_key {
372		 *	__u64	cgroup_inode_id;
373		 *	__u32	attach_type;
374		 * };
375		 */
376
377		/*
378		 * Key_type must be a structure with two fields.
379		 */
380		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
381		    BTF_INFO_VLEN(key_type->info) != 2)
382			return -EINVAL;
383
384		/*
385		 * The first field must be a 64 bit integer at 0 offset.
386		 */
387		m = (struct btf_member *)(key_type + 1);
388		size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);
389		if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
390			return -EINVAL;
391
392		/*
393		 * The second field must be a 32 bit integer at 64 bit offset.
394		 */
395		m++;
396		offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
397		size = sizeof_field(struct bpf_cgroup_storage_key, attach_type);
398		if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
399			return -EINVAL;
400	} else {
401		u32 int_data;
402
403		/*
404		 * Key is expected to be u64, which stores the cgroup_inode_id
405		 */
406
407		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
408			return -EINVAL;
409
410		int_data = *(u32 *)(key_type + 1);
411		if (BTF_INT_BITS(int_data) != 64 || BTF_INT_OFFSET(int_data))
412			return -EINVAL;
413	}
414
415	return 0;
416}
417
418static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
419					 struct seq_file *m)
420{
421	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
422	struct bpf_cgroup_storage *storage;
423	int cpu;
424
425	rcu_read_lock();
426	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
427	if (!storage) {
428		rcu_read_unlock();
429		return;
430	}
431
432	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
433	stype = cgroup_storage_type(map);
434	if (stype == BPF_CGROUP_STORAGE_SHARED) {
435		seq_puts(m, ": ");
436		btf_type_seq_show(map->btf, map->btf_value_type_id,
437				  &READ_ONCE(storage->buf)->data[0], m);
438		seq_puts(m, "\n");
439	} else {
440		seq_puts(m, ": {\n");
441		for_each_possible_cpu(cpu) {
442			seq_printf(m, "\tcpu%d: ", cpu);
443			btf_type_seq_show(map->btf, map->btf_value_type_id,
444					  per_cpu_ptr(storage->percpu_buf, cpu),
445					  m);
446			seq_puts(m, "\n");
447		}
448		seq_puts(m, "}\n");
449	}
450	rcu_read_unlock();
451}
452
453static int cgroup_storage_map_btf_id;
454const struct bpf_map_ops cgroup_storage_map_ops = {
455	.map_alloc = cgroup_storage_map_alloc,
456	.map_free = cgroup_storage_map_free,
457	.map_get_next_key = cgroup_storage_get_next_key,
458	.map_lookup_elem = cgroup_storage_lookup_elem,
459	.map_update_elem = cgroup_storage_update_elem,
460	.map_delete_elem = cgroup_storage_delete_elem,
461	.map_check_btf = cgroup_storage_check_btf,
462	.map_seq_show_elem = cgroup_storage_seq_show_elem,
463	.map_btf_name = "bpf_cgroup_storage_map",
464	.map_btf_id = &cgroup_storage_map_btf_id,
465};
466
467int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
468{
469	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
470
471	if (aux->cgroup_storage[stype] &&
472	    aux->cgroup_storage[stype] != _map)
473		return -EBUSY;
474
475	aux->cgroup_storage[stype] = _map;
476	return 0;
477}
478
479static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
480{
481	size_t size;
482
483	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
484		size = sizeof(struct bpf_storage_buffer) + map->value_size;
485		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
486				  PAGE_SIZE) >> PAGE_SHIFT;
487	} else {
488		size = map->value_size;
489		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
490				  PAGE_SIZE) >> PAGE_SHIFT;
491	}
492
493	return size;
494}
495
496struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
497					enum bpf_cgroup_storage_type stype)
498{
499	struct bpf_cgroup_storage *storage;
500	struct bpf_map *map;
501	gfp_t flags;
502	size_t size;
503	u32 pages;
504
505	map = prog->aux->cgroup_storage[stype];
506	if (!map)
507		return NULL;
508
509	size = bpf_cgroup_storage_calculate_size(map, &pages);
510
511	if (bpf_map_charge_memlock(map, pages))
512		return ERR_PTR(-EPERM);
513
514	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
515			       __GFP_ZERO | GFP_USER, map->numa_node);
516	if (!storage)
517		goto enomem;
518
519	flags = __GFP_ZERO | GFP_USER;
520
521	if (stype == BPF_CGROUP_STORAGE_SHARED) {
522		storage->buf = kmalloc_node(size, flags, map->numa_node);
523		if (!storage->buf)
524			goto enomem;
525		check_and_init_map_lock(map, storage->buf->data);
526	} else {
527		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
528		if (!storage->percpu_buf)
529			goto enomem;
530	}
531
532	storage->map = (struct bpf_cgroup_storage_map *)map;
533
534	return storage;
535
536enomem:
537	bpf_map_uncharge_memlock(map, pages);
538	kfree(storage);
539	return ERR_PTR(-ENOMEM);
540}
541
542static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
543{
544	struct bpf_cgroup_storage *storage =
545		container_of(rcu, struct bpf_cgroup_storage, rcu);
546
547	kfree(storage->buf);
548	kfree(storage);
549}
550
551static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
552{
553	struct bpf_cgroup_storage *storage =
554		container_of(rcu, struct bpf_cgroup_storage, rcu);
555
556	free_percpu(storage->percpu_buf);
557	kfree(storage);
558}
559
560void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
561{
562	enum bpf_cgroup_storage_type stype;
563	struct bpf_map *map;
564	u32 pages;
565
566	if (!storage)
567		return;
568
569	map = &storage->map->map;
570
571	bpf_cgroup_storage_calculate_size(map, &pages);
572	bpf_map_uncharge_memlock(map, pages);
573
574	stype = cgroup_storage_type(map);
575	if (stype == BPF_CGROUP_STORAGE_SHARED)
576		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
577	else
578		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
579}
580
581void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
582			     struct cgroup *cgroup,
583			     enum bpf_attach_type type)
584{
585	struct bpf_cgroup_storage_map *map;
586
587	if (!storage)
588		return;
589
590	storage->key.attach_type = type;
591	storage->key.cgroup_inode_id = cgroup_id(cgroup);
592
593	map = storage->map;
594
595	spin_lock_bh(&map->lock);
596	WARN_ON(cgroup_storage_insert(map, storage));
597	list_add(&storage->list_map, &map->list);
598	list_add(&storage->list_cg, &cgroup->bpf.storages);
599	spin_unlock_bh(&map->lock);
600}
601
602void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
603{
604	struct bpf_cgroup_storage_map *map;
605	struct rb_root *root;
606
607	if (!storage)
608		return;
609
610	map = storage->map;
611
612	spin_lock_bh(&map->lock);
613	root = &map->root;
614	rb_erase(&storage->node, root);
615
616	list_del(&storage->list_map);
617	list_del(&storage->list_cg);
618	spin_unlock_bh(&map->lock);
619}
620
621#endif
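
Compared with v5.4, the v5.9 version also accepts a plain __u64 cgroup_inode_id as the map key (the attach_type_isolated() == false path), so one element is shared by every attach type within a cgroup; it drops the one-map-per-program restriction from bpf_cgroup_storage_assign() and has cgroup_storage_map_free() unlink and free any remaining elements itself. A minimal sketch of the shared-key form, under the same libbpf assumptions as the earlier example (names are illustrative):

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch, assuming a libbpf build; only the __u64 key form and
 * bpf_get_local_storage() come from the v5.9 code above. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, __u64);	/* cgroup id only: shared across attach types */
	__type(value, __u64);
} cg_counter SEC(".maps");

SEC("cgroup_skb/ingress")
int count_ingress(struct __sk_buff *skb)
{
	__u64 *cnt = bpf_get_local_storage(&cg_counter, 0);

	__sync_fetch_and_add(cnt, 1);
	return 1;
}

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	/* Same element as count_ingress() for a given cgroup, because the
	 * key carries no attach_type field. */
	__u64 *cnt = bpf_get_local_storage(&cg_counter, 0);

	__sync_fetch_and_add(cnt, 1);
	return 1;
}

char _license[] SEC("license") = "GPL";

From user space the same element can then be read with bpf_map_lookup_elem() keyed by the cgroup id, typically obtained with name_to_handle_at() on the cgroup directory.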