v6.8 (kernel/bpf/local_storage.c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

#ifdef CONFIG_CGROUP_BPF

#include "../cgroup/cgroup-internal.h"

#define LOCAL_STORAGE_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;
	struct rb_root root;
	struct list_head list;
};

static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}

static bool attach_type_isolated(const struct bpf_map *map)
{
	return map->key_size == sizeof(struct bpf_cgroup_storage_key);
}

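/* Two key layouts are supported: struct bpf_cgroup_storage_key, which
 * keeps storage per (cgroup inode id, attach type) pair, and a bare
 * __u64 cgroup inode id, which shares one storage across attach types.
 */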
static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map,
				      const void *_key1, const void *_key2)
{
	if (attach_type_isolated(&map->map)) {
		const struct bpf_cgroup_storage_key *key1 = _key1;
		const struct bpf_cgroup_storage_key *key2 = _key2;

		if (key1->cgroup_inode_id < key2->cgroup_inode_id)
			return -1;
		else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
			return 1;
		else if (key1->attach_type < key2->attach_type)
			return -1;
		else if (key1->attach_type > key2->attach_type)
			return 1;
	} else {
		const __u64 *cgroup_inode_id1 = _key1;
		const __u64 *cgroup_inode_id2 = _key2;

		if (*cgroup_inode_id1 < *cgroup_inode_id2)
			return -1;
		else if (*cgroup_inode_id1 > *cgroup_inode_id2)
			return 1;
	}
	return 0;
}

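/* rbtree descent ordered by bpf_cgroup_storage_key_cmp(); 'locked'
 * means the caller already holds map->lock (see get_next_key below).
 */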
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}

static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}

static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}

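/* A plain update publishes a fully initialized replacement buffer with
 * xchg() and frees the old one after an RCU grace period, so lock-free
 * readers always see a complete value. BPF_F_LOCK updates copy in place
 * under the value's bpf_spin_lock instead.
 */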
static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST)))
		return -EINVAL;

	if (unlikely((flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	if (flags & BPF_F_LOCK) {
		copy_map_value_locked(map, storage->buf->data, value, false);
		return 0;
	}

	new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),
				   __GFP_ZERO | GFP_NOWAIT | __GFP_NOWARN,
				   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);
	check_and_init_map_value(map, new->data);

	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}

int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

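/* Iteration walks map->list rather than the rbtree, so get_next_key
 * does not return keys in sorted order.
 */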
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		storage = list_next_entry(storage, list_map);
		if (!storage)
			goto enoent;
	} else {
		storage = list_first_entry(&map->list,
					 struct bpf_cgroup_storage, list_map);
	}

	spin_unlock_bh(&map->lock);

	if (attach_type_isolated(&map->map)) {
		struct bpf_cgroup_storage_key *next = _next_key;
		*next = storage->key;
	} else {
		__u64 *next = _next_key;
		*next = storage->key.cgroup_inode_id;
	}
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}

static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	__u32 max_value_size = BPF_LOCAL_STORAGE_MAX_VALUE_SIZE;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;

	/* percpu is bound by PCPU_MIN_UNIT_SIZE, non-percpu
	 * is the same as other local storages.
	 */
	if (attr->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		max_value_size = min_t(__u32, max_value_size,
				       PCPU_MIN_UNIT_SIZE);

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
	    attr->key_size != sizeof(__u64))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > max_value_size)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	map = bpf_map_area_alloc(sizeof(struct bpf_cgroup_storage_map), numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}

static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct list_head *storages = &map->list;
	struct bpf_cgroup_storage *storage, *stmp;

	cgroup_lock();

	list_for_each_entry_safe(storage, stmp, storages, list_map) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	cgroup_unlock();

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	bpf_map_area_free(map);
}

static long cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	if (attach_type_isolated(map)) {
		struct btf_member *m;
		u32 offset, size;

		/* Key is expected to be of struct bpf_cgroup_storage_key type,
		 * which is:
		 * struct bpf_cgroup_storage_key {
		 *	__u64	cgroup_inode_id;
		 *	__u32	attach_type;
		 * };
		 */

		/*
		 * Key_type must be a structure with two fields.
		 */
		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
		    BTF_INFO_VLEN(key_type->info) != 2)
			return -EINVAL;

		/*
		 * The first field must be a 64 bit integer at 0 offset.
		 */
		m = (struct btf_member *)(key_type + 1);
		size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);
		if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
			return -EINVAL;

		/*
		 * The second field must be a 32 bit integer at 64 bit offset.
		 */
		m++;
		offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
		size = sizeof_field(struct bpf_cgroup_storage_key, attach_type);
		if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
			return -EINVAL;
	} else {
		u32 int_data;

		/*
		 * Key is expected to be u64, which stores the cgroup_inode_id
		 */

		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
			return -EINVAL;

		int_data = *(u32 *)(key_type + 1);
		if (BTF_INT_BITS(int_data) != 64 || BTF_INT_OFFSET(int_data))
			return -EINVAL;
	}

	return 0;
}

static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}

static u64 cgroup_storage_map_usage(const struct bpf_map *map)
{
	/* Currently the dynamically allocated elements are not counted. */
	return sizeof(struct bpf_cgroup_storage_map);
}

BTF_ID_LIST_SINGLE(cgroup_storage_map_btf_ids, struct,
		   bpf_cgroup_storage_map)
const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = cgroup_storage_get_next_key,
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = cgroup_storage_check_btf,
	.map_seq_show_elem = cgroup_storage_seq_show_elem,
	.map_mem_usage = cgroup_storage_map_usage,
	.map_btf_id = &cgroup_storage_map_btf_ids[0],
};

int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);

	if (aux->cgroup_storage[stype] &&
	    aux->cgroup_storage[stype] != _map)
		return -EBUSY;

	aux->cgroup_storage[stype] = _map;
	return 0;
}

static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}

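/* Allocate the storage for one (cgroup, attach type) pair used by
 * 'prog': a single shared buffer or a per-cpu buffer, depending on
 * the map type.
 */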
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype)
{
	const gfp_t gfp = __GFP_ZERO | GFP_USER;
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
				       gfp, map->numa_node);
	if (!storage)
		goto enomem;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = bpf_map_kmalloc_node(map, size, gfp,
						    map->numa_node);
		if (!storage->buf)
			goto enomem;
		check_and_init_map_value(map, storage->buf->data);
	} else {
		storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	kfree(storage);
	return ERR_PTR(-ENOMEM);
}

static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}

void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;

	if (!storage)
		return;

	map = &storage->map->map;
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}

void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup_id(cgroup);

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list_map, &map->list);
	list_add(&storage->list_cg, &cgroup->bpf.storages);
	spin_unlock_bh(&map->lock);
}

void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list_map);
	list_del(&storage->list_cg);
	spin_unlock_bh(&map->lock);
}

#endif
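
For context, this is roughly how a cgroup BPF program consumes the storage
implemented above. A minimal sketch, not part of the file: the map name,
section choice and counter semantics are invented for illustration, while
BPF_MAP_TYPE_CGROUP_STORAGE and the bpf_get_local_storage() helper are the
real UAPI.

	/* Illustrative sketch only -- not from local_storage.c. */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
		__type(key, struct bpf_cgroup_storage_key);
		__type(value, __u64);
	} pkt_cnt SEC(".maps");

	SEC("cgroup_skb/egress")
	int count_egress(struct __sk_buff *skb)
	{
		/* Slot for (current cgroup, attach type); flags must be 0. */
		__u64 *cnt = bpf_get_local_storage(&pkt_cnt, 0);

		__sync_fetch_and_add(cnt, 1);
		return 1; /* allow the packet */
	}

	char _license[] SEC("license") = "GPL";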
v5.4 (kernel/bpf/local_storage.c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>

DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#ifdef CONFIG_CGROUP_BPF

#define LOCAL_STORAGE_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;
	struct bpf_prog *prog;
	struct rb_root root;
	struct list_head list;
};

static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}

static int bpf_cgroup_storage_key_cmp(
	const struct bpf_cgroup_storage_key *key1,
	const struct bpf_cgroup_storage_key *key2)
{
	if (key1->cgroup_inode_id < key2->cgroup_inode_id)
		return -1;
	else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
		return 1;
	else if (key1->attach_type < key2->attach_type)
		return -1;
	else if (key1->attach_type > key2->attach_type)
		return 1;
	return 0;
}

static struct bpf_cgroup_storage *cgroup_storage_lookup(
	struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key,
	bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}

static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}

static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}

static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
				      void *value, u64 flags)
{
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST | BPF_NOEXIST)))
		return -EINVAL;

	if (unlikely(flags & BPF_NOEXIST))
		return -EINVAL;

	if (unlikely((flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	if (flags & BPF_F_LOCK) {
		copy_map_value_locked(map, storage->buf->data, value, false);
		return 0;
	}

	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
			   map->value_size,
			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
			   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);
	check_and_init_map_lock(map, new->data);

	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}

int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage_key *next = _next_key;
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		storage = list_next_entry(storage, list);
		if (!storage)
			goto enoent;
	} else {
		storage = list_first_entry(&map->list,
					 struct bpf_cgroup_storage, list);
	}

	spin_unlock_bh(&map->lock);
	next->attach_type = storage->key.attach_type;
	next->cgroup_inode_id = storage->key.cgroup_inode_id;
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}

static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;
	struct bpf_map_memory mem;
	int ret;

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
	if (ret < 0)
		return ERR_PTR(ret);

	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
			   __GFP_ZERO | GFP_USER, numa_node);
	if (!map) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_charge_move(&map->map.memory, &mem);

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}

static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	kfree(map);
}

static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	struct btf_member *m;
	u32 offset, size;

	/* Key is expected to be of struct bpf_cgroup_storage_key type,
	 * which is:
	 * struct bpf_cgroup_storage_key {
	 *	__u64	cgroup_inode_id;
	 *	__u32	attach_type;
	 * };
	 */

	/*
	 * Key_type must be a structure with two fields.
	 */
	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
	    BTF_INFO_VLEN(key_type->info) != 2)
		return -EINVAL;

	/*
	 * The first field must be a 64 bit integer at 0 offset.
	 */
	m = (struct btf_member *)(key_type + 1);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
	if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
		return -EINVAL;

	/*
	 * The second field must be a 32 bit integer at 64 bit offset.
	 */
	m++;
	offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
	if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
		return -EINVAL;

	return 0;
}

static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}

const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = cgroup_storage_get_next_key,
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = cgroup_storage_check_btf,
	.map_seq_show_elem = cgroup_storage_seq_show_elem,
};

int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	int ret = -EBUSY;

	spin_lock_bh(&map->lock);

	if (map->prog && map->prog != prog)
		goto unlock;
	if (prog->aux->cgroup_storage[stype] &&
	    prog->aux->cgroup_storage[stype] != _map)
		goto unlock;

	map->prog = prog;
	prog->aux->cgroup_storage[stype] = _map;
	ret = 0;
unlock:
	spin_unlock_bh(&map->lock);

	return ret;
}

void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	spin_lock_bh(&map->lock);
	if (map->prog == prog) {
		WARN_ON(prog->aux->cgroup_storage[stype] != _map);
		map->prog = NULL;
		prog->aux->cgroup_storage[stype] = NULL;
	}
	spin_unlock_bh(&map->lock);
}

static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	gfp_t flags;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	if (bpf_map_charge_memlock(map, pages))
		return ERR_PTR(-EPERM);

	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
			       __GFP_ZERO | GFP_USER, map->numa_node);
	if (!storage)
		goto enomem;

	flags = __GFP_ZERO | GFP_USER;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = kmalloc_node(size, flags, map->numa_node);
		if (!storage->buf)
			goto enomem;
		check_and_init_map_lock(map, storage->buf->data);
	} else {
		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	bpf_map_uncharge_memlock(map, pages);
	kfree(storage);
	return ERR_PTR(-ENOMEM);
}

static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}

void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;
	u32 pages;

	if (!storage)
		return;

	map = &storage->map->map;

	bpf_cgroup_storage_calculate_size(map, &pages);
	bpf_map_uncharge_memlock(map, pages);

	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}

void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup->kn->id.id;

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list, &map->list);
	spin_unlock_bh(&map->lock);
}

void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list);
	spin_unlock_bh(&map->lock);
}

#endif
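
Userspace reads the same storage back through the regular map syscalls,
served by cgroup_storage_lookup_elem() and cgroup_storage_get_next_key()
above. A minimal sketch, assuming a map fd obtained elsewhere (e.g. from a
libbpf skeleton or a pinned path) and an 8-byte value as in the BPF-side
example earlier:

	/* Illustrative sketch only -- not from local_storage.c. */
	#include <stdio.h>
	#include <linux/bpf.h>
	#include <bpf/bpf.h>

	static void dump_cgroup_storage(int map_fd)
	{
		struct bpf_cgroup_storage_key key, next;
		void *prev = NULL;
		__u64 value;

		/* A NULL key makes bpf_map_get_next_key() return the first
		 * element; iteration order follows the map's internal list. */
		while (!bpf_map_get_next_key(map_fd, prev, &next)) {
			if (!bpf_map_lookup_elem(map_fd, &next, &value))
				printf("cgroup %llu, attach type %u: %llu\n",
				       (unsigned long long)next.cgroup_inode_id,
				       next.attach_type,
				       (unsigned long long)value);
			key = next;
			prev = &key;
		}
	}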