v6.8
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>

enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
	BPF_STRUCT_OPS_STATE_READY,
};
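
/* A sketch (editorial, not from this file) of the lifecycle these states
 * encode, based on the transitions implemented below:
 *
 *	INIT  --update_elem()---------------------> INUSE
 *	INIT  --update_elem() on BPF_F_LINK map---> READY
 *	INUSE --delete_elem()---------------------> TOBEFREE
 *
 * READY maps are registered and unregistered through a bpf_link instead
 * of delete_elem(); see bpf_struct_ops_link_create() below.
 */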

#define BPF_STRUCT_OPS_COMMON_VALUE			\
	refcount_t refcnt;				\
	enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
	BPF_STRUCT_OPS_COMMON_VALUE;
	char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	struct rcu_head rcu;
	const struct bpf_struct_ops *st_ops;
	/* protect map_update */
	struct mutex lock;
	/* links has all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	/* image is a page that has all the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all trampolines for
	 * "links[]".
	 */
	void *image;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

struct bpf_struct_ops_link {
	struct bpf_link link;
	struct bpf_map __rcu *map;
};

static DEFINE_MUTEX(update_mutex);

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to userspace, and its btf-type-id is
 * stored at map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)				\
extern struct bpf_struct_ops bpf_##_name;			\
								\
struct bpf_struct_ops_##_name {					\
	BPF_STRUCT_OPS_COMMON_VALUE;				\
	struct _name data ____cacheline_aligned_in_smp;		\
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
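
/* A sketch of the expansion above, assuming bpf_struct_ops_types.h lists
 * tcp_congestion_ops (as it does when BPF TCP congestion control is
 * configured); illustration only, not code emitted at this point:
 *
 *	extern struct bpf_struct_ops bpf_tcp_congestion_ops;
 *
 *	struct bpf_struct_ops_tcp_congestion_ops {
 *		refcount_t refcnt;
 *		enum bpf_struct_ops_state state;
 *		struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *	};
 *
 * "bpf_struct_ops_tcp_congestion_ops" is the value type userspace reads
 * and writes through the map; "data" is the struct handed to the
 * subsystem.
 */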

enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
	__NR_BPF_STRUCT_OPS_TYPE,
};

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)				\
	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

static const struct btf_type *module_type;

void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
	s32 type_id, value_id, module_id;
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	const struct btf_type *t;
	char value_name[128];
	const char *mname;
	u32 i, j;

	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
	if (module_id < 0) {
		pr_warn("Cannot find struct module in btf_vmlinux\n");
		return;
	}
	module_type = btf_type_by_id(btf, module_id);

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		st_ops = bpf_struct_ops[i];

		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
		    sizeof(value_name)) {
			pr_warn("struct_ops name %s is too long\n",
				st_ops->name);
			continue;
		}
		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

		value_id = btf_find_by_name_kind(btf, value_name,
						 BTF_KIND_STRUCT);
		if (value_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				value_name);
			continue;
		}

		type_id = btf_find_by_name_kind(btf, st_ops->name,
						BTF_KIND_STRUCT);
		if (type_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				st_ops->name);
			continue;
		}
		t = btf_type_by_id(btf, type_id);
		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
			pr_warn("Cannot support #%u members in struct %s\n",
				btf_type_vlen(t), st_ops->name);
			continue;
		}

		for_each_member(j, t, member) {
			const struct btf_type *func_proto;

			mname = btf_name_by_offset(btf, member->name_off);
			if (!*mname) {
				pr_warn("anon member in struct %s is not supported\n",
					st_ops->name);
				break;
			}

			if (__btf_member_bitfield_size(t, member)) {
				pr_warn("bit field member %s in struct %s is not supported\n",
					mname, st_ops->name);
				break;
			}

			func_proto = btf_type_resolve_func_ptr(btf,
							       member->type,
							       NULL);
			if (func_proto &&
			    btf_distill_func_proto(log, btf,
						   func_proto, mname,
						   &st_ops->func_models[j])) {
				pr_warn("Error in parsing func ptr %s in struct %s\n",
					mname, st_ops->name);
				break;
			}
		}

		if (j == btf_type_vlen(t)) {
			if (st_ops->init(btf)) {
				pr_warn("Error in init bpf_struct_ops %s\n",
					st_ops->name);
			} else {
				st_ops->type_id = type_id;
				st_ops->type = t;
				st_ops->value_id = value_id;
				st_ops->value_type = btf_type_by_id(btf,
								    value_id);
			}
		}
	}
}

extern struct btf *btf_vmlinux;

static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
	unsigned int i;

	if (!value_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->value_id == value_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	unsigned int i;

	if (!type_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->type_id == type_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;
	s64 refcnt;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->state = state;

	/* This value gives userspace a rough estimate of how many
	 * sockets are still using this struct_ops for TCP congestion
	 * control.  The number may not be exact, but it is accurate
	 * enough for its present purpose.
	 */
	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
	refcount_set(&uvalue->refcnt, max_t(s64, refcnt, 0));

	return 0;
}
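
/* A minimal userspace sketch (illustration only, not part of this file)
 * of reading the single element with libbpf's bpf_map_lookup_elem().
 * "struct common_prefix" mirrors BPF_STRUCT_OPS_COMMON_VALUE above and
 * assumes refcount_t is 32 bits; a real reader would use the
 * BTF-generated bpf_struct_ops_<name> layout instead.
 */
#if 0	/* illustration only */
#include <bpf/bpf.h>

struct common_prefix {
	__u32 refcnt;	/* refcount_t, assumed 32-bit here */
	__u32 state;	/* enum bpf_struct_ops_state */
};

/* value must point at a buffer of the map's value_size */
static int read_struct_ops_state(int map_fd, void *value)
{
	__u32 zero = 0;	/* the only valid key; see get_next_key() above */

	if (bpf_map_lookup_elem(map_fd, &zero, value))
		return -1;
	return ((struct common_prefix *)value)->state;
}
#endif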

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	const struct btf_type *t = st_map->st_ops->type;
	u32 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		if (st_map->links[i]) {
			bpf_link_put(st_map->links[i]);
			st_map->links[i] = NULL;
		}
	}
}

static int check_zero_holes(const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}
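
/* A worked example (illustration only) of the "holes" the function above
 * rejects.  On a 64-bit target, with a hypothetical ops struct:
 *
 *	struct foo_ops {
 *		u8  flag;	// moff 0, msize 1
 *				// bytes 1..7 are compiler padding: a hole
 *		u64 threshold;	// moff 8, msize 8
 *	};
 *
 * After "flag", prev_mend == 1 while the next moff == 8, so memchr_inv()
 * scans bytes 1..7; any non-zero byte there fails the update with
 * -EINVAL, since padding is never copied into kdata.
 */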

static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};

int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func, void *image, void *image_end)
{
	u32 flags = BPF_TRAMP_F_INDIRECT;
	int size;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;

	if (model->ret_size > 0)
		flags |= BPF_TRAMP_F_RET_FENTRY_RET;

	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
	if (size < 0)
		return size;
	if (size > (unsigned long)image_end - (unsigned long)image)
		return -E2BIG;
	return arch_prepare_bpf_trampoline(NULL, image, image_end,
					   model, flags, tlinks, stub_func);
}

static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_member *member;
	const struct btf_type *t = st_ops->type;
	struct bpf_tramp_links *tlinks;
	void *udata, *kdata;
	int prog_fd, err;
	void *image, *image_end;
	u32 i;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_ops->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(t, uvalue->data);
	if (err)
		return err;

	if (uvalue->state || refcount_read(&uvalue->refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;
	image_end = st_map->image + PAGE_SIZE;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zeroed members
		 * here.  Reject everything else.
		 */

		/* All non-func-ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(btf_vmlinux, member->type);
			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		st_map->links[i] = &link->link;

		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
							&st_ops->func_models[i],
							*(void **)(st_ops->cfi_stubs + moff),
							image, image_end);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image + cfi_get_offset();
		image += err;

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	if (st_map->map.map_flags & BPF_F_LINK) {
		err = 0;
		if (st_ops->validate) {
			err = st_ops->validate(kdata);
			if (err)
				goto reset_unlock;
		}
		arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
		/* Let bpf_link handle registration & unregistration.
		 *
		 * Pair with smp_load_acquire() during lookup_elem().
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY);
		goto unlock;
	}

	arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* This refcnt increment on the map here after
		 * 'st_ops->reg()' is secure since the state of the
		 * map must be set to INIT at this moment, and thus
		 * bpf_struct_ops_map_delete_elem() can't unregister
		 * or transition it to TOBEFREE concurrently.
		 */
		bpf_map_inc(map);
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg().  Can happen if this struct_ops needs
	 * to be verified as a whole, after all init_member() calls.  Can
	 * also happen if there was a race in registering the struct_ops
	 * (under the same name) to a sub-system through different
	 * struct_ops's maps.
	 */
	arch_unprotect_bpf_trampoline(st_map->image, PAGE_SIZE);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tlinks);
	mutex_unlock(&st_map->lock);
	return err;
}
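
/* A userspace sketch (illustration only) of driving the update path
 * above for a non-BPF_F_LINK map, the way libbpf does it: write each
 * prog's fd into the func-ptr slot of the value, then update key 0.
 * "func_slot_off" is a hypothetical offset for this sketch.
 */
#if 0	/* illustration only */
#include <string.h>
#include <bpf/bpf.h>

static int register_struct_ops(int map_fd, void *value, size_t value_size,
			       size_t func_slot_off, int prog_fd)
{
	__u32 zero = 0;

	memset(value, 0, value_size);	/* padding/holes must stay zero */
	/* a prog fd goes where the kernel expects the func ptr */
	*(unsigned long *)((char *)value + func_slot_off) = prog_fd;
	/* flags must be 0; the kernel swaps the fd for a trampoline */
	return bpf_map_update_elem(map_fd, &zero, value, 0);
}
#endif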

static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	if (st_map->map.map_flags & BPF_F_LINK)
		return -EOPNOTSUPP;

	prev_state = cmpxchg(&st_map->kvalue.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops->unreg(&st_map->kvalue.data);
		bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}
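
/* Userspace sketch (illustration only): unregistering a non-BPF_F_LINK
 * struct_ops is just deleting the single element.
 */
#if 0	/* illustration only */
#include <bpf/bpf.h>

static int unregister_struct_ops(int map_fd)
{
	__u32 zero = 0;

	return bpf_map_delete_elem(map_fd, &zero);
}
#endif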

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

static void __bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->links);
	if (st_map->image) {
		arch_free_bpf_trampoline(st_map->image, PAGE_SIZE);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	/* The struct_ops's function may switch to another struct_ops.
	 *
	 * For example, bpf_tcp_cc_x->init() may switch to
	 * another tcp_cc_y by calling
	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
	 * and its refcount may reach 0, which would then free the
	 * trampoline image while tcp_cc_x is still running.
	 *
	 * A vanilla rcu gp is to wait for all bpf-tcp-cc progs
	 * to finish.  A bpf-tcp-cc prog is non-sleepable.
	 * A rcu_tasks gp is to wait for the last few insns
	 * in the trampoline image to finish before releasing
	 * the trampoline image.
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);

	__bpf_struct_ops_map_free(map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    (attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}
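
/* Userspace sketch (illustration only) of creating a map that passes
 * the checks above, using libbpf's bpf_map_create(); "value_type_id" is
 * the BTF id of bpf_struct_ops_<name> in vmlinux BTF, resolved by the
 * caller (libbpf normally does this from the skeleton).
 */
#if 0	/* illustration only */
#include <bpf/bpf.h>

static int create_struct_ops_map(__u32 value_type_id, __u32 value_size)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_vmlinux_value_type_id = value_type_id,
		/* optionally: .map_flags = BPF_F_LINK, for link mode */
	);

	/* key_size must be 4, max_entries must be 1 */
	return bpf_map_create(BPF_MAP_TYPE_STRUCT_OPS, "my_ops", 4,
			      value_size, 1, &opts);
}
#endif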

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops *st_ops;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map *map;
	int ret;

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
		return ERR_PTR(-ENOTSUPP);

	vt = st_ops->value_type;
	if (attr->value_size != vt->size)
		return ERR_PTR(-EINVAL);

	t = st_ops->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map)
		return ERR_PTR(-ENOMEM);

	st_map->st_ops = st_ops;
	map = &st_map->map;

	ret = bpf_jit_charge_modmem(PAGE_SIZE);
	if (ret) {
		__bpf_struct_ops_map_free(map);
		return ERR_PTR(ret);
	}

	st_map->image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!st_map->image) {
		/* __bpf_struct_ops_map_free() uses st_map->image as a flag
		 * for "charged or not".  In this case, we need to uncharge
		 * here.
		 */
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		__bpf_struct_ops_map_free(map);
		return ERR_PTR(-ENOMEM);
	}
	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->links =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);
	if (!st_map->uvalue || !st_map->links) {
		__bpf_struct_ops_map_free(map);
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&st_map->lock);
	bpf_map_init_from_attr(map, attr);

	return map;
}

static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	const struct btf_type *vt = st_ops->value_type;
	u64 usage;

	usage = sizeof(*st_map) +
			vt->size - sizeof(struct bpf_struct_ops_value);
	usage += vt->size;
	usage += btf_type_vlen(vt) * sizeof(struct bpf_link *);
	usage += PAGE_SIZE;
	return usage;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_mem_usage = bpf_struct_ops_map_mem_usage,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

/* "const void *" because some subsystems pass a const
 * (e.g. a const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	map = __bpf_map_inc_not_zero(&st_map->map, false);
	return !IS_ERR(map);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	bpf_map_put(&st_map->map);
}
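
/* A sketch (illustration only) of how a subsystem pairs these helpers
 * with regular module refcounting, modeled on the bpf_try_module_get()/
 * bpf_module_put() wrappers in include/linux/bpf.h: BPF_MODULE_OWNER in
 * the ops' owner slot routes to bpf_struct_ops_get()/put() instead of
 * try_module_get()/module_put().
 */
#if 0	/* illustration only */
static inline bool subsys_ops_get(const void *kdata, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(kdata);
	return try_module_get(owner);
}

static inline void subsys_ops_put(const void *kdata, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(kdata);
	else
		module_put(owner);
}
#endif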

static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
		map->map_flags & BPF_F_LINK &&
		/* Pair with smp_store_release() during map_update */
		smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY;
}

static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_struct_ops_map *st_map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = (struct bpf_struct_ops_map *)
		rcu_dereference_protected(st_link->map, true);
	if (st_map) {
		/* st_link->map can be NULL if
		 * bpf_struct_ops_link_create() fails to register.
		 */
		st_map->st_ops->unreg(&st_map->kvalue.data);
		bpf_map_put(&st_map->map);
	}
	kfree(st_link);
}

static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
					    struct seq_file *seq)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	seq_printf(seq, "map_id:\t%d\n", map->id);
	rcu_read_unlock();
}

static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
					       struct bpf_link_info *info)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	info->struct_ops.map_id = map->id;
	rcu_read_unlock();
	return 0;
}

static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
					  struct bpf_map *expected_old_map)
{
	struct bpf_struct_ops_map *st_map, *old_st_map;
	struct bpf_map *old_map;
	struct bpf_struct_ops_link *st_link;
	int err;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = container_of(new_map, struct bpf_struct_ops_map, map);

	if (!bpf_struct_ops_valid_to_reg(new_map))
		return -EINVAL;

	if (!st_map->st_ops->update)
		return -EOPNOTSUPP;

	mutex_lock(&update_mutex);

	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (expected_old_map && old_map != expected_old_map) {
		err = -EPERM;
		goto err_out;
	}

	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
	/* The new and old struct_ops must be the same type. */
	if (st_map->st_ops != old_st_map->st_ops) {
		err = -EINVAL;
		goto err_out;
	}

	err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
	if (err)
		goto err_out;

	bpf_map_inc(new_map);
	rcu_assign_pointer(st_link->map, new_map);
	bpf_map_put(old_map);

err_out:
	mutex_unlock(&update_mutex);

	return err;
}
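
/* Userspace sketch (illustration only) of driving the update path above:
 * libbpf's bpf_link__update_map() (available in recent libbpf versions)
 * issues BPF_LINK_UPDATE with the new map's fd, atomically swapping the
 * registered struct_ops without detaching the link.
 */
#if 0	/* illustration only */
#include <bpf/libbpf.h>

static int swap_struct_ops(struct bpf_link *link, struct bpf_map *new_map)
{
	/* new_map must be BPF_F_LINK and already in READY state */
	return bpf_link__update_map(link, new_map);
}
#endif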

static const struct bpf_link_ops bpf_struct_ops_map_lops = {
	.dealloc = bpf_struct_ops_map_link_dealloc,
	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
	.update_map = bpf_struct_ops_map_link_update,
};

int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	struct bpf_struct_ops_link *link = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;
	int err;

	map = bpf_map_get(attr->link_create.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	st_map = (struct bpf_struct_ops_map *)map;

	if (!bpf_struct_ops_valid_to_reg(map)) {
		err = -EINVAL;
		goto err_out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto err_out;

	err = st_map->st_ops->reg(st_map->kvalue.data);
	if (err) {
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto err_out;
	}
	RCU_INIT_POINTER(link->map, map);

	return bpf_link_settle(&link_primer);

err_out:
	bpf_map_put(map);
	kfree(link);
	return err;
}
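
/* Userspace sketch (illustration only) of reaching the function above:
 * for a struct_ops link, libbpf passes the *map* fd where
 * bpf_link_create() normally takes a prog fd (this is also what
 * bpf_map__attach_struct_ops() does internally for BPF_F_LINK maps).
 */
#if 0	/* illustration only */
#include <bpf/bpf.h>

static int attach_struct_ops_link(int map_fd)
{
	/* target_fd 0, attach_type BPF_STRUCT_OPS, no extra opts */
	return bpf_link_create(map_fd, 0, BPF_STRUCT_OPS, NULL);
}
#endif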
v5.9
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
};

#define BPF_STRUCT_OPS_COMMON_VALUE			\
	refcount_t refcnt;				\
	enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
	BPF_STRUCT_OPS_COMMON_VALUE;
	char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	const struct bpf_struct_ops *st_ops;
	/* protect map_update */
	struct mutex lock;
	/* progs has all the bpf_progs that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_prog **progs;
	/* image is a page that has all the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all trampolines for
	 * "progs[]".
	 */
	void *image;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to userspace, and its btf-type-id is
 * stored at map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)				\
extern struct bpf_struct_ops bpf_##_name;			\
								\
struct bpf_struct_ops_##_name {					\
	BPF_STRUCT_OPS_COMMON_VALUE;				\
	struct _name data ____cacheline_aligned_in_smp;		\
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
	__NR_BPF_STRUCT_OPS_TYPE,
};

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)				\
	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
};

static const struct btf_type *module_type;

void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
	s32 type_id, value_id, module_id;
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	const struct btf_type *t;
	char value_name[128];
	const char *mname;
	u32 i, j;

	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
	if (module_id < 0) {
		pr_warn("Cannot find struct module in btf_vmlinux\n");
		return;
	}
	module_type = btf_type_by_id(btf, module_id);

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		st_ops = bpf_struct_ops[i];

		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
		    sizeof(value_name)) {
			pr_warn("struct_ops name %s is too long\n",
				st_ops->name);
			continue;
		}
		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

		value_id = btf_find_by_name_kind(btf, value_name,
						 BTF_KIND_STRUCT);
		if (value_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				value_name);
			continue;
		}

		type_id = btf_find_by_name_kind(btf, st_ops->name,
						BTF_KIND_STRUCT);
		if (type_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				st_ops->name);
			continue;
		}
		t = btf_type_by_id(btf, type_id);
		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
			pr_warn("Cannot support #%u members in struct %s\n",
				btf_type_vlen(t), st_ops->name);
			continue;
		}

		for_each_member(j, t, member) {
			const struct btf_type *func_proto;

			mname = btf_name_by_offset(btf, member->name_off);
			if (!*mname) {
				pr_warn("anon member in struct %s is not supported\n",
					st_ops->name);
				break;
			}

			if (btf_member_bitfield_size(t, member)) {
				pr_warn("bit field member %s in struct %s is not supported\n",
					mname, st_ops->name);
				break;
			}

			func_proto = btf_type_resolve_func_ptr(btf,
							       member->type,
							       NULL);
			if (func_proto &&
			    btf_distill_func_proto(log, btf,
						   func_proto, mname,
						   &st_ops->func_models[j])) {
				pr_warn("Error in parsing func ptr %s in struct %s\n",
					mname, st_ops->name);
				break;
			}
		}

		if (j == btf_type_vlen(t)) {
			if (st_ops->init(btf)) {
				pr_warn("Error in init bpf_struct_ops %s\n",
					st_ops->name);
			} else {
				st_ops->type_id = type_id;
				st_ops->type = t;
				st_ops->value_id = value_id;
				st_ops->value_type = btf_type_by_id(btf,
								    value_id);
			}
		}
	}
}

extern struct btf *btf_vmlinux;

static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
	unsigned int i;

	if (!value_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->value_id == value_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	unsigned int i;

	if (!type_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->type_id == type_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = (struct bpf_struct_ops_value *)value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->state = state;
	refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));

	return 0;
}

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	const struct btf_type *t = st_map->st_ops->type;
	u32 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		if (st_map->progs[i]) {
			bpf_prog_put(st_map->progs[i]);
			st_map->progs[i] = NULL;
		}
	}
}

static int check_zero_holes(const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
					 NULL, NULL);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_member *member;
	const struct btf_type *t = st_ops->type;
	struct bpf_tramp_progs *tprogs = NULL;
	void *udata, *kdata;
	int prog_fd, err = 0;
	void *image;
	u32 i;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_ops->value_type, value);
	if (err)
		return err;

	uvalue = (struct bpf_struct_ops_value *)value;
	err = check_zero_holes(t, uvalue->data);
	if (err)
		return err;

	if (uvalue->state || refcount_read(&uvalue->refcnt))
		return -EINVAL;

	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		u32 moff;

		moff = btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zeroed members
		 * here.  Reject everything else.
		 */

		/* All non-func-ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(btf_vmlinux, member->type);
			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
						 NULL, NULL);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}
		st_map->progs[i] = prog;

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != i) {
			err = -EINVAL;
			goto reset_unlock;
		}

		tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
		tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
		err = arch_prepare_bpf_trampoline(image,
						  st_map->image + PAGE_SIZE,
						  &st_ops->func_models[i], 0,
						  tprogs, NULL);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image;
		image += err;

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	refcount_set(&kvalue->refcnt, 1);
	bpf_map_inc(map);

	set_memory_ro((long)st_map->image, 1);
	set_memory_x((long)st_map->image, 1);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg().  It is very unlikely since
	 * the above init_member() should have caught it earlier
	 * before reg().  The only possibility is if there was a race
	 * in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */
	set_memory_nx((long)st_map->image, 1);
	set_memory_rw((long)st_map->image, 1);
	bpf_map_put(map);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tprogs);
	mutex_unlock(&st_map->lock);
	return err;
}
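
/* A minimal sketch (illustration only) of the trampoline setup used in
 * the loop above: one FENTRY prog per func ptr, written into the shared
 * PAGE_SIZE image.  "model" and "prog" are assumed inputs supplied by
 * the caller.
 */
#if 0	/* illustration only */
static int prepare_one_tramp(void *image, void *image_end,
			     const struct btf_func_model *model,
			     struct bpf_prog *prog)
{
	struct bpf_tramp_progs tprogs[BPF_TRAMP_MAX] = {};

	tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
	tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
	/* returns the number of bytes written, or -errno */
	return arch_prepare_bpf_trampoline(image, image_end, model, 0,
					   tprogs, NULL);
}
#endif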

static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	prev_state = cmpxchg(&st_map->kvalue.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops->unreg(&st_map->kvalue.data);
		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
			bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->progs)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->progs);
	bpf_jit_free_exec(st_map->image);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops *st_ops;
	size_t map_total_size, st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map_memory mem;
	struct bpf_map *map;
	int err;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
		return ERR_PTR(-ENOTSUPP);

	vt = st_ops->value_type;
	if (attr->value_size != vt->size)
		return ERR_PTR(-EINVAL);

	t = st_ops->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));
	map_total_size = st_map_size +
		/* uvalue */
		vt->size +
		/* struct bpf_prog **progs */
		btf_type_vlen(t) * sizeof(struct bpf_prog *);
	err = bpf_map_charge_init(&mem, map_total_size);
	if (err < 0)
		return ERR_PTR(err);

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	st_map->st_ops = st_ops;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->progs =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *),
				   NUMA_NO_NODE);
	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->uvalue || !st_map->progs || !st_map->image) {
		bpf_struct_ops_map_free(map);
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&st_map->lock);
	set_vm_flush_reset_perms(st_map->image);
	bpf_map_init_from_attr(map, attr);
	bpf_map_charge_move(&map->memory, &mem);

	return map;
}

static int bpf_struct_ops_map_btf_id;
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_btf_name = "bpf_struct_ops_map",
	.map_btf_id = &bpf_struct_ops_map_btf_id,
};

/* "const void *" because some subsystems pass a const
 * (e.g. a const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);

	return refcount_inc_not_zero(&kvalue->refcnt);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	if (refcount_dec_and_test(&kvalue->refcnt)) {
		struct bpf_struct_ops_map *st_map;

		st_map = container_of(kvalue, struct bpf_struct_ops_map,
				      kvalue);
		bpf_map_put(&st_map->map);
	}
}
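
/* A sketch (illustration only) of the subsystem-side pattern these two
 * helpers support in this version: take a reference before using the
 * registered ops, drop it when done; the final put releases the map.
 * "subsys_try_get_ops"/"subsys_put_ops" are hypothetical names.
 */
#if 0	/* illustration only */
static bool subsys_try_get_ops(const struct tcp_congestion_ops *ops)
{
	/* fails once delete_elem() has dropped the last ref */
	return bpf_struct_ops_get(ops);
}

static void subsys_put_ops(const struct tcp_congestion_ops *ops)
{
	bpf_struct_ops_put(ops);	/* may free the map via bpf_map_put() */
}
#endif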