v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2019 Facebook */
  3
  4#include <linux/bpf.h>
  5#include <linux/bpf_verifier.h>
  6#include <linux/btf.h>
  7#include <linux/filter.h>
  8#include <linux/slab.h>
  9#include <linux/numa.h>
 10#include <linux/seq_file.h>
 11#include <linux/refcount.h>
 12#include <linux/mutex.h>
 13#include <linux/btf_ids.h>
 14#include <linux/rcupdate_wait.h>
 15
 16enum bpf_struct_ops_state {
 17	BPF_STRUCT_OPS_STATE_INIT,
 18	BPF_STRUCT_OPS_STATE_INUSE,
 19	BPF_STRUCT_OPS_STATE_TOBEFREE,
 20	BPF_STRUCT_OPS_STATE_READY,
 21};
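/*
 * Illustrative lifecycle (derived from the map ops below; the arrows
 * are transitions, not kernel identifiers):
 *
 *   INIT --update_elem() + st_ops->reg()--> INUSE --delete_elem()--> TOBEFREE
 *   INIT --update_elem() on a BPF_F_LINK map--> READY
 *
 * For BPF_F_LINK maps, registration and unregistration are driven by
 * the bpf_link instead of delete_elem(), which returns -EOPNOTSUPP.
 */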
 22
 23#define BPF_STRUCT_OPS_COMMON_VALUE			\
 24	refcount_t refcnt;				\
 25	enum bpf_struct_ops_state state
 26
 27struct bpf_struct_ops_value {
 28	BPF_STRUCT_OPS_COMMON_VALUE;
 29	char data[] ____cacheline_aligned_in_smp;
 30};
 31
 32struct bpf_struct_ops_map {
 33	struct bpf_map map;
 34	struct rcu_head rcu;
 35	const struct bpf_struct_ops *st_ops;
 36	/* protect map_update */
 37	struct mutex lock;
 38	/* links has all the bpf_links that are populated
 39	 * to the func ptrs of the kernel's struct
 40	 * (in kvalue.data).
 41	 */
 42	struct bpf_link **links;
 43	/* image is a page that has all the trampolines
 44	 * that store the func args before calling the bpf_prog.
 45	 * A PAGE_SIZE "image" is enough to store all trampolines for
 46	 * "links[]".
 47	 */
 48	void *image;
 49	/* uvalue->data stores the kernel struct
 50	 * (e.g. tcp_congestion_ops) that is more useful
 51	 * to userspace than the kvalue.  For example,
 52	 * the bpf_prog's id is stored instead of the kernel
 53	 * address of a func ptr.
 54	 */
 55	struct bpf_struct_ops_value *uvalue;
 56	/* kvalue.data stores the actual kernel's struct
 57	 * (e.g. tcp_congestion_ops) that will be
 58	 * registered to the kernel subsystem.
 59	 */
 60	struct bpf_struct_ops_value kvalue;
 61};
 62
 63struct bpf_struct_ops_link {
 64	struct bpf_link link;
 65	struct bpf_map __rcu *map;
 66};
 67
 68static DEFINE_MUTEX(update_mutex);
 69
 70#define VALUE_PREFIX "bpf_struct_ops_"
 71#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
 72
 73/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 74 * the map's value exposed to the userspace and its btf-type-id is
 75 * stored at the map->btf_vmlinux_value_type_id.
 76 *
 77 */
 78#define BPF_STRUCT_OPS_TYPE(_name)				\
 79extern struct bpf_struct_ops bpf_##_name;			\
 80								\
 81struct bpf_struct_ops_##_name {						\
 82	BPF_STRUCT_OPS_COMMON_VALUE;				\
 83	struct _name data ____cacheline_aligned_in_smp;		\
 84};
 85#include "bpf_struct_ops_types.h"
 86#undef BPF_STRUCT_OPS_TYPE
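/*
 * Hypothetical expansion, assuming "tcp_congestion_ops" is one of the
 * entries in bpf_struct_ops_types.h; the macro above then generates
 * roughly:
 *
 *   extern struct bpf_struct_ops bpf_tcp_congestion_ops;
 *
 *   struct bpf_struct_ops_tcp_congestion_ops {
 *           refcount_t refcnt;
 *           enum bpf_struct_ops_state state;
 *           struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *   };
 */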
 87
 88enum {
 89#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
 90#include "bpf_struct_ops_types.h"
 91#undef BPF_STRUCT_OPS_TYPE
 92	__NR_BPF_STRUCT_OPS_TYPE,
 93};
 94
 95static struct bpf_struct_ops * const bpf_struct_ops[] = {
 96#define BPF_STRUCT_OPS_TYPE(_name)				\
 97	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
 98#include "bpf_struct_ops_types.h"
 99#undef BPF_STRUCT_OPS_TYPE
100};
101
102const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
103};
104
105const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
106#ifdef CONFIG_NET
107	.test_run = bpf_struct_ops_test_run,
108#endif
109};
110
111static const struct btf_type *module_type;
112
113void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
114{
115	s32 type_id, value_id, module_id;
116	const struct btf_member *member;
117	struct bpf_struct_ops *st_ops;
118	const struct btf_type *t;
119	char value_name[128];
120	const char *mname;
121	u32 i, j;
122
123	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
124#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
125#include "bpf_struct_ops_types.h"
126#undef BPF_STRUCT_OPS_TYPE
127
128	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
129	if (module_id < 0) {
130		pr_warn("Cannot find struct module in btf_vmlinux\n");
131		return;
132	}
133	module_type = btf_type_by_id(btf, module_id);
134
135	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
136		st_ops = bpf_struct_ops[i];
137
138		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
139		    sizeof(value_name)) {
140			pr_warn("struct_ops name %s is too long\n",
141				st_ops->name);
142			continue;
143		}
144		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
145
146		value_id = btf_find_by_name_kind(btf, value_name,
147						 BTF_KIND_STRUCT);
148		if (value_id < 0) {
149			pr_warn("Cannot find struct %s in btf_vmlinux\n",
150				value_name);
151			continue;
152		}
153
154		type_id = btf_find_by_name_kind(btf, st_ops->name,
155						BTF_KIND_STRUCT);
156		if (type_id < 0) {
157			pr_warn("Cannot find struct %s in btf_vmlinux\n",
158				st_ops->name);
159			continue;
160		}
161		t = btf_type_by_id(btf, type_id);
162		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
163			pr_warn("Cannot support #%u members in struct %s\n",
164				btf_type_vlen(t), st_ops->name);
165			continue;
166		}
167
168		for_each_member(j, t, member) {
169			const struct btf_type *func_proto;
170
171			mname = btf_name_by_offset(btf, member->name_off);
172			if (!*mname) {
173				pr_warn("anon member in struct %s is not supported\n",
174					st_ops->name);
175				break;
176			}
177
178			if (__btf_member_bitfield_size(t, member)) {
179				pr_warn("bit field member %s in struct %s is not supported\n",
180					mname, st_ops->name);
181				break;
182			}
183
184			func_proto = btf_type_resolve_func_ptr(btf,
185							       member->type,
186							       NULL);
187			if (func_proto &&
188			    btf_distill_func_proto(log, btf,
189						   func_proto, mname,
190						   &st_ops->func_models[j])) {
191				pr_warn("Error in parsing func ptr %s in struct %s\n",
192					mname, st_ops->name);
193				break;
194			}
195		}
196
197		if (j == btf_type_vlen(t)) {
198			if (st_ops->init(btf)) {
199				pr_warn("Error in init bpf_struct_ops %s\n",
200					st_ops->name);
201			} else {
202				st_ops->type_id = type_id;
203				st_ops->type = t;
204				st_ops->value_id = value_id;
205				st_ops->value_type = btf_type_by_id(btf,
206								    value_id);
207			}
208		}
209	}
210}
211
212extern struct btf *btf_vmlinux;
213
214static const struct bpf_struct_ops *
215bpf_struct_ops_find_value(u32 value_id)
216{
217	unsigned int i;
218
219	if (!value_id || !btf_vmlinux)
220		return NULL;
221
222	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
223		if (bpf_struct_ops[i]->value_id == value_id)
224			return bpf_struct_ops[i];
225	}
226
227	return NULL;
228}
229
230const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
231{
232	unsigned int i;
233
234	if (!type_id || !btf_vmlinux)
235		return NULL;
236
237	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
238		if (bpf_struct_ops[i]->type_id == type_id)
239			return bpf_struct_ops[i];
240	}
241
242	return NULL;
243}
244
245static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
246					   void *next_key)
247{
248	if (key && *(u32 *)key == 0)
249		return -ENOENT;
250
251	*(u32 *)next_key = 0;
252	return 0;
253}
254
255int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
256				       void *value)
257{
258	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
259	struct bpf_struct_ops_value *uvalue, *kvalue;
260	enum bpf_struct_ops_state state;
261	s64 refcnt;
262
263	if (unlikely(*(u32 *)key != 0))
264		return -ENOENT;
265
266	kvalue = &st_map->kvalue;
267	/* Pair with smp_store_release() during map_update */
268	state = smp_load_acquire(&kvalue->state);
269	if (state == BPF_STRUCT_OPS_STATE_INIT) {
270		memset(value, 0, map->value_size);
271		return 0;
272	}
273
274	/* No lock is needed.  state and refcnt do not need
275	 * to be updated together under atomic context.
276	 */
277	uvalue = value;
278	memcpy(uvalue, st_map->uvalue, map->value_size);
279	uvalue->state = state;
280
281	/* This value offers the user space a general estimate of how
282	 * many sockets are still utilizing this struct_ops for TCP
283	 * congestion control. The number might not be exact, but it
284	 * should sufficiently meet our present goals.
285	 */
286	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
287	refcount_set(&uvalue->refcnt, max_t(s64, refcnt, 0));
288
289	return 0;
290}
291
292static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
293{
294	return ERR_PTR(-EINVAL);
295}
296
297static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
298{
299	const struct btf_type *t = st_map->st_ops->type;
300	u32 i;
301
302	for (i = 0; i < btf_type_vlen(t); i++) {
303		if (st_map->links[i]) {
304			bpf_link_put(st_map->links[i]);
305			st_map->links[i] = NULL;
306		}
307	}
308}
309
310static int check_zero_holes(const struct btf_type *t, void *data)
311{
312	const struct btf_member *member;
313	u32 i, moff, msize, prev_mend = 0;
314	const struct btf_type *mtype;
315
316	for_each_member(i, t, member) {
317		moff = __btf_member_bit_offset(t, member) / 8;
318		if (moff > prev_mend &&
319		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
320			return -EINVAL;
321
322		mtype = btf_type_by_id(btf_vmlinux, member->type);
323		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
324		if (IS_ERR(mtype))
325			return PTR_ERR(mtype);
326		prev_mend = moff + msize;
327	}
328
329	if (t->size > prev_mend &&
330	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
331		return -EINVAL;
332
333	return 0;
334}
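/*
 * Hypothetical illustration of check_zero_holes(): for a value type
 * laid out like
 *
 *   struct foo_ops {        // made-up struct, for illustration only
 *           u8  a;          // moff == 0, msize == 1
 *           u64 b;          // moff == 8, so bytes 1..7 are padding
 *   };
 *
 * the function returns -EINVAL unless the 7 padding bytes between 'a'
 * and 'b' (and any tail padding after 'b') are all zero in the
 * userspace-provided value.
 */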
335
336static void bpf_struct_ops_link_release(struct bpf_link *link)
337{
338}
339
340static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
341{
342	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);
343
344	kfree(tlink);
345}
346
347const struct bpf_link_ops bpf_struct_ops_link_lops = {
348	.release = bpf_struct_ops_link_release,
349	.dealloc = bpf_struct_ops_link_dealloc,
350};
351
352int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
353				      struct bpf_tramp_link *link,
354				      const struct btf_func_model *model,
355				      void *stub_func, void *image, void *image_end)
356{
357	u32 flags = BPF_TRAMP_F_INDIRECT;
358	int size;
359
360	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
361	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
362
363	if (model->ret_size > 0)
364		flags |= BPF_TRAMP_F_RET_FENTRY_RET;
365
366	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
367	if (size < 0)
368		return size;
369	if (size > (unsigned long)image_end - (unsigned long)image)
370		return -E2BIG;
371	return arch_prepare_bpf_trampoline(NULL, image, image_end,
372					   model, flags, tlinks, stub_func);
373}
374
375static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
376					   void *value, u64 flags)
377{
378	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
379	const struct bpf_struct_ops *st_ops = st_map->st_ops;
380	struct bpf_struct_ops_value *uvalue, *kvalue;
381	const struct btf_member *member;
382	const struct btf_type *t = st_ops->type;
383	struct bpf_tramp_links *tlinks;
384	void *udata, *kdata;
385	int prog_fd, err;
386	void *image, *image_end;
387	u32 i;
388
389	if (flags)
390		return -EINVAL;
391
392	if (*(u32 *)key != 0)
393		return -E2BIG;
394
395	err = check_zero_holes(st_ops->value_type, value);
396	if (err)
397		return err;
398
399	uvalue = value;
400	err = check_zero_holes(t, uvalue->data);
401	if (err)
402		return err;
403
404	if (uvalue->state || refcount_read(&uvalue->refcnt))
405		return -EINVAL;
406
407	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
408	if (!tlinks)
409		return -ENOMEM;
410
411	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
412	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;
413
414	mutex_lock(&st_map->lock);
415
416	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
417		err = -EBUSY;
418		goto unlock;
419	}
420
421	memcpy(uvalue, value, map->value_size);
422
423	udata = &uvalue->data;
424	kdata = &kvalue->data;
425	image = st_map->image;
426	image_end = st_map->image + PAGE_SIZE;
427
428	for_each_member(i, t, member) {
429		const struct btf_type *mtype, *ptype;
430		struct bpf_prog *prog;
431		struct bpf_tramp_link *link;
432		u32 moff;
433
434		moff = __btf_member_bit_offset(t, member) / 8;
435		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
436		if (ptype == module_type) {
437			if (*(void **)(udata + moff))
438				goto reset_unlock;
439			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
440			continue;
441		}
442
443		err = st_ops->init_member(t, member, kdata, udata);
444		if (err < 0)
445			goto reset_unlock;
446
447		/* The ->init_member() has handled this member */
448		if (err > 0)
449			continue;
450
451		/* If st_ops->init_member does not handle it,
452		 * we will only handle func ptrs and zero-ed members
453		 * here.  Reject everything else.
454		 */
455
 456		/* All non func ptr members must be 0 */
457		if (!ptype || !btf_type_is_func_proto(ptype)) {
458			u32 msize;
459
460			mtype = btf_type_by_id(btf_vmlinux, member->type);
461			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
462			if (IS_ERR(mtype)) {
463				err = PTR_ERR(mtype);
464				goto reset_unlock;
465			}
466
467			if (memchr_inv(udata + moff, 0, msize)) {
468				err = -EINVAL;
469				goto reset_unlock;
470			}
471
472			continue;
473		}
474
475		prog_fd = (int)(*(unsigned long *)(udata + moff));
476		/* Similar check as the attr->attach_prog_fd */
477		if (!prog_fd)
478			continue;
479
480		prog = bpf_prog_get(prog_fd);
481		if (IS_ERR(prog)) {
482			err = PTR_ERR(prog);
483			goto reset_unlock;
484		}
485
486		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
487		    prog->aux->attach_btf_id != st_ops->type_id ||
488		    prog->expected_attach_type != i) {
489			bpf_prog_put(prog);
490			err = -EINVAL;
491			goto reset_unlock;
492		}
493
494		link = kzalloc(sizeof(*link), GFP_USER);
495		if (!link) {
496			bpf_prog_put(prog);
497			err = -ENOMEM;
498			goto reset_unlock;
499		}
500		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
501			      &bpf_struct_ops_link_lops, prog);
502		st_map->links[i] = &link->link;
503
504		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
505							&st_ops->func_models[i],
506							*(void **)(st_ops->cfi_stubs + moff),
507							image, image_end);
508		if (err < 0)
509			goto reset_unlock;
510
511		*(void **)(kdata + moff) = image + cfi_get_offset();
512		image += err;
513
514		/* put prog_id to udata */
515		*(unsigned long *)(udata + moff) = prog->aux->id;
516	}
517
518	if (st_map->map.map_flags & BPF_F_LINK) {
519		err = 0;
520		if (st_ops->validate) {
521			err = st_ops->validate(kdata);
522			if (err)
523				goto reset_unlock;
524		}
525		arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
526		/* Let bpf_link handle registration & unregistration.
527		 *
528		 * Pair with smp_load_acquire() during lookup_elem().
529		 */
530		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY);
531		goto unlock;
532	}
533
534	arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
535	err = st_ops->reg(kdata);
536	if (likely(!err)) {
537		/* This refcnt increment on the map here after
538		 * 'st_ops->reg()' is secure since the state of the
539		 * map must be set to INIT at this moment, and thus
540		 * bpf_struct_ops_map_delete_elem() can't unregister
541		 * or transition it to TOBEFREE concurrently.
542		 */
543		bpf_map_inc(map);
544		/* Pair with smp_load_acquire() during lookup_elem().
545		 * It ensures the above udata updates (e.g. prog->aux->id)
546		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
547		 */
548		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
549		goto unlock;
550	}
551
552	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
553	 * verified as a whole, after all init_member() calls. Can also happen if
554	 * there was a race in registering the struct_ops (under the same name) to
555	 * a sub-system through different struct_ops's maps.
556	 */
557	arch_unprotect_bpf_trampoline(st_map->image, PAGE_SIZE);
558
559reset_unlock:
560	bpf_struct_ops_map_put_progs(st_map);
561	memset(uvalue, 0, map->value_size);
562	memset(kvalue, 0, map->value_size);
563unlock:
564	kfree(tlinks);
565	mutex_unlock(&st_map->lock);
566	return err;
567}
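/*
 * Userspace sketch (assumes libbpf; skeleton and map names are
 * illustrative): the update_elem above is normally reached via
 * bpf_map__attach_struct_ops(), which writes the struct_ops value to
 * key 0 of the map:
 *
 *   struct bpf_link *link;
 *
 *   link = bpf_map__attach_struct_ops(skel->maps.sample_ops);
 *   if (!link)
 *           goto cleanup;   // errno is set, e.g. EBUSY if already in use
 *   ...
 *   bpf_link__destroy(link);
 */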
568
569static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
570{
571	enum bpf_struct_ops_state prev_state;
572	struct bpf_struct_ops_map *st_map;
573
574	st_map = (struct bpf_struct_ops_map *)map;
575	if (st_map->map.map_flags & BPF_F_LINK)
576		return -EOPNOTSUPP;
577
578	prev_state = cmpxchg(&st_map->kvalue.state,
579			     BPF_STRUCT_OPS_STATE_INUSE,
580			     BPF_STRUCT_OPS_STATE_TOBEFREE);
581	switch (prev_state) {
582	case BPF_STRUCT_OPS_STATE_INUSE:
583		st_map->st_ops->unreg(&st_map->kvalue.data);
584		bpf_map_put(map);
585		return 0;
586	case BPF_STRUCT_OPS_STATE_TOBEFREE:
587		return -EINPROGRESS;
588	case BPF_STRUCT_OPS_STATE_INIT:
589		return -ENOENT;
590	default:
591		WARN_ON_ONCE(1);
592		/* Should never happen.  Treat it as not found. */
593		return -ENOENT;
594	}
595}
596
597static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
598					     struct seq_file *m)
599{
600	void *value;
601	int err;
602
603	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
604	if (!value)
605		return;
606
607	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
608	if (!err) {
609		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
610				  value, m);
611		seq_puts(m, "\n");
612	}
613
614	kfree(value);
615}
616
617static void __bpf_struct_ops_map_free(struct bpf_map *map)
618{
619	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
620
621	if (st_map->links)
622		bpf_struct_ops_map_put_progs(st_map);
623	bpf_map_area_free(st_map->links);
624	if (st_map->image) {
625		arch_free_bpf_trampoline(st_map->image, PAGE_SIZE);
626		bpf_jit_uncharge_modmem(PAGE_SIZE);
627	}
628	bpf_map_area_free(st_map->uvalue);
629	bpf_map_area_free(st_map);
630}
631
632static void bpf_struct_ops_map_free(struct bpf_map *map)
633{
634	/* The struct_ops's function may switch to another struct_ops.
635	 *
636	 * For example, bpf_tcp_cc_x->init() may switch to
637	 * another tcp_cc_y by calling
638	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
 639	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
 640	 * and its refcount may reach 0, which then frees its
 641	 * trampoline image while tcp_cc_x is still running.
 642	 *
 643	 * A vanilla rcu gp is to wait for all bpf-tcp-cc progs
 644	 * to finish. bpf-tcp-cc progs are non-sleepable.
 645	 * A rcu_tasks gp is to wait for the last few insns
 646	 * in the trampoline image to finish before releasing
647	 * the trampoline image.
648	 */
649	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
650
651	__bpf_struct_ops_map_free(map);
652}
653
654static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
655{
656	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
657	    (attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id)
658		return -EINVAL;
659	return 0;
660}
661
662static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
663{
664	const struct bpf_struct_ops *st_ops;
665	size_t st_map_size;
666	struct bpf_struct_ops_map *st_map;
667	const struct btf_type *t, *vt;
668	struct bpf_map *map;
669	int ret;
670
671	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
672	if (!st_ops)
673		return ERR_PTR(-ENOTSUPP);
674
675	vt = st_ops->value_type;
676	if (attr->value_size != vt->size)
677		return ERR_PTR(-EINVAL);
678
679	t = st_ops->type;
680
681	st_map_size = sizeof(*st_map) +
682		/* kvalue stores the
 683		 * struct bpf_struct_ops_tcp_congestion_ops
684		 */
685		(vt->size - sizeof(struct bpf_struct_ops_value));
686
687	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
688	if (!st_map)
689		return ERR_PTR(-ENOMEM);
690
691	st_map->st_ops = st_ops;
692	map = &st_map->map;
693
694	ret = bpf_jit_charge_modmem(PAGE_SIZE);
695	if (ret) {
696		__bpf_struct_ops_map_free(map);
697		return ERR_PTR(ret);
698	}
699
700	st_map->image = arch_alloc_bpf_trampoline(PAGE_SIZE);
701	if (!st_map->image) {
702		/* __bpf_struct_ops_map_free() uses st_map->image as flag
 703		 * for "charged or not". In this case, we need to uncharge
704		 * here.
705		 */
706		bpf_jit_uncharge_modmem(PAGE_SIZE);
707		__bpf_struct_ops_map_free(map);
708		return ERR_PTR(-ENOMEM);
709	}
710	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
711	st_map->links =
 712		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_link *),
713				   NUMA_NO_NODE);
714	if (!st_map->uvalue || !st_map->links) {
715		__bpf_struct_ops_map_free(map);
716		return ERR_PTR(-ENOMEM);
717	}
718
719	mutex_init(&st_map->lock);
720	bpf_map_init_from_attr(map, attr);
721
722	return map;
723}
724
725static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
726{
727	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
728	const struct bpf_struct_ops *st_ops = st_map->st_ops;
729	const struct btf_type *vt = st_ops->value_type;
730	u64 usage;
731
732	usage = sizeof(*st_map) +
733			vt->size - sizeof(struct bpf_struct_ops_value);
734	usage += vt->size;
 735	usage += btf_type_vlen(vt) * sizeof(struct bpf_link *);
736	usage += PAGE_SIZE;
737	return usage;
738}
739
740BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
741const struct bpf_map_ops bpf_struct_ops_map_ops = {
742	.map_alloc_check = bpf_struct_ops_map_alloc_check,
743	.map_alloc = bpf_struct_ops_map_alloc,
744	.map_free = bpf_struct_ops_map_free,
745	.map_get_next_key = bpf_struct_ops_map_get_next_key,
746	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
747	.map_delete_elem = bpf_struct_ops_map_delete_elem,
748	.map_update_elem = bpf_struct_ops_map_update_elem,
749	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
750	.map_mem_usage = bpf_struct_ops_map_mem_usage,
751	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
752};
753
754/* "const void *" because some subsystem is
755 * passing a const (e.g. const struct tcp_congestion_ops *)
756 */
757bool bpf_struct_ops_get(const void *kdata)
758{
759	struct bpf_struct_ops_value *kvalue;
760	struct bpf_struct_ops_map *st_map;
761	struct bpf_map *map;
762
763	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
764	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
765
766	map = __bpf_map_inc_not_zero(&st_map->map, false);
767	return !IS_ERR(map);
768}
769
770void bpf_struct_ops_put(const void *kdata)
771{
772	struct bpf_struct_ops_value *kvalue;
773	struct bpf_struct_ops_map *st_map;
774
775	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
776	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
777
778	bpf_map_put(&st_map->map);
779}
780
781static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
782{
783	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
784
785	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
786		map->map_flags & BPF_F_LINK &&
787		/* Pair with smp_store_release() during map_update */
788		smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY;
789}
790
791static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
792{
793	struct bpf_struct_ops_link *st_link;
794	struct bpf_struct_ops_map *st_map;
795
796	st_link = container_of(link, struct bpf_struct_ops_link, link);
797	st_map = (struct bpf_struct_ops_map *)
798		rcu_dereference_protected(st_link->map, true);
799	if (st_map) {
800		/* st_link->map can be NULL if
801		 * bpf_struct_ops_link_create() fails to register.
802		 */
803		st_map->st_ops->unreg(&st_map->kvalue.data);
804		bpf_map_put(&st_map->map);
805	}
806	kfree(st_link);
807}
808
809static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
810					    struct seq_file *seq)
811{
812	struct bpf_struct_ops_link *st_link;
813	struct bpf_map *map;
814
815	st_link = container_of(link, struct bpf_struct_ops_link, link);
816	rcu_read_lock();
817	map = rcu_dereference(st_link->map);
818	seq_printf(seq, "map_id:\t%d\n", map->id);
819	rcu_read_unlock();
820}
821
822static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
823					       struct bpf_link_info *info)
824{
825	struct bpf_struct_ops_link *st_link;
826	struct bpf_map *map;
827
828	st_link = container_of(link, struct bpf_struct_ops_link, link);
829	rcu_read_lock();
830	map = rcu_dereference(st_link->map);
831	info->struct_ops.map_id = map->id;
832	rcu_read_unlock();
833	return 0;
834}
835
836static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
837					  struct bpf_map *expected_old_map)
838{
839	struct bpf_struct_ops_map *st_map, *old_st_map;
840	struct bpf_map *old_map;
841	struct bpf_struct_ops_link *st_link;
842	int err;
843
844	st_link = container_of(link, struct bpf_struct_ops_link, link);
845	st_map = container_of(new_map, struct bpf_struct_ops_map, map);
846
847	if (!bpf_struct_ops_valid_to_reg(new_map))
848		return -EINVAL;
849
850	if (!st_map->st_ops->update)
851		return -EOPNOTSUPP;
852
853	mutex_lock(&update_mutex);
854
855	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
856	if (expected_old_map && old_map != expected_old_map) {
857		err = -EPERM;
858		goto err_out;
859	}
860
861	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
862	/* The new and old struct_ops must be the same type. */
863	if (st_map->st_ops != old_st_map->st_ops) {
864		err = -EINVAL;
865		goto err_out;
866	}
867
868	err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
869	if (err)
870		goto err_out;
871
872	bpf_map_inc(new_map);
873	rcu_assign_pointer(st_link->map, new_map);
874	bpf_map_put(old_map);
875
876err_out:
877	mutex_unlock(&update_mutex);
878
879	return err;
880}
881
882static const struct bpf_link_ops bpf_struct_ops_map_lops = {
883	.dealloc = bpf_struct_ops_map_link_dealloc,
884	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
885	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
886	.update_map = bpf_struct_ops_map_link_update,
887};
888
889int bpf_struct_ops_link_create(union bpf_attr *attr)
890{
891	struct bpf_struct_ops_link *link = NULL;
892	struct bpf_link_primer link_primer;
893	struct bpf_struct_ops_map *st_map;
894	struct bpf_map *map;
895	int err;
896
897	map = bpf_map_get(attr->link_create.map_fd);
898	if (IS_ERR(map))
899		return PTR_ERR(map);
900
901	st_map = (struct bpf_struct_ops_map *)map;
902
903	if (!bpf_struct_ops_valid_to_reg(map)) {
904		err = -EINVAL;
905		goto err_out;
906	}
907
908	link = kzalloc(sizeof(*link), GFP_USER);
909	if (!link) {
910		err = -ENOMEM;
911		goto err_out;
912	}
913	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);
914
915	err = bpf_link_prime(&link->link, &link_primer);
916	if (err)
917		goto err_out;
918
919	err = st_map->st_ops->reg(st_map->kvalue.data);
920	if (err) {
921		bpf_link_cleanup(&link_primer);
922		link = NULL;
923		goto err_out;
924	}
925	RCU_INIT_POINTER(link->map, map);
926
927	return bpf_link_settle(&link_primer);
928
929err_out:
930	bpf_map_put(map);
931	kfree(link);
932	return err;
933}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2019 Facebook */
   3
   4#include <linux/bpf.h>
   5#include <linux/bpf_verifier.h>
   6#include <linux/btf.h>
   7#include <linux/filter.h>
   8#include <linux/slab.h>
   9#include <linux/numa.h>
  10#include <linux/seq_file.h>
  11#include <linux/refcount.h>
  12#include <linux/mutex.h>
  13#include <linux/btf_ids.h>
  14#include <linux/rcupdate_wait.h>
  15#include <linux/poll.h>
  16
  17struct bpf_struct_ops_value {
  18	struct bpf_struct_ops_common_value common;
  19	char data[] ____cacheline_aligned_in_smp;
  20};
  21
  22#define MAX_TRAMP_IMAGE_PAGES 8
  23
  24struct bpf_struct_ops_map {
  25	struct bpf_map map;
  26	const struct bpf_struct_ops_desc *st_ops_desc;
  27	/* protect map_update */
  28	struct mutex lock;
  29	/* links has all the bpf_links that are populated
  30	 * to the func ptrs of the kernel's struct
  31	 * (in kvalue.data).
  32	 */
  33	struct bpf_link **links;
  34	/* ksyms for bpf trampolines */
  35	struct bpf_ksym **ksyms;
  36	u32 funcs_cnt;
  37	u32 image_pages_cnt;
  38	/* image_pages is an array of pages that have all the trampolines
  39	 * that store the func args before calling the bpf_prog.
  40	 */
  41	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
  42	/* The owner module's btf. */
  43	struct btf *btf;
  44	/* uvalue->data stores the kernel struct
  45	 * (e.g. tcp_congestion_ops) that is more useful
  46	 * to userspace than the kvalue.  For example,
  47	 * the bpf_prog's id is stored instead of the kernel
  48	 * address of a func ptr.
  49	 */
  50	struct bpf_struct_ops_value *uvalue;
  51	/* kvalue.data stores the actual kernel's struct
  52	 * (e.g. tcp_congestion_ops) that will be
  53	 * registered to the kernel subsystem.
  54	 */
  55	struct bpf_struct_ops_value kvalue;
  56};
  57
  58struct bpf_struct_ops_link {
  59	struct bpf_link link;
  60	struct bpf_map __rcu *map;
  61	wait_queue_head_t wait_hup;
  62};
  63
  64static DEFINE_MUTEX(update_mutex);
  65
  66#define VALUE_PREFIX "bpf_struct_ops_"
  67#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
  68
  69const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
  70};
  71
  72const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
  73#ifdef CONFIG_NET
  74	.test_run = bpf_struct_ops_test_run,
  75#endif
  76};
  77
  78BTF_ID_LIST(st_ops_ids)
  79BTF_ID(struct, module)
  80BTF_ID(struct, bpf_struct_ops_common_value)
  81
  82enum {
  83	IDX_MODULE_ID,
  84	IDX_ST_OPS_COMMON_VALUE_ID,
  85};
  86
  87extern struct btf *btf_vmlinux;
  88
  89static bool is_valid_value_type(struct btf *btf, s32 value_id,
  90				const struct btf_type *type,
  91				const char *value_name)
  92{
  93	const struct btf_type *common_value_type;
  94	const struct btf_member *member;
  95	const struct btf_type *vt, *mt;
  96
  97	vt = btf_type_by_id(btf, value_id);
  98	if (btf_vlen(vt) != 2) {
  99		pr_warn("The number of %s's members should be 2, but we get %d\n",
 100			value_name, btf_vlen(vt));
 101		return false;
 102	}
 103	member = btf_type_member(vt);
 104	mt = btf_type_by_id(btf, member->type);
 105	common_value_type = btf_type_by_id(btf_vmlinux,
 106					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
 107	if (mt != common_value_type) {
 108		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
 109			value_name);
 110		return false;
 111	}
 112	member++;
 113	mt = btf_type_by_id(btf, member->type);
 114	if (mt != type) {
 115		pr_warn("The second member of %s should be %s\n",
 116			value_name, btf_name_by_offset(btf, type->name_off));
 117		return false;
 118	}
 119
 120	return true;
 121}
 122
 123static void *bpf_struct_ops_image_alloc(void)
 124{
 125	void *image;
 126	int err;
 127
 128	err = bpf_jit_charge_modmem(PAGE_SIZE);
 129	if (err)
 130		return ERR_PTR(err);
 131	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
 132	if (!image) {
 133		bpf_jit_uncharge_modmem(PAGE_SIZE);
 134		return ERR_PTR(-ENOMEM);
 135	}
 136
 137	return image;
 138}
 139
 140void bpf_struct_ops_image_free(void *image)
 141{
 142	if (image) {
 143		arch_free_bpf_trampoline(image, PAGE_SIZE);
 144		bpf_jit_uncharge_modmem(PAGE_SIZE);
 145	}
 146}
 147
 148#define MAYBE_NULL_SUFFIX "__nullable"
 149#define MAX_STUB_NAME 128
 150
 151/* Return the type info of a stub function, if it exists.
 152 *
 153 * The name of a stub function is made up of the name of the struct_ops and
 154 * the name of the function pointer member, separated by "__". For example,
 155 * if the struct_ops type is named "foo_ops" and the function pointer
 156 * member is named "bar", the stub function name would be "foo_ops__bar".
 157 */
 158static const struct btf_type *
 159find_stub_func_proto(const struct btf *btf, const char *st_op_name,
 160		     const char *member_name)
 161{
 162	char stub_func_name[MAX_STUB_NAME];
 163	const struct btf_type *func_type;
 164	s32 btf_id;
 165	int cp;
 166
 167	cp = snprintf(stub_func_name, MAX_STUB_NAME, "%s__%s",
 168		      st_op_name, member_name);
 169	if (cp >= MAX_STUB_NAME) {
 170		pr_warn("Stub function name too long\n");
 171		return NULL;
 172	}
 173	btf_id = btf_find_by_name_kind(btf, stub_func_name, BTF_KIND_FUNC);
 174	if (btf_id < 0)
 175		return NULL;
 176	func_type = btf_type_by_id(btf, btf_id);
 177	if (!func_type)
 178		return NULL;
 179
 180	return btf_type_by_id(btf, func_type->type); /* FUNC_PROTO */
 181}
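/*
 * Hypothetical subsystem-side stub for a struct_ops type "foo_ops"
 * with a member "int (*bar)(struct foo *f);".  The stub body is never
 * executed; it only exists so that its BTF (and kCFI type id) can be
 * looked up by the function above:
 *
 *   static int foo_ops__bar(struct foo *f)
 *   {
 *           return 0;
 *   }
 */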
 182
 183/* Prepare argument info for every nullable argument of a member of a
 184 * struct_ops type.
 185 *
 186 * Initialize a struct bpf_struct_ops_arg_info according to type info of
 187 * the arguments of a stub function. (Check kCFI for more information about
 188 * stub functions.)
 189 *
 190 * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
 191 * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
  192 * the information that is used by the verifier to check the arguments of the
 193 * BPF struct_ops program assigned to the member. Here, we only care about
 194 * the arguments that are marked as __nullable.
 195 *
 196 * The array of struct bpf_ctx_arg_aux is eventually assigned to
 197 * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
 198 * verifier. (See check_struct_ops_btf_id())
 199 *
  200 * arg_info->info will be the list of struct bpf_ctx_arg_aux on success. If
  201 * it fails, it will be kept untouched.
 202 */
 203static int prepare_arg_info(struct btf *btf,
 204			    const char *st_ops_name,
 205			    const char *member_name,
 206			    const struct btf_type *func_proto,
 207			    struct bpf_struct_ops_arg_info *arg_info)
 208{
 209	const struct btf_type *stub_func_proto, *pointed_type;
 210	const struct btf_param *stub_args, *args;
 211	struct bpf_ctx_arg_aux *info, *info_buf;
 212	u32 nargs, arg_no, info_cnt = 0;
 213	u32 arg_btf_id;
 214	int offset;
 215
 216	stub_func_proto = find_stub_func_proto(btf, st_ops_name, member_name);
 217	if (!stub_func_proto)
 218		return 0;
 219
 220	/* Check if the number of arguments of the stub function is the same
 221	 * as the number of arguments of the function pointer.
 222	 */
 223	nargs = btf_type_vlen(func_proto);
 224	if (nargs != btf_type_vlen(stub_func_proto)) {
 225		pr_warn("the number of arguments of the stub function %s__%s does not match the number of arguments of the member %s of struct %s\n",
 226			st_ops_name, member_name, member_name, st_ops_name);
 227		return -EINVAL;
 228	}
 229
 230	if (!nargs)
 231		return 0;
 232
 233	args = btf_params(func_proto);
 234	stub_args = btf_params(stub_func_proto);
 235
 236	info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
 237	if (!info_buf)
 238		return -ENOMEM;
 239
 240	/* Prepare info for every nullable argument */
 241	info = info_buf;
 242	for (arg_no = 0; arg_no < nargs; arg_no++) {
  243		/* Skip arguments that are not suffixed with
 244		 * "__nullable".
 245		 */
 246		if (!btf_param_match_suffix(btf, &stub_args[arg_no],
 247					    MAYBE_NULL_SUFFIX))
 248			continue;
 249
 250		/* Should be a pointer to struct */
 251		pointed_type = btf_type_resolve_ptr(btf,
 252						    args[arg_no].type,
 253						    &arg_btf_id);
 254		if (!pointed_type ||
 255		    !btf_type_is_struct(pointed_type)) {
 256			pr_warn("stub function %s__%s has %s tagging to an unsupported type\n",
 257				st_ops_name, member_name, MAYBE_NULL_SUFFIX);
 258			goto err_out;
 259		}
 260
 261		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
 262		if (offset < 0) {
 263			pr_warn("stub function %s__%s has an invalid trampoline ctx offset for arg#%u\n",
 264				st_ops_name, member_name, arg_no);
 265			goto err_out;
 266		}
 267
 268		if (args[arg_no].type != stub_args[arg_no].type) {
 269			pr_warn("arg#%u type in stub function %s__%s does not match with its original func_proto\n",
 270				arg_no, st_ops_name, member_name);
 271			goto err_out;
 272		}
 273
 274		/* Fill the information of the new argument */
 275		info->reg_type =
 276			PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
 277		info->btf_id = arg_btf_id;
 278		info->btf = btf;
 279		info->offset = offset;
 280
 281		info++;
 282		info_cnt++;
 283	}
 284
 285	if (info_cnt) {
 286		arg_info->info = info_buf;
 287		arg_info->cnt = info_cnt;
 288	} else {
 289		kfree(info_buf);
 290	}
 291
 292	return 0;
 293
 294err_out:
 295	kfree(info_buf);
 296
 297	return -EINVAL;
 298}
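/*
 * Continuing the hypothetical foo_ops example: declaring the stub as
 *
 *   static int foo_ops__bar(struct foo *f__nullable) { return 0; }
 *
 * makes prepare_arg_info() record one bpf_ctx_arg_aux for arg#0 with
 * reg_type PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL, so the
 * verifier requires the attached BPF program to NULL-check 'f' before
 * dereferencing it.
 */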
 299
 300/* Clean up the arg_info in a struct bpf_struct_ops_desc. */
 301void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
 302{
 303	struct bpf_struct_ops_arg_info *arg_info;
 304	int i;
 305
 306	arg_info = st_ops_desc->arg_info;
 307	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
 308		kfree(arg_info[i].info);
 309
 310	kfree(arg_info);
 311}
 312
 313static bool is_module_member(const struct btf *btf, u32 id)
 314{
 315	const struct btf_type *t;
 316
 317	t = btf_type_resolve_ptr(btf, id, NULL);
 318	if (!t)
 319		return false;
 320
 321	if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
 322		return false;
 323
 324	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
 325}
 326
 327int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
 328			     struct btf *btf,
 329			     struct bpf_verifier_log *log)
 330{
 331	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
 332	struct bpf_struct_ops_arg_info *arg_info;
 333	const struct btf_member *member;
 334	const struct btf_type *t;
 335	s32 type_id, value_id;
 336	char value_name[128];
 337	const char *mname;
 338	int i, err;
 339
 340	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
 341	    sizeof(value_name)) {
 342		pr_warn("struct_ops name %s is too long\n",
 343			st_ops->name);
 344		return -EINVAL;
 345	}
 346	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
 347
 348	if (!st_ops->cfi_stubs) {
 349		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
 350		return -EINVAL;
 351	}
 352
 353	type_id = btf_find_by_name_kind(btf, st_ops->name,
 354					BTF_KIND_STRUCT);
 355	if (type_id < 0) {
 356		pr_warn("Cannot find struct %s in %s\n",
 357			st_ops->name, btf_get_name(btf));
 358		return -EINVAL;
 359	}
 360	t = btf_type_by_id(btf, type_id);
 361	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
 362		pr_warn("Cannot support #%u members in struct %s\n",
 363			btf_type_vlen(t), st_ops->name);
 364		return -EINVAL;
 365	}
 366
 367	value_id = btf_find_by_name_kind(btf, value_name,
 368					 BTF_KIND_STRUCT);
 369	if (value_id < 0) {
 370		pr_warn("Cannot find struct %s in %s\n",
 371			value_name, btf_get_name(btf));
 372		return -EINVAL;
 373	}
 374	if (!is_valid_value_type(btf, value_id, t, value_name))
 375		return -EINVAL;
 376
 377	arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
 378			   GFP_KERNEL);
 379	if (!arg_info)
 380		return -ENOMEM;
 381
 382	st_ops_desc->arg_info = arg_info;
 383	st_ops_desc->type = t;
 384	st_ops_desc->type_id = type_id;
 385	st_ops_desc->value_id = value_id;
 386	st_ops_desc->value_type = btf_type_by_id(btf, value_id);
 387
 388	for_each_member(i, t, member) {
 389		const struct btf_type *func_proto;
 390
 391		mname = btf_name_by_offset(btf, member->name_off);
 392		if (!*mname) {
 393			pr_warn("anon member in struct %s is not supported\n",
 394				st_ops->name);
 395			err = -EOPNOTSUPP;
 396			goto errout;
 397		}
 398
 399		if (__btf_member_bitfield_size(t, member)) {
 400			pr_warn("bit field member %s in struct %s is not supported\n",
 401				mname, st_ops->name);
 402			err = -EOPNOTSUPP;
 403			goto errout;
 404		}
 405
 406		if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
 407			pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
 408				st_ops->name);
 409			err = -EOPNOTSUPP;
 410			goto errout;
 411		}
 412
 413		func_proto = btf_type_resolve_func_ptr(btf,
 414						       member->type,
 415						       NULL);
 416		if (!func_proto)
 417			continue;
 418
 419		if (btf_distill_func_proto(log, btf,
 420					   func_proto, mname,
 421					   &st_ops->func_models[i])) {
 422			pr_warn("Error in parsing func ptr %s in struct %s\n",
 423				mname, st_ops->name);
 424			err = -EINVAL;
 425			goto errout;
 426		}
 427
 428		err = prepare_arg_info(btf, st_ops->name, mname,
 429				       func_proto,
 430				       arg_info + i);
 431		if (err)
 432			goto errout;
 433	}
 434
 435	if (st_ops->init(btf)) {
 436		pr_warn("Error in init bpf_struct_ops %s\n",
 437			st_ops->name);
 438		err = -EINVAL;
 439		goto errout;
 440	}
 441
 442	return 0;
 443
 444errout:
 445	bpf_struct_ops_desc_release(st_ops_desc);
 446
 447	return err;
 448}
 449
 450static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
 451					   void *next_key)
 452{
 453	if (key && *(u32 *)key == 0)
 454		return -ENOENT;
 455
 456	*(u32 *)next_key = 0;
 457	return 0;
 458}
 459
 460int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
 461				       void *value)
 462{
 463	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
 464	struct bpf_struct_ops_value *uvalue, *kvalue;
 465	enum bpf_struct_ops_state state;
 466	s64 refcnt;
 467
 468	if (unlikely(*(u32 *)key != 0))
 469		return -ENOENT;
 470
 471	kvalue = &st_map->kvalue;
 472	/* Pair with smp_store_release() during map_update */
 473	state = smp_load_acquire(&kvalue->common.state);
 474	if (state == BPF_STRUCT_OPS_STATE_INIT) {
 475		memset(value, 0, map->value_size);
 476		return 0;
 477	}
 478
 479	/* No lock is needed.  state and refcnt do not need
 480	 * to be updated together under atomic context.
 481	 */
 482	uvalue = value;
 483	memcpy(uvalue, st_map->uvalue, map->value_size);
 484	uvalue->common.state = state;
 485
 486	/* This value offers the user space a general estimate of how
 487	 * many sockets are still utilizing this struct_ops for TCP
 488	 * congestion control. The number might not be exact, but it
 489	 * should sufficiently meet our present goals.
 490	 */
 491	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
 492	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));
 493
 494	return 0;
 495}
 496
 497static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
 498{
 499	return ERR_PTR(-EINVAL);
 500}
 501
 502static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
 503{
 504	u32 i;
 505
 506	for (i = 0; i < st_map->funcs_cnt; i++) {
 507		if (!st_map->links[i])
 508			break;
 509		bpf_link_put(st_map->links[i]);
 510		st_map->links[i] = NULL;
 511	}
 512}
 513
 514static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
 515{
 516	int i;
 517
 518	for (i = 0; i < st_map->image_pages_cnt; i++)
 519		bpf_struct_ops_image_free(st_map->image_pages[i]);
 520	st_map->image_pages_cnt = 0;
 521}
 522
 523static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
 524{
 525	const struct btf_member *member;
 526	u32 i, moff, msize, prev_mend = 0;
 527	const struct btf_type *mtype;
 528
 529	for_each_member(i, t, member) {
 530		moff = __btf_member_bit_offset(t, member) / 8;
 531		if (moff > prev_mend &&
 532		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
 533			return -EINVAL;
 534
 535		mtype = btf_type_by_id(btf, member->type);
 536		mtype = btf_resolve_size(btf, mtype, &msize);
 537		if (IS_ERR(mtype))
 538			return PTR_ERR(mtype);
 539		prev_mend = moff + msize;
 540	}
 541
 542	if (t->size > prev_mend &&
 543	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
 544		return -EINVAL;
 545
 546	return 0;
 547}
 548
 549static void bpf_struct_ops_link_release(struct bpf_link *link)
 550{
 551}
 552
 553static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
 554{
 555	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);
 556
 557	kfree(tlink);
 558}
 559
 560const struct bpf_link_ops bpf_struct_ops_link_lops = {
 561	.release = bpf_struct_ops_link_release,
 562	.dealloc = bpf_struct_ops_link_dealloc,
 563};
 564
 565int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
 566				      struct bpf_tramp_link *link,
 567				      const struct btf_func_model *model,
 568				      void *stub_func,
 569				      void **_image, u32 *_image_off,
 570				      bool allow_alloc)
 571{
 572	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
 573	void *image = *_image;
 574	int size;
 575
 576	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
 577	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
 578
 579	if (model->ret_size > 0)
 580		flags |= BPF_TRAMP_F_RET_FENTRY_RET;
 581
 582	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
 583	if (size <= 0)
 584		return size ? : -EFAULT;
 585
 586	/* Allocate image buffer if necessary */
 587	if (!image || size > PAGE_SIZE - image_off) {
 588		if (!allow_alloc)
 589			return -E2BIG;
 590
 591		image = bpf_struct_ops_image_alloc();
 592		if (IS_ERR(image))
 593			return PTR_ERR(image);
 594		image_off = 0;
 595	}
 596
 597	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
 598					   image + image_off + size,
 599					   model, flags, tlinks, stub_func);
 600	if (size <= 0) {
 601		if (image != *_image)
 602			bpf_struct_ops_image_free(image);
 603		return size ? : -EFAULT;
 604	}
 605
 606	*_image = image;
 607	*_image_off = image_off + size;
 608	return 0;
 609}
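/*
 * Rough packing example (sizes are made up): if trampoline A needs 200
 * bytes and trampoline B needs 300, both land in the same page, at
 * image_off 0 and 200 respectively.  A later trampoline that does not
 * fit in the remaining PAGE_SIZE - image_off bytes triggers allocation
 * of a fresh page via bpf_struct_ops_image_alloc(); the caller bounds
 * this by passing allow_alloc only while image_pages_cnt <
 * MAX_TRAMP_IMAGE_PAGES.
 */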
 610
 611static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
 612				     void *image, unsigned int size,
 613				     struct bpf_ksym *ksym)
 614{
 615	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
 616	INIT_LIST_HEAD_RCU(&ksym->lnode);
 617	bpf_image_ksym_init(image, size, ksym);
 618}
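/*
 * For instance (names are illustrative): a trampoline for the
 * "cong_avoid" member of "tcp_congestion_ops" would appear in
 * /proc/kallsyms as "bpf__tcp_congestion_ops_cong_avoid" once
 * bpf_struct_ops_map_add_ksyms() has run.
 */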
 619
 620static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
 621{
 622	u32 i;
 623
 624	for (i = 0; i < st_map->funcs_cnt; i++) {
 625		if (!st_map->ksyms[i])
 626			break;
 627		bpf_image_ksym_add(st_map->ksyms[i]);
 628	}
 629}
 630
 631static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
 632{
 633	u32 i;
 634
 635	for (i = 0; i < st_map->funcs_cnt; i++) {
 636		if (!st_map->ksyms[i])
 637			break;
 638		bpf_image_ksym_del(st_map->ksyms[i]);
 639	}
 640}
 641
 642static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
 643{
 644	u32 i;
 645
 646	for (i = 0; i < st_map->funcs_cnt; i++) {
 647		if (!st_map->ksyms[i])
 648			break;
 649		kfree(st_map->ksyms[i]);
 650		st_map->ksyms[i] = NULL;
 651	}
 652}
 653
 654static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 655					   void *value, u64 flags)
 656{
 657	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
 658	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
 659	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
 660	struct bpf_struct_ops_value *uvalue, *kvalue;
 661	const struct btf_type *module_type;
 662	const struct btf_member *member;
 663	const struct btf_type *t = st_ops_desc->type;
 664	struct bpf_tramp_links *tlinks;
 665	void *udata, *kdata;
 666	int prog_fd, err;
 667	u32 i, trampoline_start, image_off = 0;
 668	void *cur_image = NULL, *image = NULL;
 669	struct bpf_link **plink;
 670	struct bpf_ksym **pksym;
 671	const char *tname, *mname;
 672
 673	if (flags)
 674		return -EINVAL;
 675
 676	if (*(u32 *)key != 0)
 677		return -E2BIG;
 678
 679	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
 680	if (err)
 681		return err;
 682
 683	uvalue = value;
 684	err = check_zero_holes(st_map->btf, t, uvalue->data);
 685	if (err)
 686		return err;
 687
 688	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
 689		return -EINVAL;
 690
 691	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
 692	if (!tlinks)
 693		return -ENOMEM;
 694
 695	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
 696	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;
 697
 698	mutex_lock(&st_map->lock);
 699
 700	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
 701		err = -EBUSY;
 702		goto unlock;
 703	}
 704
 705	memcpy(uvalue, value, map->value_size);
 706
 707	udata = &uvalue->data;
 708	kdata = &kvalue->data;
 709
 710	plink = st_map->links;
 711	pksym = st_map->ksyms;
 712	tname = btf_name_by_offset(st_map->btf, t->name_off);
 713	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
 714	for_each_member(i, t, member) {
 715		const struct btf_type *mtype, *ptype;
 716		struct bpf_prog *prog;
 717		struct bpf_tramp_link *link;
 718		struct bpf_ksym *ksym;
 719		u32 moff;
 720
 721		moff = __btf_member_bit_offset(t, member) / 8;
 722		mname = btf_name_by_offset(st_map->btf, member->name_off);
 723		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
 724		if (ptype == module_type) {
 725			if (*(void **)(udata + moff))
 726				goto reset_unlock;
 727			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
 728			continue;
 729		}
 730
 731		err = st_ops->init_member(t, member, kdata, udata);
 732		if (err < 0)
 733			goto reset_unlock;
 734
 735		/* The ->init_member() has handled this member */
 736		if (err > 0)
 737			continue;
 738
 739		/* If st_ops->init_member does not handle it,
 740		 * we will only handle func ptrs and zero-ed members
 741		 * here.  Reject everything else.
 742		 */
 743
  744		/* All non func ptr members must be 0 */
 745		if (!ptype || !btf_type_is_func_proto(ptype)) {
 746			u32 msize;
 747
 748			mtype = btf_type_by_id(st_map->btf, member->type);
 749			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
 750			if (IS_ERR(mtype)) {
 751				err = PTR_ERR(mtype);
 752				goto reset_unlock;
 753			}
 754
 755			if (memchr_inv(udata + moff, 0, msize)) {
 756				err = -EINVAL;
 757				goto reset_unlock;
 758			}
 759
 760			continue;
 761		}
 762
 763		prog_fd = (int)(*(unsigned long *)(udata + moff));
 764		/* Similar check as the attr->attach_prog_fd */
 765		if (!prog_fd)
 766			continue;
 767
 768		prog = bpf_prog_get(prog_fd);
 769		if (IS_ERR(prog)) {
 770			err = PTR_ERR(prog);
 771			goto reset_unlock;
 772		}
 773
 774		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
 775		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
 776		    prog->expected_attach_type != i) {
 777			bpf_prog_put(prog);
 778			err = -EINVAL;
 779			goto reset_unlock;
 780		}
 781
 782		link = kzalloc(sizeof(*link), GFP_USER);
 783		if (!link) {
 784			bpf_prog_put(prog);
 785			err = -ENOMEM;
 786			goto reset_unlock;
 787		}
 788		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
 789			      &bpf_struct_ops_link_lops, prog);
 790		*plink++ = &link->link;
 791
 792		ksym = kzalloc(sizeof(*ksym), GFP_USER);
 793		if (!ksym) {
 794			err = -ENOMEM;
 795			goto reset_unlock;
 796		}
 797		*pksym++ = ksym;
 798
 799		trampoline_start = image_off;
 800		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
 801						&st_ops->func_models[i],
 802						*(void **)(st_ops->cfi_stubs + moff),
 803						&image, &image_off,
 804						st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
 805		if (err)
 806			goto reset_unlock;
 807
 808		if (cur_image != image) {
 809			st_map->image_pages[st_map->image_pages_cnt++] = image;
 810			cur_image = image;
 811			trampoline_start = 0;
 812		}
 813
 814		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();
 815
 816		/* put prog_id to udata */
 817		*(unsigned long *)(udata + moff) = prog->aux->id;
 818
 819		/* init ksym for this trampoline */
 820		bpf_struct_ops_ksym_init(tname, mname,
 821					 image + trampoline_start,
 822					 image_off - trampoline_start,
 823					 ksym);
 824	}
 825
 826	if (st_ops->validate) {
 827		err = st_ops->validate(kdata);
 828		if (err)
 829			goto reset_unlock;
 830	}
 831	for (i = 0; i < st_map->image_pages_cnt; i++) {
 832		err = arch_protect_bpf_trampoline(st_map->image_pages[i],
 833						  PAGE_SIZE);
 834		if (err)
 835			goto reset_unlock;
 836	}
 837
 838	if (st_map->map.map_flags & BPF_F_LINK) {
 839		err = 0;
 840		/* Let bpf_link handle registration & unregistration.
 841		 *
 842		 * Pair with smp_load_acquire() during lookup_elem().
 843		 */
 844		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
 845		goto unlock;
 846	}
 847
 848	err = st_ops->reg(kdata, NULL);
 849	if (likely(!err)) {
 850		/* This refcnt increment on the map here after
 851		 * 'st_ops->reg()' is secure since the state of the
 852		 * map must be set to INIT at this moment, and thus
 853		 * bpf_struct_ops_map_delete_elem() can't unregister
 854		 * or transition it to TOBEFREE concurrently.
 855		 */
 856		bpf_map_inc(map);
 857		/* Pair with smp_load_acquire() during lookup_elem().
 858		 * It ensures the above udata updates (e.g. prog->aux->id)
 859		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
 860		 */
 861		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
 862		goto unlock;
 863	}
 864
 865	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
 866	 * verified as a whole, after all init_member() calls. Can also happen if
 867	 * there was a race in registering the struct_ops (under the same name) to
 868	 * a sub-system through different struct_ops's maps.
 869	 */
 870
 871reset_unlock:
 872	bpf_struct_ops_map_free_ksyms(st_map);
 873	bpf_struct_ops_map_free_image(st_map);
 874	bpf_struct_ops_map_put_progs(st_map);
 875	memset(uvalue, 0, map->value_size);
 876	memset(kvalue, 0, map->value_size);
 877unlock:
 878	kfree(tlinks);
 879	mutex_unlock(&st_map->lock);
 880	if (!err)
 881		bpf_struct_ops_map_add_ksyms(st_map);
 882	return err;
 883}
 884
 885static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
 886{
 887	enum bpf_struct_ops_state prev_state;
 888	struct bpf_struct_ops_map *st_map;
 889
 890	st_map = (struct bpf_struct_ops_map *)map;
 891	if (st_map->map.map_flags & BPF_F_LINK)
 892		return -EOPNOTSUPP;
 893
 894	prev_state = cmpxchg(&st_map->kvalue.common.state,
 895			     BPF_STRUCT_OPS_STATE_INUSE,
 896			     BPF_STRUCT_OPS_STATE_TOBEFREE);
 897	switch (prev_state) {
 898	case BPF_STRUCT_OPS_STATE_INUSE:
 899		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
 900		bpf_map_put(map);
 901		return 0;
 902	case BPF_STRUCT_OPS_STATE_TOBEFREE:
 903		return -EINPROGRESS;
 904	case BPF_STRUCT_OPS_STATE_INIT:
 905		return -ENOENT;
 906	default:
 907		WARN_ON_ONCE(1);
 908		/* Should never happen.  Treat it as not found. */
 909		return -ENOENT;
 910	}
 911}
 912
 913static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
 914					     struct seq_file *m)
 915{
 916	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
 917	void *value;
 918	int err;
 919
 920	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
 921	if (!value)
 922		return;
 923
 924	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
 925	if (!err) {
 926		btf_type_seq_show(st_map->btf,
 927				  map->btf_vmlinux_value_type_id,
 928				  value, m);
 929		seq_putc(m, '\n');
 930	}
 931
 932	kfree(value);
 933}
 934
 935static void __bpf_struct_ops_map_free(struct bpf_map *map)
 936{
 937	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
 938
 939	if (st_map->links)
 940		bpf_struct_ops_map_put_progs(st_map);
 941	if (st_map->ksyms)
 942		bpf_struct_ops_map_free_ksyms(st_map);
 943	bpf_map_area_free(st_map->links);
 944	bpf_map_area_free(st_map->ksyms);
 945	bpf_struct_ops_map_free_image(st_map);
 946	bpf_map_area_free(st_map->uvalue);
 947	bpf_map_area_free(st_map);
 948}
 949
 950static void bpf_struct_ops_map_free(struct bpf_map *map)
 951{
 952	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
 953
 954	/* st_ops->owner was acquired during map_alloc and implicitly holds
 955	 * the btf's refcnt. The acquire was only done when btf_is_module()
 956	 * was true; st_map->btf cannot be NULL here.
 957	 */
 958	if (btf_is_module(st_map->btf))
 959		module_put(st_map->st_ops_desc->st_ops->owner);
 960
 961	bpf_struct_ops_map_del_ksyms(st_map);
 962
 963	/* A struct_ops's function may switch to another struct_ops.
 964	 *
 965	 * For example, bpf_tcp_cc_x->init() may switch to
 966	 * another tcp_cc_y by calling
 967	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
 968	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called,
 969	 * and its refcount may reach 0, which would then free its
 970	 * trampoline image while tcp_cc_x is still running.
 971	 *
 972	 * A vanilla rcu gp waits for all bpf-tcp-cc progs
 973	 * to finish; a bpf-tcp-cc prog is non-sleepable.
 974	 * A rcu_tasks gp waits for the last few instructions
 975	 * in the trampoline image to finish before the
 976	 * trampoline image is released.
 977	 */
 978	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
 979
 980	__bpf_struct_ops_map_free(map);
 981}
 982
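    /* A struct_ops map is a single-element map keyed by a u32 (index 0)
     * and must name its value type by BTF id; only BPF_F_LINK and
     * BPF_F_VTYPE_BTF_OBJ_FD are accepted as flags.
     */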
 983static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
 984{
 985	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
 986	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
 987	    !attr->btf_vmlinux_value_type_id)
 988		return -EINVAL;
 989	return 0;
 990}
 991
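    /* Count the func-ptr members of the struct_ops type.  Illustrative:
     * in tcp_congestion_ops, members such as .ssthresh and .cong_avoid
     * resolve to function pointers and are counted, while data members
     * like the name[] array are not.  The count sizes both links[] and
     * ksyms[] in map_alloc().
     */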
 992static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
 993{
 994	int i;
 995	u32 count;
 996	const struct btf_member *member;
 997
 998	count = 0;
 999	for_each_member(i, t, member)
1000		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
1001			count++;
1002	return count;
1003}
1004
1005static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
1006{
1007	const struct bpf_struct_ops_desc *st_ops_desc;
1008	size_t st_map_size;
1009	struct bpf_struct_ops_map *st_map;
1010	const struct btf_type *t, *vt;
1011	struct module *mod = NULL;
1012	struct bpf_map *map;
1013	struct btf *btf;
1014	int ret;
1015
1016	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
1017		/* The map holds btf for its whole lifetime. */
1018		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
1019		if (IS_ERR(btf))
1020			return ERR_CAST(btf);
1021		if (!btf_is_module(btf)) {
1022			btf_put(btf);
1023			return ERR_PTR(-EINVAL);
1024		}
1025
1026		mod = btf_try_get_module(btf);
1027		/* mod holds a refcnt to btf. We don't need an extra refcnt
1028		 * here.
1029		 */
1030		btf_put(btf);
1031		if (!mod)
1032			return ERR_PTR(-EINVAL);
1033	} else {
1034		btf = bpf_get_btf_vmlinux();
1035		if (IS_ERR(btf))
1036			return ERR_CAST(btf);
1037		if (!btf)
1038			return ERR_PTR(-ENOTSUPP);
1039	}
1040
1041	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
1042	if (!st_ops_desc) {
1043		ret = -ENOTSUPP;
1044		goto errout;
1045	}
1046
1047	vt = st_ops_desc->value_type;
1048	if (attr->value_size != vt->size) {
1049		ret = -EINVAL;
1050		goto errout;
1051	}
1052
1053	t = st_ops_desc->type;
1054
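    	/* Size of *st_map plus the tail of kvalue.data: vt->size already
    	 * includes the common refcnt/state header, so only the bytes
    	 * beyond sizeof(struct bpf_struct_ops_value) are added.
    	 */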
1055	st_map_size = sizeof(*st_map) +
1056		/* kvalue stores the
1057		 * struct bpf_struct_ops_tcp_congestion_ops
1058		 */
1059		(vt->size - sizeof(struct bpf_struct_ops_value));
1060
1061	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
1062	if (!st_map) {
1063		ret = -ENOMEM;
1064		goto errout;
1065	}
1066
1067	st_map->st_ops_desc = st_ops_desc;
1068	map = &st_map->map;
1069
1070	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
1071	st_map->funcs_cnt = count_func_ptrs(btf, t);
1072	st_map->links =
1073		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
1074				   NUMA_NO_NODE);
1075
1076	st_map->ksyms =
1077		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
1078				   NUMA_NO_NODE);
1079	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
1080		ret = -ENOMEM;
1081		goto errout_free;
1082	}
1083	st_map->btf = btf;
1084
1085	mutex_init(&st_map->lock);
1086	bpf_map_init_from_attr(map, attr);
1087
1088	return map;
1089
1090errout_free:
1091	__bpf_struct_ops_map_free(map);
1092errout:
1093	module_put(mod);
1094
1095	return ERR_PTR(ret);
1096}
1097
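    /* Approximate memory charged to this map (reported as memlock in
     * fdinfo): the map struct with its kvalue tail, the separately
     * allocated uvalue, the links[] and ksyms[] arrays, plus one page
     * assumed for the trampoline image.
     */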
1098static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
1099{
1100	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1101	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
1102	const struct btf_type *vt = st_ops_desc->value_type;
1103	u64 usage;
1104
1105	usage = sizeof(*st_map) +
1106			vt->size - sizeof(struct bpf_struct_ops_value);
1107	usage += vt->size;
1108	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
1109	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
1110	usage += PAGE_SIZE;
1111	return usage;
1112}
1113
1114BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
1115const struct bpf_map_ops bpf_struct_ops_map_ops = {
1116	.map_alloc_check = bpf_struct_ops_map_alloc_check,
1117	.map_alloc = bpf_struct_ops_map_alloc,
1118	.map_free = bpf_struct_ops_map_free,
1119	.map_get_next_key = bpf_struct_ops_map_get_next_key,
1120	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
1121	.map_delete_elem = bpf_struct_ops_map_delete_elem,
1122	.map_update_elem = bpf_struct_ops_map_update_elem,
1123	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
1124	.map_mem_usage = bpf_struct_ops_map_mem_usage,
1125	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
1126};
1127
1128/* "const void *" because some subsystems pass a const pointer
1129 * (e.g. const struct tcp_congestion_ops *).
1130 */
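    /* get()/put() let a subsystem pin the map for as long as the kernel
     * struct may still be called into, e.g. while a socket still points
     * at a bpf tcp_congestion_ops.  get() returns false if the map's
     * refcount has already hit zero.
     */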
1131bool bpf_struct_ops_get(const void *kdata)
1132{
1133	struct bpf_struct_ops_value *kvalue;
1134	struct bpf_struct_ops_map *st_map;
1135	struct bpf_map *map;
1136
1137	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1138	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1139
1140	map = __bpf_map_inc_not_zero(&st_map->map, false);
1141	return !IS_ERR(map);
1142}
1143
1144void bpf_struct_ops_put(const void *kdata)
1145{
1146	struct bpf_struct_ops_value *kvalue;
1147	struct bpf_struct_ops_map *st_map;
1148
1149	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1150	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1151
1152	bpf_map_put(&st_map->map);
1153}
1154
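    /* Used by the verifier at prog load time: a NULL stub at this member
     * offset means the subsystem left the op unimplemented for bpf, so
     * attaching a prog to it is rejected with -ENOTSUPP.
     */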
1155int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
1156{
1157	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);
1158
1159	return func_ptr ? 0 : -ENOTSUPP;
1160}
1161
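    /* Only a BPF_F_LINK map that update_elem() has already moved to
     * READY may be attached to, or swapped into, a struct_ops link.
     */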
1162static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
1163{
1164	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1165
1166	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
1167		map->map_flags & BPF_F_LINK &&
1168		/* Pair with smp_store_release() during map_update */
1169		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
1170}
1171
1172static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
1173{
1174	struct bpf_struct_ops_link *st_link;
1175	struct bpf_struct_ops_map *st_map;
1176
1177	st_link = container_of(link, struct bpf_struct_ops_link, link);
1178	st_map = (struct bpf_struct_ops_map *)
1179		rcu_dereference_protected(st_link->map, true);
1180	if (st_map) {
1181		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
1182		bpf_map_put(&st_map->map);
1183	}
1184	kfree(st_link);
1185}
1186
1187static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
1188					    struct seq_file *seq)
1189{
1190	struct bpf_struct_ops_link *st_link;
1191	struct bpf_map *map;
1192
1193	st_link = container_of(link, struct bpf_struct_ops_link, link);
1194	rcu_read_lock();
1195	map = rcu_dereference(st_link->map);
1196	if (map)
1197		seq_printf(seq, "map_id:\t%d\n", map->id);
1198	rcu_read_unlock();
1199}
1200
1201static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
1202					       struct bpf_link_info *info)
1203{
1204	struct bpf_struct_ops_link *st_link;
1205	struct bpf_map *map;
1206
1207	st_link = container_of(link, struct bpf_struct_ops_link, link);
1208	rcu_read_lock();
1209	map = rcu_dereference(st_link->map);
1210	if (map)
1211		info->struct_ops.map_id = map->id;
1212	rcu_read_unlock();
1213	return 0;
1214}
1215
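    /* BPF_LINK_UPDATE on a struct_ops link: under update_mutex, swap in
     * a new READY map of the same struct_ops type and let the
     * subsystem's ->update() migrate from the old kernel struct to the
     * new one without going through a detach/attach window.
     */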
1216static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
1217					  struct bpf_map *expected_old_map)
1218{
1219	struct bpf_struct_ops_map *st_map, *old_st_map;
1220	struct bpf_map *old_map;
1221	struct bpf_struct_ops_link *st_link;
1222	int err;
1223
1224	st_link = container_of(link, struct bpf_struct_ops_link, link);
1225	st_map = container_of(new_map, struct bpf_struct_ops_map, map);
1226
1227	if (!bpf_struct_ops_valid_to_reg(new_map))
1228		return -EINVAL;
1229
1230	if (!st_map->st_ops_desc->st_ops->update)
1231		return -EOPNOTSUPP;
1232
1233	mutex_lock(&update_mutex);
1234
1235	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
1236	if (!old_map) {
1237		err = -ENOLINK;
1238		goto err_out;
1239	}
1240	if (expected_old_map && old_map != expected_old_map) {
1241		err = -EPERM;
1242		goto err_out;
1243	}
1244
1245	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
1246	/* The new and old struct_ops must be the same type. */
1247	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
1248		err = -EINVAL;
1249		goto err_out;
1250	}
1251
1252	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
1253	if (err)
1254		goto err_out;
1255
1256	bpf_map_inc(new_map);
1257	rcu_assign_pointer(st_link->map, new_map);
1258	bpf_map_put(old_map);
1259
1260err_out:
1261	mutex_unlock(&update_mutex);
1262
1263	return err;
1264}
1265
1266static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
1267{
1268	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
1269	struct bpf_struct_ops_map *st_map;
1270	struct bpf_map *map;
1271
1272	mutex_lock(&update_mutex);
1273
1274	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
1275	if (!map) {
1276		mutex_unlock(&update_mutex);
1277		return 0;
1278	}
1279	st_map = container_of(map, struct bpf_struct_ops_map, map);
1280
1281	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
1282
1283	RCU_INIT_POINTER(st_link->map, NULL);
1284	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
1285	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
1286	 */
1287	bpf_map_put(&st_map->map);
1288
1289	mutex_unlock(&update_mutex);
1290
1291	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);
1292
1293	return 0;
1294}
1295
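    /* Poll support so userspace can learn about kernel-initiated detach.
     * Hypothetical userspace sketch (link_fd is illustrative):
     *
     *	struct pollfd pfd = { .fd = link_fd, .events = POLLHUP };
     *	poll(&pfd, 1, -1);
     *
     * poll() reports EPOLLHUP in revents once link_detach() has cleared
     * st_link->map.
     */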
1296static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
1297					     struct poll_table_struct *pts)
1298{
1299	struct bpf_struct_ops_link *st_link = file->private_data;
1300
1301	poll_wait(file, &st_link->wait_hup, pts);
1302
1303	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
1304}
1305
1306static const struct bpf_link_ops bpf_struct_ops_map_lops = {
1307	.dealloc = bpf_struct_ops_map_link_dealloc,
1308	.detach = bpf_struct_ops_map_link_detach,
1309	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
1310	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
1311	.update_map = bpf_struct_ops_map_link_update,
1312	.poll = bpf_struct_ops_map_link_poll,
1313};
1314
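    /* BPF_LINK_CREATE with a struct_ops map fd.  From userspace this is
     * typically reached via libbpf's bpf_map__attach_struct_ops(), and
     * bpf_link__update_map() drives the ->update_map path above.
     */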
1315int bpf_struct_ops_link_create(union bpf_attr *attr)
1316{
1317	struct bpf_struct_ops_link *link = NULL;
1318	struct bpf_link_primer link_primer;
1319	struct bpf_struct_ops_map *st_map;
1320	struct bpf_map *map;
1321	int err;
1322
1323	map = bpf_map_get(attr->link_create.map_fd);
1324	if (IS_ERR(map))
1325		return PTR_ERR(map);
1326
1327	st_map = (struct bpf_struct_ops_map *)map;
1328
1329	if (!bpf_struct_ops_valid_to_reg(map)) {
1330		err = -EINVAL;
1331		goto err_out;
1332	}
1333
1334	link = kzalloc(sizeof(*link), GFP_USER);
1335	if (!link) {
1336		err = -ENOMEM;
1337		goto err_out;
1338	}
1339	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);
1340
1341	err = bpf_link_prime(&link->link, &link_primer);
1342	if (err)
1343		goto err_out;
1344
1345	init_waitqueue_head(&link->wait_hup);
1346
1347	/* Hold the update_mutex such that the subsystem cannot
1348	 * do link->ops->detach() before the link is fully initialized.
1349	 */
1350	mutex_lock(&update_mutex);
1351	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
1352	if (err) {
1353		mutex_unlock(&update_mutex);
1354		bpf_link_cleanup(&link_primer);
1355		link = NULL;
1356		goto err_out;
1357	}
1358	RCU_INIT_POINTER(link->map, map);
1359	mutex_unlock(&update_mutex);
1360
1361	return bpf_link_settle(&link_primer);
1362
1363err_out:
1364	bpf_map_put(map);
1365	kfree(link);
1366	return err;
1367}
1368
1369void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
1370{
1371	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
1372
1373	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
1374}