v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Minimal file system backend for holding eBPF maps and programs,
  4 * used by bpf(2) object pinning.
  5 *
  6 * Authors:
  7 *
  8 *	Daniel Borkmann <daniel@iogearbox.net>
  9 */
 10
 11#include <linux/init.h>
 12#include <linux/magic.h>
 13#include <linux/major.h>
 14#include <linux/mount.h>
 15#include <linux/namei.h>
 16#include <linux/fs.h>
 17#include <linux/fs_context.h>
 18#include <linux/fs_parser.h>
 19#include <linux/kdev_t.h>
 20#include <linux/filter.h>
 21#include <linux/bpf.h>
 22#include <linux/bpf_trace.h>
 23#include "preload/bpf_preload.h"
 24
 25enum bpf_type {
 26	BPF_TYPE_UNSPEC	= 0,
 27	BPF_TYPE_PROG,
 28	BPF_TYPE_MAP,
 29	BPF_TYPE_LINK,
 30};
 31
 32static void *bpf_any_get(void *raw, enum bpf_type type)
 33{
 34	switch (type) {
 35	case BPF_TYPE_PROG:
 36		bpf_prog_inc(raw);
 37		break;
 38	case BPF_TYPE_MAP:
 39		bpf_map_inc_with_uref(raw);
 40		break;
 41	case BPF_TYPE_LINK:
 42		bpf_link_inc(raw);
 43		break;
 44	default:
 45		WARN_ON_ONCE(1);
 46		break;
 47	}
 48
 49	return raw;
 50}
 51
 52static void bpf_any_put(void *raw, enum bpf_type type)
 53{
 54	switch (type) {
 55	case BPF_TYPE_PROG:
 56		bpf_prog_put(raw);
 57		break;
 58	case BPF_TYPE_MAP:
 59		bpf_map_put_with_uref(raw);
 60		break;
 61	case BPF_TYPE_LINK:
 62		bpf_link_put(raw);
 63		break;
 64	default:
 65		WARN_ON_ONCE(1);
 66		break;
 67	}
 68}
 69
 70static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
 71{
 72	void *raw;
 73
 74	raw = bpf_map_get_with_uref(ufd);
 75	if (!IS_ERR(raw)) {
 76		*type = BPF_TYPE_MAP;
 77		return raw;
 78	}
 79
 80	raw = bpf_prog_get(ufd);
 81	if (!IS_ERR(raw)) {
 82		*type = BPF_TYPE_PROG;
 83		return raw;
 84	}
 85
 86	raw = bpf_link_get_from_fd(ufd);
 87	if (!IS_ERR(raw)) {
 88		*type = BPF_TYPE_LINK;
 89		return raw;
 90	}
 91
 92	return ERR_PTR(-EINVAL);
 93}
 94
 95static const struct inode_operations bpf_dir_iops;
 96
 97static const struct inode_operations bpf_prog_iops = { };
 98static const struct inode_operations bpf_map_iops  = { };
 99static const struct inode_operations bpf_link_iops  = { };
100
101static struct inode *bpf_get_inode(struct super_block *sb,
102				   const struct inode *dir,
103				   umode_t mode)
104{
105	struct inode *inode;
106
107	switch (mode & S_IFMT) {
108	case S_IFDIR:
109	case S_IFREG:
110	case S_IFLNK:
111		break;
112	default:
113		return ERR_PTR(-EINVAL);
114	}
115
116	inode = new_inode(sb);
117	if (!inode)
118		return ERR_PTR(-ENOSPC);
119
120	inode->i_ino = get_next_ino();
121	inode->i_atime = current_time(inode);
122	inode->i_mtime = inode->i_atime;
123	inode->i_ctime = inode->i_atime;
124
125	inode_init_owner(&init_user_ns, inode, dir, mode);
126
127	return inode;
128}
129
130static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
131{
132	*type = BPF_TYPE_UNSPEC;
133	if (inode->i_op == &bpf_prog_iops)
134		*type = BPF_TYPE_PROG;
135	else if (inode->i_op == &bpf_map_iops)
136		*type = BPF_TYPE_MAP;
137	else if (inode->i_op == &bpf_link_iops)
138		*type = BPF_TYPE_LINK;
139	else
140		return -EACCES;
141
142	return 0;
143}
144
145static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
146				struct inode *dir)
147{
148	d_instantiate(dentry, inode);
149	dget(dentry);
150
151	dir->i_mtime = current_time(dir);
152	dir->i_ctime = dir->i_mtime;
153}
154
155static int bpf_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
156		     struct dentry *dentry, umode_t mode)
157{
158	struct inode *inode;
159
160	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
161	if (IS_ERR(inode))
162		return PTR_ERR(inode);
163
164	inode->i_op = &bpf_dir_iops;
165	inode->i_fop = &simple_dir_operations;
166
167	inc_nlink(inode);
168	inc_nlink(dir);
169
170	bpf_dentry_finalize(dentry, inode, dir);
171	return 0;
172}
173
174struct map_iter {
175	void *key;
176	bool done;
177};
178
179static struct map_iter *map_iter(struct seq_file *m)
180{
181	return m->private;
182}
183
184static struct bpf_map *seq_file_to_map(struct seq_file *m)
185{
186	return file_inode(m->file)->i_private;
187}
188
189static void map_iter_free(struct map_iter *iter)
190{
191	if (iter) {
192		kfree(iter->key);
193		kfree(iter);
194	}
195}
196
197static struct map_iter *map_iter_alloc(struct bpf_map *map)
198{
199	struct map_iter *iter;
200
201	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
202	if (!iter)
203		goto error;
204
205	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
206	if (!iter->key)
207		goto error;
208
209	return iter;
210
211error:
212	map_iter_free(iter);
213	return NULL;
214}
215
216static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
217{
218	struct bpf_map *map = seq_file_to_map(m);
219	void *key = map_iter(m)->key;
220	void *prev_key;
221
222	(*pos)++;
223	if (map_iter(m)->done)
224		return NULL;
225
226	if (unlikely(v == SEQ_START_TOKEN))
227		prev_key = NULL;
228	else
229		prev_key = key;
230
231	rcu_read_lock();
232	if (map->ops->map_get_next_key(map, prev_key, key)) {
233		map_iter(m)->done = true;
234		key = NULL;
235	}
236	rcu_read_unlock();
237	return key;
238}
239
240static void *map_seq_start(struct seq_file *m, loff_t *pos)
241{
242	if (map_iter(m)->done)
243		return NULL;
244
245	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
246}
247
248static void map_seq_stop(struct seq_file *m, void *v)
249{
250}
251
252static int map_seq_show(struct seq_file *m, void *v)
253{
254	struct bpf_map *map = seq_file_to_map(m);
255	void *key = map_iter(m)->key;
256
257	if (unlikely(v == SEQ_START_TOKEN)) {
258		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
259		seq_puts(m, "# WARNING!! The output format will change\n");
260	} else {
261		map->ops->map_seq_show_elem(map, key, m);
262	}
263
264	return 0;
265}
266
267static const struct seq_operations bpffs_map_seq_ops = {
268	.start	= map_seq_start,
269	.next	= map_seq_next,
270	.show	= map_seq_show,
271	.stop	= map_seq_stop,
272};
273
274static int bpffs_map_open(struct inode *inode, struct file *file)
275{
276	struct bpf_map *map = inode->i_private;
277	struct map_iter *iter;
278	struct seq_file *m;
279	int err;
280
281	iter = map_iter_alloc(map);
282	if (!iter)
283		return -ENOMEM;
284
285	err = seq_open(file, &bpffs_map_seq_ops);
286	if (err) {
287		map_iter_free(iter);
288		return err;
289	}
290
291	m = file->private_data;
292	m->private = iter;
293
294	return 0;
295}
296
297static int bpffs_map_release(struct inode *inode, struct file *file)
298{
299	struct seq_file *m = file->private_data;
300
301	map_iter_free(map_iter(m));
302
303	return seq_release(inode, file);
304}
305
306/* bpffs_map_fops should only implement the basic
307 * read operation for a BPF map.  The purpose is to
308 * provide a simple user intuitive way to do
309 * "cat bpffs/pathto/a-pinned-map".
310 *
311 * Other operations (e.g. write, lookup...) should be realized by
312 * the userspace tools (e.g. bpftool) through the
313 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
314 * interface.
315 */
316static const struct file_operations bpffs_map_fops = {
317	.open		= bpffs_map_open,
318	.read		= seq_read,
319	.release	= bpffs_map_release,
320};
321
322static int bpffs_obj_open(struct inode *inode, struct file *file)
323{
324	return -EIO;
325}
326
327static const struct file_operations bpffs_obj_fops = {
328	.open		= bpffs_obj_open,
329};
330
331static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
332			 const struct inode_operations *iops,
333			 const struct file_operations *fops)
334{
335	struct inode *dir = dentry->d_parent->d_inode;
336	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
337	if (IS_ERR(inode))
338		return PTR_ERR(inode);
339
340	inode->i_op = iops;
341	inode->i_fop = fops;
342	inode->i_private = raw;
343
344	bpf_dentry_finalize(dentry, inode, dir);
345	return 0;
346}
347
348static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
349{
350	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
351			     &bpffs_obj_fops);
352}
353
354static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
355{
356	struct bpf_map *map = arg;
357
358	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
359			     bpf_map_support_seq_show(map) ?
360			     &bpffs_map_fops : &bpffs_obj_fops);
361}
362
363static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
364{
365	struct bpf_link *link = arg;
366
367	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
368			     bpf_link_is_iter(link) ?
369			     &bpf_iter_fops : &bpffs_obj_fops);
370}
371
372static struct dentry *
373bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
374{
375	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
376	 * extensions. That allows populate_bpffs() to create special files.
377	 */
378	if ((dir->i_mode & S_IALLUGO) &&
379	    strchr(dentry->d_name.name, '.'))
380		return ERR_PTR(-EPERM);
381
382	return simple_lookup(dir, dentry, flags);
383}
384
385static int bpf_symlink(struct user_namespace *mnt_userns, struct inode *dir,
386		       struct dentry *dentry, const char *target)
387{
388	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
389	struct inode *inode;
390
391	if (!link)
392		return -ENOMEM;
393
394	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
395	if (IS_ERR(inode)) {
396		kfree(link);
397		return PTR_ERR(inode);
398	}
399
400	inode->i_op = &simple_symlink_inode_operations;
401	inode->i_link = link;
402
403	bpf_dentry_finalize(dentry, inode, dir);
404	return 0;
405}
406
407static const struct inode_operations bpf_dir_iops = {
408	.lookup		= bpf_lookup,
409	.mkdir		= bpf_mkdir,
410	.symlink	= bpf_symlink,
411	.rmdir		= simple_rmdir,
412	.rename		= simple_rename,
413	.link		= simple_link,
414	.unlink		= simple_unlink,
415};
416
417/* pin iterator link into bpffs */
418static int bpf_iter_link_pin_kernel(struct dentry *parent,
419				    const char *name, struct bpf_link *link)
420{
421	umode_t mode = S_IFREG | S_IRUSR;
422	struct dentry *dentry;
423	int ret;
424
425	inode_lock(parent->d_inode);
426	dentry = lookup_one_len(name, parent, strlen(name));
427	if (IS_ERR(dentry)) {
428		inode_unlock(parent->d_inode);
429		return PTR_ERR(dentry);
430	}
431	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
432			    &bpf_iter_fops);
433	dput(dentry);
434	inode_unlock(parent->d_inode);
435	return ret;
436}
437
438static int bpf_obj_do_pin(const char __user *pathname, void *raw,
439			  enum bpf_type type)
440{
441	struct dentry *dentry;
442	struct inode *dir;
443	struct path path;
444	umode_t mode;
445	int ret;
446
447	dentry = user_path_create(AT_FDCWD, pathname, &path, 0);
448	if (IS_ERR(dentry))
449		return PTR_ERR(dentry);
450
451	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
452
453	ret = security_path_mknod(&path, dentry, mode, 0);
454	if (ret)
455		goto out;
456
457	dir = d_inode(path.dentry);
458	if (dir->i_op != &bpf_dir_iops) {
459		ret = -EPERM;
460		goto out;
461	}
462
463	switch (type) {
464	case BPF_TYPE_PROG:
465		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
466		break;
467	case BPF_TYPE_MAP:
468		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
469		break;
470	case BPF_TYPE_LINK:
471		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
472		break;
473	default:
474		ret = -EPERM;
475	}
476out:
477	done_path_create(&path, dentry);
478	return ret;
479}
480
481int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
482{
483	enum bpf_type type;
484	void *raw;
485	int ret;
486
487	raw = bpf_fd_probe_obj(ufd, &type);
488	if (IS_ERR(raw))
489		return PTR_ERR(raw);
490
491	ret = bpf_obj_do_pin(pathname, raw, type);
492	if (ret != 0)
493		bpf_any_put(raw, type);
494
495	return ret;
496}
497
498static void *bpf_obj_do_get(const char __user *pathname,
499			    enum bpf_type *type, int flags)
500{
501	struct inode *inode;
502	struct path path;
503	void *raw;
504	int ret;
505
506	ret = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
507	if (ret)
508		return ERR_PTR(ret);
509
510	inode = d_backing_inode(path.dentry);
511	ret = path_permission(&path, ACC_MODE(flags));
512	if (ret)
513		goto out;
514
515	ret = bpf_inode_type(inode, type);
516	if (ret)
517		goto out;
518
519	raw = bpf_any_get(inode->i_private, *type);
520	if (!IS_ERR(raw))
521		touch_atime(&path);
522
523	path_put(&path);
524	return raw;
525out:
526	path_put(&path);
527	return ERR_PTR(ret);
528}
529
530int bpf_obj_get_user(const char __user *pathname, int flags)
531{
532	enum bpf_type type = BPF_TYPE_UNSPEC;
533	int f_flags;
534	void *raw;
535	int ret;
536
537	f_flags = bpf_get_file_flag(flags);
538	if (f_flags < 0)
539		return f_flags;
540
541	raw = bpf_obj_do_get(pathname, &type, f_flags);
542	if (IS_ERR(raw))
543		return PTR_ERR(raw);
544
545	if (type == BPF_TYPE_PROG)
546		ret = bpf_prog_new_fd(raw);
547	else if (type == BPF_TYPE_MAP)
548		ret = bpf_map_new_fd(raw, f_flags);
549	else if (type == BPF_TYPE_LINK)
550		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
551	else
552		return -ENOENT;
553
554	if (ret < 0)
555		bpf_any_put(raw, type);
556	return ret;
557}
558
559static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
560{
561	struct bpf_prog *prog;
562	int ret = inode_permission(&init_user_ns, inode, MAY_READ);
563	if (ret)
564		return ERR_PTR(ret);
565
566	if (inode->i_op == &bpf_map_iops)
567		return ERR_PTR(-EINVAL);
568	if (inode->i_op == &bpf_link_iops)
569		return ERR_PTR(-EINVAL);
570	if (inode->i_op != &bpf_prog_iops)
571		return ERR_PTR(-EACCES);
572
573	prog = inode->i_private;
574
575	ret = security_bpf_prog(prog);
576	if (ret < 0)
577		return ERR_PTR(ret);
578
579	if (!bpf_prog_get_ok(prog, &type, false))
580		return ERR_PTR(-EINVAL);
581
582	bpf_prog_inc(prog);
583	return prog;
584}
585
586struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
587{
588	struct bpf_prog *prog;
589	struct path path;
590	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
591	if (ret)
592		return ERR_PTR(ret);
593	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
594	if (!IS_ERR(prog))
595		touch_atime(&path);
596	path_put(&path);
597	return prog;
598}
599EXPORT_SYMBOL(bpf_prog_get_type_path);
600
601/*
602 * Display the mount options in /proc/mounts.
603 */
604static int bpf_show_options(struct seq_file *m, struct dentry *root)
605{
606	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;
607
608	if (mode != S_IRWXUGO)
609		seq_printf(m, ",mode=%o", mode);
610	return 0;
611}
612
613static void bpf_free_inode(struct inode *inode)
614{
615	enum bpf_type type;
616
617	if (S_ISLNK(inode->i_mode))
618		kfree(inode->i_link);
619	if (!bpf_inode_type(inode, &type))
620		bpf_any_put(inode->i_private, type);
621	free_inode_nonrcu(inode);
622}
623
624static const struct super_operations bpf_super_ops = {
625	.statfs		= simple_statfs,
626	.drop_inode	= generic_delete_inode,
627	.show_options	= bpf_show_options,
628	.free_inode	= bpf_free_inode,
629};
630
631enum {
632	OPT_MODE,
633};
634
635static const struct fs_parameter_spec bpf_fs_parameters[] = {
636	fsparam_u32oct	("mode",			OPT_MODE),
637	{}
638};
639
640struct bpf_mount_opts {
641	umode_t mode;
642};
643
644static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
645{
646	struct bpf_mount_opts *opts = fc->fs_private;
647	struct fs_parse_result result;
648	int opt;
649
650	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
651	if (opt < 0) {
652		/* We might like to report bad mount options here, but
653		 * traditionally we've ignored all mount options, so we'd
654		 * better continue to ignore non-existing options for bpf.
655		 */
656		if (opt == -ENOPARAM) {
657			opt = vfs_parse_fs_param_source(fc, param);
658			if (opt != -ENOPARAM)
659				return opt;
660
661			return 0;
662		}
663
664		if (opt < 0)
665			return opt;
666	}
667
668	switch (opt) {
669	case OPT_MODE:
670		opts->mode = result.uint_32 & S_IALLUGO;
671		break;
672	}
673
674	return 0;
675}
676
677struct bpf_preload_ops *bpf_preload_ops;
678EXPORT_SYMBOL_GPL(bpf_preload_ops);
679
680static bool bpf_preload_mod_get(void)
681{
682	/* If bpf_preload.ko wasn't loaded earlier then load it now.
683	 * When bpf_preload is built into vmlinux the module's __init
684	 * function will populate it.
685	 */
686	if (!bpf_preload_ops) {
687		request_module("bpf_preload");
688		if (!bpf_preload_ops)
689			return false;
690	}
691	/* And grab the reference, so the module doesn't disappear while the
692	 * kernel is interacting with the kernel module and its UMD.
693	 */
694	if (!try_module_get(bpf_preload_ops->owner)) {
695		pr_err("bpf_preload module get failed.\n");
696		return false;
697	}
698	return true;
699}
700
701static void bpf_preload_mod_put(void)
702{
703	if (bpf_preload_ops)
704		/* now user can "rmmod bpf_preload" if necessary */
705		module_put(bpf_preload_ops->owner);
706}
707
708static DEFINE_MUTEX(bpf_preload_lock);
709
710static int populate_bpffs(struct dentry *parent)
711{
712	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
713	int err = 0, i;
714
715	/* grab the mutex to make sure the kernel interactions with bpf_preload
716	 * are serialized
717	 */
718	mutex_lock(&bpf_preload_lock);
719
720	/* if bpf_preload.ko wasn't built into vmlinux then load it */
721	if (!bpf_preload_mod_get())
722		goto out;
723
724	err = bpf_preload_ops->preload(objs);
725	if (err)
726		goto out_put;
727	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
728		bpf_link_inc(objs[i].link);
729		err = bpf_iter_link_pin_kernel(parent,
730					       objs[i].link_name, objs[i].link);
731		if (err) {
732			bpf_link_put(objs[i].link);
733			goto out_put;
734		}
735	}
736out_put:
737	bpf_preload_mod_put();
738out:
739	mutex_unlock(&bpf_preload_lock);
740	return err;
741}
742
743static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
744{
745	static const struct tree_descr bpf_rfiles[] = { { "" } };
746	struct bpf_mount_opts *opts = fc->fs_private;
747	struct inode *inode;
748	int ret;
749
750	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
751	if (ret)
752		return ret;
753
754	sb->s_op = &bpf_super_ops;
755
756	inode = sb->s_root->d_inode;
757	inode->i_op = &bpf_dir_iops;
758	inode->i_mode &= ~S_IALLUGO;
759	populate_bpffs(sb->s_root);
760	inode->i_mode |= S_ISVTX | opts->mode;
761	return 0;
762}
763
764static int bpf_get_tree(struct fs_context *fc)
765{
766	return get_tree_nodev(fc, bpf_fill_super);
767}
768
769static void bpf_free_fc(struct fs_context *fc)
770{
771	kfree(fc->fs_private);
772}
773
774static const struct fs_context_operations bpf_context_ops = {
775	.free		= bpf_free_fc,
776	.parse_param	= bpf_parse_param,
777	.get_tree	= bpf_get_tree,
778};
779
780/*
781 * Set up the filesystem mount context.
782 */
783static int bpf_init_fs_context(struct fs_context *fc)
784{
785	struct bpf_mount_opts *opts;
786
787	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
788	if (!opts)
789		return -ENOMEM;
790
791	opts->mode = S_IRWXUGO;
792
793	fc->fs_private = opts;
794	fc->ops = &bpf_context_ops;
795	return 0;
796}
797
798static struct file_system_type bpf_fs_type = {
799	.owner		= THIS_MODULE,
800	.name		= "bpf",
801	.init_fs_context = bpf_init_fs_context,
802	.parameters	= bpf_fs_parameters,
803	.kill_sb	= kill_litter_super,
804};
805
806static int __init bpf_init(void)
807{
808	int ret;
809
810	ret = sysfs_create_mount_point(fs_kobj, "bpf");
811	if (ret)
812		return ret;
813
814	ret = register_filesystem(&bpf_fs_type);
815	if (ret)
816		sysfs_remove_mount_point(fs_kobj, "bpf");
817
818	return ret;
819}
820fs_initcall(bpf_init);
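
The pinning entry points above, bpf_obj_pin_user() and bpf_obj_get_user(), are reached from userspace through the bpf(2) syscall with the BPF_OBJ_PIN and BPF_OBJ_GET commands. Below is a minimal userspace sketch (not part of the kernel file above); it assumes a privileged caller, a bpffs instance mounted at /sys/fs/bpf, and an illustrative pin path and map layout.

/* Userspace sketch: pin a freshly created array map and reopen it by path.
 * BPF_OBJ_PIN lands in bpf_obj_pin_user(), BPF_OBJ_GET in bpf_obj_get_user().
 * The pin path and map parameters are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	const char *path = "/sys/fs/bpf/example_map";	/* illustrative path */
	union bpf_attr attr;
	int map_fd, pinned_fd;

	/* Create a small array map so there is an anonymous fd to pin. */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = sizeof(__u32);
	attr.value_size = sizeof(__u64);
	attr.max_entries = 1;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	/* Pin: the kernel path is bpf_obj_pin_user() -> bpf_fd_probe_obj() ->
	 * bpf_obj_do_pin(), which creates the bpffs inode via vfs_mkobj().
	 */
	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;
	attr.bpf_fd = map_fd;
	if (sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr))) {
		perror("BPF_OBJ_PIN");
		return 1;
	}

	/* Reopen by path: bpf_obj_get_user() resolves the inode type and
	 * hands out a new fd referencing the same map.
	 */
	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;
	pinned_fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
	if (pinned_fd < 0) {
		perror("BPF_OBJ_GET");
		return 1;
	}
	printf("map pinned at %s, reopened as fd %d\n", path, pinned_fd);
	return 0;
}

Because bpf_obj_pin_user() takes its own reference via bpf_fd_probe_obj()/bpf_any_get(), the pinned file keeps the map alive even after map_fd is closed; unpinning is simply unlink(2) on the path, which drops that reference when the inode is freed in bpf_free_inode().
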
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Minimal file system backend for holding eBPF maps and programs,
  4 * used by bpf(2) object pinning.
  5 *
  6 * Authors:
  7 *
  8 *	Daniel Borkmann <daniel@iogearbox.net>
  9 */
 10
 11#include <linux/init.h>
 12#include <linux/magic.h>
 13#include <linux/major.h>
 14#include <linux/mount.h>
 15#include <linux/namei.h>
 16#include <linux/fs.h>
 17#include <linux/fs_context.h>
 18#include <linux/fs_parser.h>
 19#include <linux/kdev_t.h>
 20#include <linux/filter.h>
 21#include <linux/bpf.h>
 22#include <linux/bpf_trace.h>
 23
 24enum bpf_type {
 25	BPF_TYPE_UNSPEC	= 0,
 26	BPF_TYPE_PROG,
 27	BPF_TYPE_MAP,
 28	BPF_TYPE_LINK,
 29};
 30
 31static void *bpf_any_get(void *raw, enum bpf_type type)
 32{
 33	switch (type) {
 34	case BPF_TYPE_PROG:
 35		bpf_prog_inc(raw);
 36		break;
 37	case BPF_TYPE_MAP:
 38		bpf_map_inc_with_uref(raw);
 39		break;
 40	case BPF_TYPE_LINK:
 41		bpf_link_inc(raw);
 42		break;
 43	default:
 44		WARN_ON_ONCE(1);
 45		break;
 46	}
 47
 48	return raw;
 49}
 50
 51static void bpf_any_put(void *raw, enum bpf_type type)
 52{
 53	switch (type) {
 54	case BPF_TYPE_PROG:
 55		bpf_prog_put(raw);
 56		break;
 57	case BPF_TYPE_MAP:
 58		bpf_map_put_with_uref(raw);
 59		break;
 60	case BPF_TYPE_LINK:
 61		bpf_link_put(raw);
 62		break;
 63	default:
 64		WARN_ON_ONCE(1);
 65		break;
 66	}
 67}
 68
 69static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
 70{
 71	void *raw;
 72
 73	raw = bpf_map_get_with_uref(ufd);
 74	if (!IS_ERR(raw)) {
 75		*type = BPF_TYPE_MAP;
 76		return raw;
 77	}
 78
 79	raw = bpf_prog_get(ufd);
 80	if (!IS_ERR(raw)) {
 81		*type = BPF_TYPE_PROG;
 82		return raw;
 83	}
 84
 85	raw = bpf_link_get_from_fd(ufd);
 86	if (!IS_ERR(raw)) {
 87		*type = BPF_TYPE_LINK;
 88		return raw;
 89	}
 90
 91	return ERR_PTR(-EINVAL);
 92}
 93
 94static const struct inode_operations bpf_dir_iops;
 95
 96static const struct inode_operations bpf_prog_iops = { };
 97static const struct inode_operations bpf_map_iops  = { };
 98static const struct inode_operations bpf_link_iops  = { };
 99
100static struct inode *bpf_get_inode(struct super_block *sb,
101				   const struct inode *dir,
102				   umode_t mode)
103{
104	struct inode *inode;
105
106	switch (mode & S_IFMT) {
107	case S_IFDIR:
108	case S_IFREG:
109	case S_IFLNK:
110		break;
111	default:
112		return ERR_PTR(-EINVAL);
113	}
114
115	inode = new_inode(sb);
116	if (!inode)
117		return ERR_PTR(-ENOSPC);
118
119	inode->i_ino = get_next_ino();
120	inode->i_atime = current_time(inode);
121	inode->i_mtime = inode->i_atime;
122	inode->i_ctime = inode->i_atime;
123
124	inode_init_owner(inode, dir, mode);
125
126	return inode;
127}
128
129static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
130{
131	*type = BPF_TYPE_UNSPEC;
132	if (inode->i_op == &bpf_prog_iops)
133		*type = BPF_TYPE_PROG;
134	else if (inode->i_op == &bpf_map_iops)
135		*type = BPF_TYPE_MAP;
136	else if (inode->i_op == &bpf_link_iops)
137		*type = BPF_TYPE_LINK;
138	else
139		return -EACCES;
140
141	return 0;
142}
143
144static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
145				struct inode *dir)
146{
147	d_instantiate(dentry, inode);
148	dget(dentry);
149
150	dir->i_mtime = current_time(dir);
151	dir->i_ctime = dir->i_mtime;
152}
153
154static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
155{
156	struct inode *inode;
157
158	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
159	if (IS_ERR(inode))
160		return PTR_ERR(inode);
161
162	inode->i_op = &bpf_dir_iops;
163	inode->i_fop = &simple_dir_operations;
164
165	inc_nlink(inode);
166	inc_nlink(dir);
167
168	bpf_dentry_finalize(dentry, inode, dir);
169	return 0;
170}
171
172struct map_iter {
173	void *key;
174	bool done;
175};
176
177static struct map_iter *map_iter(struct seq_file *m)
178{
179	return m->private;
180}
181
182static struct bpf_map *seq_file_to_map(struct seq_file *m)
183{
184	return file_inode(m->file)->i_private;
185}
186
187static void map_iter_free(struct map_iter *iter)
188{
189	if (iter) {
190		kfree(iter->key);
191		kfree(iter);
192	}
193}
194
195static struct map_iter *map_iter_alloc(struct bpf_map *map)
196{
197	struct map_iter *iter;
198
199	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
200	if (!iter)
201		goto error;
202
203	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
204	if (!iter->key)
205		goto error;
206
207	return iter;
208
209error:
210	map_iter_free(iter);
211	return NULL;
212}
213
214static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
215{
216	struct bpf_map *map = seq_file_to_map(m);
217	void *key = map_iter(m)->key;
218	void *prev_key;
219
220	(*pos)++;
221	if (map_iter(m)->done)
222		return NULL;
223
224	if (unlikely(v == SEQ_START_TOKEN))
225		prev_key = NULL;
226	else
227		prev_key = key;
228
229	rcu_read_lock();
230	if (map->ops->map_get_next_key(map, prev_key, key)) {
231		map_iter(m)->done = true;
232		key = NULL;
233	}
234	rcu_read_unlock();
235	return key;
236}
237
238static void *map_seq_start(struct seq_file *m, loff_t *pos)
239{
240	if (map_iter(m)->done)
241		return NULL;
242
243	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
244}
245
246static void map_seq_stop(struct seq_file *m, void *v)
247{
248}
249
250static int map_seq_show(struct seq_file *m, void *v)
251{
252	struct bpf_map *map = seq_file_to_map(m);
253	void *key = map_iter(m)->key;
254
255	if (unlikely(v == SEQ_START_TOKEN)) {
256		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
257		seq_puts(m, "# WARNING!! The output format will change\n");
258	} else {
259		map->ops->map_seq_show_elem(map, key, m);
260	}
261
262	return 0;
263}
264
265static const struct seq_operations bpffs_map_seq_ops = {
266	.start	= map_seq_start,
267	.next	= map_seq_next,
268	.show	= map_seq_show,
269	.stop	= map_seq_stop,
270};
271
272static int bpffs_map_open(struct inode *inode, struct file *file)
273{
274	struct bpf_map *map = inode->i_private;
275	struct map_iter *iter;
276	struct seq_file *m;
277	int err;
278
279	iter = map_iter_alloc(map);
280	if (!iter)
281		return -ENOMEM;
282
283	err = seq_open(file, &bpffs_map_seq_ops);
284	if (err) {
285		map_iter_free(iter);
286		return err;
287	}
288
289	m = file->private_data;
290	m->private = iter;
291
292	return 0;
293}
294
295static int bpffs_map_release(struct inode *inode, struct file *file)
296{
297	struct seq_file *m = file->private_data;
298
299	map_iter_free(map_iter(m));
300
301	return seq_release(inode, file);
302}
303
304/* bpffs_map_fops should only implement the basic
305 * read operation for a BPF map.  The purpose is to
306 * provide a simple user intuitive way to do
307 * "cat bpffs/pathto/a-pinned-map".
308 *
309 * Other operations (e.g. write, lookup...) should be realized by
310 * the userspace tools (e.g. bpftool) through the
311 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
312 * interface.
313 */
314static const struct file_operations bpffs_map_fops = {
315	.open		= bpffs_map_open,
316	.read		= seq_read,
317	.release	= bpffs_map_release,
318};
319
320static int bpffs_obj_open(struct inode *inode, struct file *file)
321{
322	return -EIO;
323}
324
325static const struct file_operations bpffs_obj_fops = {
326	.open		= bpffs_obj_open,
327};
328
329static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
330			 const struct inode_operations *iops,
331			 const struct file_operations *fops)
332{
333	struct inode *dir = dentry->d_parent->d_inode;
334	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
335	if (IS_ERR(inode))
336		return PTR_ERR(inode);
337
338	inode->i_op = iops;
339	inode->i_fop = fops;
340	inode->i_private = raw;
341
342	bpf_dentry_finalize(dentry, inode, dir);
343	return 0;
344}
345
346static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
347{
348	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
349			     &bpffs_obj_fops);
350}
351
352static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
353{
354	struct bpf_map *map = arg;
355
356	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
357			     bpf_map_support_seq_show(map) ?
358			     &bpffs_map_fops : &bpffs_obj_fops);
359}
360
361static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
362{
363	struct bpf_link *link = arg;
364
365	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
366			     bpf_link_is_iter(link) ?
367			     &bpf_iter_fops : &bpffs_obj_fops);
368}
369
370static struct dentry *
371bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
372{
373	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
374	 * extensions.
375	 */
376	if (strchr(dentry->d_name.name, '.'))
377		return ERR_PTR(-EPERM);
378
379	return simple_lookup(dir, dentry, flags);
380}
381
382static int bpf_symlink(struct inode *dir, struct dentry *dentry,
383		       const char *target)
384{
385	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
386	struct inode *inode;
387
388	if (!link)
389		return -ENOMEM;
390
391	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
392	if (IS_ERR(inode)) {
393		kfree(link);
394		return PTR_ERR(inode);
395	}
396
397	inode->i_op = &simple_symlink_inode_operations;
398	inode->i_link = link;
399
400	bpf_dentry_finalize(dentry, inode, dir);
401	return 0;
402}
403
404static const struct inode_operations bpf_dir_iops = {
405	.lookup		= bpf_lookup,
406	.mkdir		= bpf_mkdir,
407	.symlink	= bpf_symlink,
408	.rmdir		= simple_rmdir,
409	.rename		= simple_rename,
410	.link		= simple_link,
411	.unlink		= simple_unlink,
412};
413
414static int bpf_obj_do_pin(const char __user *pathname, void *raw,
415			  enum bpf_type type)
416{
417	struct dentry *dentry;
418	struct inode *dir;
419	struct path path;
420	umode_t mode;
421	int ret;
422
423	dentry = user_path_create(AT_FDCWD, pathname, &path, 0);
424	if (IS_ERR(dentry))
425		return PTR_ERR(dentry);
426
427	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
428
429	ret = security_path_mknod(&path, dentry, mode, 0);
430	if (ret)
431		goto out;
432
433	dir = d_inode(path.dentry);
434	if (dir->i_op != &bpf_dir_iops) {
435		ret = -EPERM;
436		goto out;
437	}
438
439	switch (type) {
440	case BPF_TYPE_PROG:
441		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
442		break;
443	case BPF_TYPE_MAP:
444		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
445		break;
446	case BPF_TYPE_LINK:
447		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
448		break;
449	default:
450		ret = -EPERM;
451	}
452out:
453	done_path_create(&path, dentry);
454	return ret;
455}
456
457int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
458{
459	enum bpf_type type;
460	void *raw;
461	int ret;
462
463	raw = bpf_fd_probe_obj(ufd, &type);
464	if (IS_ERR(raw))
465		return PTR_ERR(raw);
466
467	ret = bpf_obj_do_pin(pathname, raw, type);
468	if (ret != 0)
469		bpf_any_put(raw, type);
470
471	return ret;
472}
473
474static void *bpf_obj_do_get(const char __user *pathname,
475			    enum bpf_type *type, int flags)
476{
477	struct inode *inode;
478	struct path path;
479	void *raw;
480	int ret;
481
482	ret = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
483	if (ret)
484		return ERR_PTR(ret);
485
486	inode = d_backing_inode(path.dentry);
487	ret = inode_permission(inode, ACC_MODE(flags));
488	if (ret)
489		goto out;
490
491	ret = bpf_inode_type(inode, type);
492	if (ret)
493		goto out;
494
495	raw = bpf_any_get(inode->i_private, *type);
496	if (!IS_ERR(raw))
497		touch_atime(&path);
498
499	path_put(&path);
500	return raw;
501out:
502	path_put(&path);
503	return ERR_PTR(ret);
504}
505
506int bpf_obj_get_user(const char __user *pathname, int flags)
507{
508	enum bpf_type type = BPF_TYPE_UNSPEC;
509	int f_flags;
510	void *raw;
511	int ret;
512
513	f_flags = bpf_get_file_flag(flags);
514	if (f_flags < 0)
515		return f_flags;
516
517	raw = bpf_obj_do_get(pathname, &type, f_flags);
518	if (IS_ERR(raw))
519		return PTR_ERR(raw);
520
521	if (type == BPF_TYPE_PROG)
522		ret = bpf_prog_new_fd(raw);
523	else if (type == BPF_TYPE_MAP)
524		ret = bpf_map_new_fd(raw, f_flags);
525	else if (type == BPF_TYPE_LINK)
526		ret = bpf_link_new_fd(raw);
527	else
528		return -ENOENT;
529
530	if (ret < 0)
531		bpf_any_put(raw, type);
532	return ret;
533}
534
535static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
536{
537	struct bpf_prog *prog;
538	int ret = inode_permission(inode, MAY_READ);
539	if (ret)
540		return ERR_PTR(ret);
541
542	if (inode->i_op == &bpf_map_iops)
543		return ERR_PTR(-EINVAL);
544	if (inode->i_op == &bpf_link_iops)
545		return ERR_PTR(-EINVAL);
546	if (inode->i_op != &bpf_prog_iops)
547		return ERR_PTR(-EACCES);
548
549	prog = inode->i_private;
550
551	ret = security_bpf_prog(prog);
552	if (ret < 0)
553		return ERR_PTR(ret);
554
555	if (!bpf_prog_get_ok(prog, &type, false))
556		return ERR_PTR(-EINVAL);
557
558	bpf_prog_inc(prog);
559	return prog;
560}
561
562struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
563{
564	struct bpf_prog *prog;
565	struct path path;
566	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
567	if (ret)
568		return ERR_PTR(ret);
569	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
570	if (!IS_ERR(prog))
571		touch_atime(&path);
572	path_put(&path);
573	return prog;
574}
575EXPORT_SYMBOL(bpf_prog_get_type_path);
576
577/*
578 * Display the mount options in /proc/mounts.
579 */
580static int bpf_show_options(struct seq_file *m, struct dentry *root)
581{
582	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;
583
584	if (mode != S_IRWXUGO)
585		seq_printf(m, ",mode=%o", mode);
586	return 0;
587}
588
589static void bpf_free_inode(struct inode *inode)
590{
591	enum bpf_type type;
592
593	if (S_ISLNK(inode->i_mode))
594		kfree(inode->i_link);
595	if (!bpf_inode_type(inode, &type))
596		bpf_any_put(inode->i_private, type);
597	free_inode_nonrcu(inode);
598}
599
600static const struct super_operations bpf_super_ops = {
601	.statfs		= simple_statfs,
602	.drop_inode	= generic_delete_inode,
603	.show_options	= bpf_show_options,
604	.free_inode	= bpf_free_inode,
605};
606
607enum {
608	OPT_MODE,
609};
610
611static const struct fs_parameter_spec bpf_fs_parameters[] = {
612	fsparam_u32oct	("mode",			OPT_MODE),
613	{}
614};
615
616struct bpf_mount_opts {
617	umode_t mode;
618};
619
620static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
621{
622	struct bpf_mount_opts *opts = fc->fs_private;
623	struct fs_parse_result result;
624	int opt;
625
626	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
627	if (opt < 0)
628		/* We might like to report bad mount options here, but
629		 * traditionally we've ignored all mount options, so we'd
630		 * better continue to ignore non-existing options for bpf.
631		 */
632		return opt == -ENOPARAM ? 0 : opt;
633
634	switch (opt) {
635	case OPT_MODE:
636		opts->mode = result.uint_32 & S_IALLUGO;
637		break;
638	}
639
640	return 0;
641}
642
643static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
644{
645	static const struct tree_descr bpf_rfiles[] = { { "" } };
646	struct bpf_mount_opts *opts = fc->fs_private;
647	struct inode *inode;
648	int ret;
649
650	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
651	if (ret)
652		return ret;
653
654	sb->s_op = &bpf_super_ops;
655
656	inode = sb->s_root->d_inode;
657	inode->i_op = &bpf_dir_iops;
658	inode->i_mode &= ~S_IALLUGO;
659	inode->i_mode |= S_ISVTX | opts->mode;
660
661	return 0;
662}
663
664static int bpf_get_tree(struct fs_context *fc)
665{
666	return get_tree_nodev(fc, bpf_fill_super);
667}
668
669static void bpf_free_fc(struct fs_context *fc)
670{
671	kfree(fc->fs_private);
672}
673
674static const struct fs_context_operations bpf_context_ops = {
675	.free		= bpf_free_fc,
676	.parse_param	= bpf_parse_param,
677	.get_tree	= bpf_get_tree,
678};
679
680/*
681 * Set up the filesystem mount context.
682 */
683static int bpf_init_fs_context(struct fs_context *fc)
684{
685	struct bpf_mount_opts *opts;
686
687	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
688	if (!opts)
689		return -ENOMEM;
690
691	opts->mode = S_IRWXUGO;
692
693	fc->fs_private = opts;
694	fc->ops = &bpf_context_ops;
695	return 0;
696}
697
698static struct file_system_type bpf_fs_type = {
699	.owner		= THIS_MODULE,
700	.name		= "bpf",
701	.init_fs_context = bpf_init_fs_context,
702	.parameters	= bpf_fs_parameters,
703	.kill_sb	= kill_litter_super,
704};
705
706static int __init bpf_init(void)
707{
708	int ret;
709
710	ret = sysfs_create_mount_point(fs_kobj, "bpf");
711	if (ret)
712		return ret;
713
714	ret = register_filesystem(&bpf_fs_type);
715	if (ret)
716		sysfs_remove_mount_point(fs_kobj, "bpf");
717
718	return ret;
719}
720fs_initcall(bpf_init);
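
Both versions accept a single mount option, "mode", parsed as octal by bpf_parse_param() (fsparam_u32oct) and reported back by bpf_show_options() whenever it differs from the default S_IRWXUGO. A small userspace sketch of mounting an extra bpffs instance with a non-default mode follows; it assumes a privileged caller and an existing, illustrative mount point.

/* Userspace sketch: mount a second bpffs instance with mode=0770.  Unknown
 * options are deliberately ignored by bpf_parse_param() rather than rejected.
 * Assumes the caller is root and that the target directory already exists.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	const char *target = "/mnt/bpf-test";	/* illustrative mount point */

	if (mount("bpf", target, "bpf", 0, "mode=0770")) {
		perror("mount");
		return 1;
	}
	/* bpf_show_options() now reports ",mode=770" for this mount in
	 * /proc/mounts, since the mode differs from the default S_IRWXUGO.
	 */
	printf("bpffs mounted at %s with mode 0770\n", target);
	return 0;
}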