v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Minimal file system backend for holding eBPF maps and programs,
  4 * used by bpf(2) object pinning.
  5 *
  6 * Authors:
  7 *
  8 *	Daniel Borkmann <daniel@iogearbox.net>
  9 */
 10
 11#include <linux/init.h>
 12#include <linux/magic.h>
 13#include <linux/major.h>
 14#include <linux/mount.h>
 15#include <linux/namei.h>
 16#include <linux/fs.h>
 17#include <linux/fs_context.h>
 18#include <linux/fs_parser.h>
 19#include <linux/kdev_t.h>
 20#include <linux/filter.h>
 21#include <linux/bpf.h>
 22#include <linux/bpf_trace.h>
 23#include "preload/bpf_preload.h"
 24
 25enum bpf_type {
 26	BPF_TYPE_UNSPEC	= 0,
 27	BPF_TYPE_PROG,
 28	BPF_TYPE_MAP,
 29	BPF_TYPE_LINK,
 30};
 31
 32static void *bpf_any_get(void *raw, enum bpf_type type)
 33{
 34	switch (type) {
 35	case BPF_TYPE_PROG:
 36		bpf_prog_inc(raw);
 37		break;
 38	case BPF_TYPE_MAP:
 39		bpf_map_inc_with_uref(raw);
 40		break;
 41	case BPF_TYPE_LINK:
 42		bpf_link_inc(raw);
 43		break;
 44	default:
 45		WARN_ON_ONCE(1);
 46		break;
 47	}
 48
 49	return raw;
 50}
 51
 52static void bpf_any_put(void *raw, enum bpf_type type)
 53{
 54	switch (type) {
 55	case BPF_TYPE_PROG:
 56		bpf_prog_put(raw);
 57		break;
 58	case BPF_TYPE_MAP:
 59		bpf_map_put_with_uref(raw);
 60		break;
 61	case BPF_TYPE_LINK:
 62		bpf_link_put(raw);
 63		break;
 64	default:
 65		WARN_ON_ONCE(1);
 66		break;
 67	}
 68}
 69
 70static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
 71{
 72	void *raw;
 73
 74	raw = bpf_map_get_with_uref(ufd);
 75	if (!IS_ERR(raw)) {
 76		*type = BPF_TYPE_MAP;
 77		return raw;
 78	}
 79
 80	raw = bpf_prog_get(ufd);
 81	if (!IS_ERR(raw)) {
 82		*type = BPF_TYPE_PROG;
 83		return raw;
 84	}
 85
 86	raw = bpf_link_get_from_fd(ufd);
 87	if (!IS_ERR(raw)) {
 88		*type = BPF_TYPE_LINK;
 89		return raw;
 90	}
 91
 92	return ERR_PTR(-EINVAL);
 93}
 94
 95static const struct inode_operations bpf_dir_iops;
 96
 97static const struct inode_operations bpf_prog_iops = { };
 98static const struct inode_operations bpf_map_iops  = { };
 99static const struct inode_operations bpf_link_iops  = { };
100
101static struct inode *bpf_get_inode(struct super_block *sb,
102				   const struct inode *dir,
103				   umode_t mode)
104{
105	struct inode *inode;
106
107	switch (mode & S_IFMT) {
108	case S_IFDIR:
109	case S_IFREG:
110	case S_IFLNK:
111		break;
112	default:
113		return ERR_PTR(-EINVAL);
114	}
115
116	inode = new_inode(sb);
117	if (!inode)
118		return ERR_PTR(-ENOSPC);
119
120	inode->i_ino = get_next_ino();
121	inode->i_atime = current_time(inode);
122	inode->i_mtime = inode->i_atime;
123	inode->i_ctime = inode->i_atime;
124
125	inode_init_owner(&init_user_ns, inode, dir, mode);
126
127	return inode;
128}
129
130static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
131{
132	*type = BPF_TYPE_UNSPEC;
133	if (inode->i_op == &bpf_prog_iops)
134		*type = BPF_TYPE_PROG;
135	else if (inode->i_op == &bpf_map_iops)
136		*type = BPF_TYPE_MAP;
137	else if (inode->i_op == &bpf_link_iops)
138		*type = BPF_TYPE_LINK;
139	else
140		return -EACCES;
141
142	return 0;
143}
144
145static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
146				struct inode *dir)
147{
148	d_instantiate(dentry, inode);
149	dget(dentry);
150
151	dir->i_mtime = current_time(dir);
152	dir->i_ctime = dir->i_mtime;
153}
154
155static int bpf_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
156		     struct dentry *dentry, umode_t mode)
157{
158	struct inode *inode;
159
160	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
161	if (IS_ERR(inode))
162		return PTR_ERR(inode);
163
164	inode->i_op = &bpf_dir_iops;
165	inode->i_fop = &simple_dir_operations;
166
167	inc_nlink(inode);
168	inc_nlink(dir);
169
170	bpf_dentry_finalize(dentry, inode, dir);
171	return 0;
172}
173
174struct map_iter {
175	void *key;
176	bool done;
177};
178
179static struct map_iter *map_iter(struct seq_file *m)
180{
181	return m->private;
182}
183
184static struct bpf_map *seq_file_to_map(struct seq_file *m)
185{
186	return file_inode(m->file)->i_private;
187}
188
189static void map_iter_free(struct map_iter *iter)
190{
191	if (iter) {
192		kfree(iter->key);
193		kfree(iter);
194	}
195}
196
197static struct map_iter *map_iter_alloc(struct bpf_map *map)
198{
199	struct map_iter *iter;
200
201	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
202	if (!iter)
203		goto error;
204
205	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
206	if (!iter->key)
207		goto error;
208
209	return iter;
210
211error:
212	map_iter_free(iter);
213	return NULL;
214}
215
216static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
217{
218	struct bpf_map *map = seq_file_to_map(m);
219	void *key = map_iter(m)->key;
220	void *prev_key;
221
222	(*pos)++;
223	if (map_iter(m)->done)
224		return NULL;
225
226	if (unlikely(v == SEQ_START_TOKEN))
227		prev_key = NULL;
228	else
229		prev_key = key;
230
231	rcu_read_lock();
232	if (map->ops->map_get_next_key(map, prev_key, key)) {
233		map_iter(m)->done = true;
234		key = NULL;
235	}
236	rcu_read_unlock();
237	return key;
238}
239
240static void *map_seq_start(struct seq_file *m, loff_t *pos)
241{
242	if (map_iter(m)->done)
243		return NULL;
244
245	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
246}
247
248static void map_seq_stop(struct seq_file *m, void *v)
249{
250}
251
252static int map_seq_show(struct seq_file *m, void *v)
253{
254	struct bpf_map *map = seq_file_to_map(m);
255	void *key = map_iter(m)->key;
256
257	if (unlikely(v == SEQ_START_TOKEN)) {
258		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
259		seq_puts(m, "# WARNING!! The output format will change\n");
260	} else {
261		map->ops->map_seq_show_elem(map, key, m);
262	}
263
264	return 0;
265}
266
267static const struct seq_operations bpffs_map_seq_ops = {
268	.start	= map_seq_start,
269	.next	= map_seq_next,
270	.show	= map_seq_show,
271	.stop	= map_seq_stop,
272};
273
274static int bpffs_map_open(struct inode *inode, struct file *file)
275{
276	struct bpf_map *map = inode->i_private;
277	struct map_iter *iter;
278	struct seq_file *m;
279	int err;
280
281	iter = map_iter_alloc(map);
282	if (!iter)
283		return -ENOMEM;
284
285	err = seq_open(file, &bpffs_map_seq_ops);
286	if (err) {
287		map_iter_free(iter);
288		return err;
289	}
290
291	m = file->private_data;
292	m->private = iter;
293
294	return 0;
295}
296
297static int bpffs_map_release(struct inode *inode, struct file *file)
298{
299	struct seq_file *m = file->private_data;
300
301	map_iter_free(map_iter(m));
302
303	return seq_release(inode, file);
304}
305
306/* bpffs_map_fops should only implement the basic
307 * read operation for a BPF map.  The purpose is to
308 * provide a simple user intuitive way to do
309 * "cat bpffs/pathto/a-pinned-map".
310 *
311 * Other operations (e.g. write, lookup...) should be realized by
312 * the userspace tools (e.g. bpftool) through the
313 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
314 * interface.
315 */
316static const struct file_operations bpffs_map_fops = {
317	.open		= bpffs_map_open,
318	.read		= seq_read,
319	.release	= bpffs_map_release,
320};
321
322static int bpffs_obj_open(struct inode *inode, struct file *file)
323{
324	return -EIO;
325}
326
327static const struct file_operations bpffs_obj_fops = {
328	.open		= bpffs_obj_open,
329};
330
331static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
332			 const struct inode_operations *iops,
333			 const struct file_operations *fops)
334{
335	struct inode *dir = dentry->d_parent->d_inode;
336	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
337	if (IS_ERR(inode))
338		return PTR_ERR(inode);
339
340	inode->i_op = iops;
341	inode->i_fop = fops;
342	inode->i_private = raw;
343
344	bpf_dentry_finalize(dentry, inode, dir);
345	return 0;
346}
347
348static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
349{
350	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
351			     &bpffs_obj_fops);
352}
353
354static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
355{
356	struct bpf_map *map = arg;
357
358	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
359			     bpf_map_support_seq_show(map) ?
360			     &bpffs_map_fops : &bpffs_obj_fops);
361}
362
363static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
364{
365	struct bpf_link *link = arg;
366
367	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
368			     bpf_link_is_iter(link) ?
369			     &bpf_iter_fops : &bpffs_obj_fops);
370}
371
372static struct dentry *
373bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
374{
375	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
376	 * extensions. That allows populate_bpffs() to create special files.
377	 */
378	if ((dir->i_mode & S_IALLUGO) &&
379	    strchr(dentry->d_name.name, '.'))
380		return ERR_PTR(-EPERM);
381
382	return simple_lookup(dir, dentry, flags);
383}
384
385static int bpf_symlink(struct user_namespace *mnt_userns, struct inode *dir,
386		       struct dentry *dentry, const char *target)
387{
388	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
389	struct inode *inode;
390
391	if (!link)
392		return -ENOMEM;
393
394	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
395	if (IS_ERR(inode)) {
396		kfree(link);
397		return PTR_ERR(inode);
398	}
399
400	inode->i_op = &simple_symlink_inode_operations;
401	inode->i_link = link;
402
403	bpf_dentry_finalize(dentry, inode, dir);
404	return 0;
405}
406
407static const struct inode_operations bpf_dir_iops = {
408	.lookup		= bpf_lookup,
409	.mkdir		= bpf_mkdir,
410	.symlink	= bpf_symlink,
411	.rmdir		= simple_rmdir,
412	.rename		= simple_rename,
413	.link		= simple_link,
414	.unlink		= simple_unlink,
415};
416
417/* pin iterator link into bpffs */
418static int bpf_iter_link_pin_kernel(struct dentry *parent,
419				    const char *name, struct bpf_link *link)
420{
421	umode_t mode = S_IFREG | S_IRUSR;
422	struct dentry *dentry;
423	int ret;
424
425	inode_lock(parent->d_inode);
426	dentry = lookup_one_len(name, parent, strlen(name));
427	if (IS_ERR(dentry)) {
428		inode_unlock(parent->d_inode);
429		return PTR_ERR(dentry);
430	}
431	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
432			    &bpf_iter_fops);
433	dput(dentry);
434	inode_unlock(parent->d_inode);
435	return ret;
436}
437
438static int bpf_obj_do_pin(const char __user *pathname, void *raw,
439			  enum bpf_type type)
440{
441	struct dentry *dentry;
442	struct inode *dir;
443	struct path path;
444	umode_t mode;
445	int ret;
446
447	dentry = user_path_create(AT_FDCWD, pathname, &path, 0);
448	if (IS_ERR(dentry))
449		return PTR_ERR(dentry);
450
451	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
452
453	ret = security_path_mknod(&path, dentry, mode, 0);
454	if (ret)
455		goto out;
456
457	dir = d_inode(path.dentry);
458	if (dir->i_op != &bpf_dir_iops) {
459		ret = -EPERM;
460		goto out;
461	}
462
463	switch (type) {
464	case BPF_TYPE_PROG:
465		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
466		break;
467	case BPF_TYPE_MAP:
468		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
469		break;
470	case BPF_TYPE_LINK:
471		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
472		break;
473	default:
474		ret = -EPERM;
475	}
476out:
477	done_path_create(&path, dentry);
478	return ret;
479}
480
481int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
482{
483	enum bpf_type type;
484	void *raw;
485	int ret;
486
487	raw = bpf_fd_probe_obj(ufd, &type);
488	if (IS_ERR(raw))
489		return PTR_ERR(raw);
490
491	ret = bpf_obj_do_pin(pathname, raw, type);
492	if (ret != 0)
493		bpf_any_put(raw, type);
494
495	return ret;
496}
497
498static void *bpf_obj_do_get(const char __user *pathname,
499			    enum bpf_type *type, int flags)
500{
501	struct inode *inode;
502	struct path path;
503	void *raw;
504	int ret;
505
506	ret = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
507	if (ret)
508		return ERR_PTR(ret);
509
510	inode = d_backing_inode(path.dentry);
511	ret = path_permission(&path, ACC_MODE(flags));
512	if (ret)
513		goto out;
514
515	ret = bpf_inode_type(inode, type);
516	if (ret)
517		goto out;
518
519	raw = bpf_any_get(inode->i_private, *type);
520	if (!IS_ERR(raw))
521		touch_atime(&path);
522
523	path_put(&path);
524	return raw;
525out:
526	path_put(&path);
527	return ERR_PTR(ret);
528}
529
530int bpf_obj_get_user(const char __user *pathname, int flags)
531{
532	enum bpf_type type = BPF_TYPE_UNSPEC;
533	int f_flags;
534	void *raw;
535	int ret;
536
537	f_flags = bpf_get_file_flag(flags);
538	if (f_flags < 0)
539		return f_flags;
540
541	raw = bpf_obj_do_get(pathname, &type, f_flags);
542	if (IS_ERR(raw))
543		return PTR_ERR(raw);
544
545	if (type == BPF_TYPE_PROG)
546		ret = bpf_prog_new_fd(raw);
547	else if (type == BPF_TYPE_MAP)
548		ret = bpf_map_new_fd(raw, f_flags);
549	else if (type == BPF_TYPE_LINK)
550		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
551	else
552		return -ENOENT;
553
554	if (ret < 0)
555		bpf_any_put(raw, type);
556	return ret;
557}
558
559static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
560{
561	struct bpf_prog *prog;
562	int ret = inode_permission(&init_user_ns, inode, MAY_READ);
563	if (ret)
564		return ERR_PTR(ret);
565
566	if (inode->i_op == &bpf_map_iops)
567		return ERR_PTR(-EINVAL);
568	if (inode->i_op == &bpf_link_iops)
569		return ERR_PTR(-EINVAL);
570	if (inode->i_op != &bpf_prog_iops)
571		return ERR_PTR(-EACCES);
572
573	prog = inode->i_private;
574
575	ret = security_bpf_prog(prog);
576	if (ret < 0)
577		return ERR_PTR(ret);
578
579	if (!bpf_prog_get_ok(prog, &type, false))
580		return ERR_PTR(-EINVAL);
581
582	bpf_prog_inc(prog);
583	return prog;
584}
585
586struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
587{
588	struct bpf_prog *prog;
589	struct path path;
590	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
591	if (ret)
592		return ERR_PTR(ret);
593	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
594	if (!IS_ERR(prog))
595		touch_atime(&path);
596	path_put(&path);
597	return prog;
598}
599EXPORT_SYMBOL(bpf_prog_get_type_path);
600
601/*
602 * Display the mount options in /proc/mounts.
603 */
604static int bpf_show_options(struct seq_file *m, struct dentry *root)
605{
606	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;
607
608	if (mode != S_IRWXUGO)
609		seq_printf(m, ",mode=%o", mode);
610	return 0;
611}
612
613static void bpf_free_inode(struct inode *inode)
614{
615	enum bpf_type type;
616
617	if (S_ISLNK(inode->i_mode))
618		kfree(inode->i_link);
619	if (!bpf_inode_type(inode, &type))
620		bpf_any_put(inode->i_private, type);
621	free_inode_nonrcu(inode);
622}
623
624static const struct super_operations bpf_super_ops = {
625	.statfs		= simple_statfs,
626	.drop_inode	= generic_delete_inode,
627	.show_options	= bpf_show_options,
628	.free_inode	= bpf_free_inode,
629};
630
631enum {
632	OPT_MODE,
633};
634
635static const struct fs_parameter_spec bpf_fs_parameters[] = {
636	fsparam_u32oct	("mode",			OPT_MODE),
637	{}
638};
639
640struct bpf_mount_opts {
641	umode_t mode;
642};
643
644static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
645{
646	struct bpf_mount_opts *opts = fc->fs_private;
647	struct fs_parse_result result;
648	int opt;
649
650	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
651	if (opt < 0) {
652		/* We might like to report bad mount options here, but
653		 * traditionally we've ignored all mount options, so we'd
654		 * better continue to ignore non-existing options for bpf.
655		 */
656		if (opt == -ENOPARAM) {
657			opt = vfs_parse_fs_param_source(fc, param);
658			if (opt != -ENOPARAM)
659				return opt;
660
661			return 0;
662		}
663
664		if (opt < 0)
665			return opt;
666	}
667
668	switch (opt) {
669	case OPT_MODE:
670		opts->mode = result.uint_32 & S_IALLUGO;
671		break;
672	}
673
674	return 0;
675}
676
677struct bpf_preload_ops *bpf_preload_ops;
678EXPORT_SYMBOL_GPL(bpf_preload_ops);
679
680static bool bpf_preload_mod_get(void)
681{
682	/* If bpf_preload.ko wasn't loaded earlier then load it now.
683	 * When bpf_preload is built into vmlinux the module's __init
684	 * function will populate it.
685	 */
686	if (!bpf_preload_ops) {
687		request_module("bpf_preload");
688		if (!bpf_preload_ops)
689			return false;
690	}
691	/* And grab the reference, so the module doesn't disappear while the
692	 * kernel is interacting with the kernel module and its UMD.
693	 */
694	if (!try_module_get(bpf_preload_ops->owner)) {
695		pr_err("bpf_preload module get failed.\n");
696		return false;
697	}
698	return true;
699}
700
701static void bpf_preload_mod_put(void)
702{
703	if (bpf_preload_ops)
704		/* now user can "rmmod bpf_preload" if necessary */
705		module_put(bpf_preload_ops->owner);
706}
707
708static DEFINE_MUTEX(bpf_preload_lock);
709
710static int populate_bpffs(struct dentry *parent)
711{
712	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
713	int err = 0, i;
714
715	/* grab the mutex to make sure the kernel interactions with bpf_preload
716	 * are serialized
717	 */
718	mutex_lock(&bpf_preload_lock);
719
720	/* if bpf_preload.ko wasn't built into vmlinux then load it */
721	if (!bpf_preload_mod_get())
722		goto out;
723
724	err = bpf_preload_ops->preload(objs);
725	if (err)
726		goto out_put;
727	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
728		bpf_link_inc(objs[i].link);
729		err = bpf_iter_link_pin_kernel(parent,
730					       objs[i].link_name, objs[i].link);
731		if (err) {
732			bpf_link_put(objs[i].link);
733			goto out_put;
734		}
735	}
736out_put:
737	bpf_preload_mod_put();
738out:
739	mutex_unlock(&bpf_preload_lock);
740	return err;
741}
742
743static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
744{
745	static const struct tree_descr bpf_rfiles[] = { { "" } };
746	struct bpf_mount_opts *opts = fc->fs_private;
747	struct inode *inode;
748	int ret;
749
750	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
751	if (ret)
752		return ret;
753
754	sb->s_op = &bpf_super_ops;
755
756	inode = sb->s_root->d_inode;
757	inode->i_op = &bpf_dir_iops;
758	inode->i_mode &= ~S_IALLUGO;
759	populate_bpffs(sb->s_root);
760	inode->i_mode |= S_ISVTX | opts->mode;
761	return 0;
762}
763
764static int bpf_get_tree(struct fs_context *fc)
765{
766	return get_tree_nodev(fc, bpf_fill_super);
767}
768
769static void bpf_free_fc(struct fs_context *fc)
770{
771	kfree(fc->fs_private);
772}
773
774static const struct fs_context_operations bpf_context_ops = {
775	.free		= bpf_free_fc,
776	.parse_param	= bpf_parse_param,
777	.get_tree	= bpf_get_tree,
778};
779
780/*
781 * Set up the filesystem mount context.
782 */
783static int bpf_init_fs_context(struct fs_context *fc)
784{
785	struct bpf_mount_opts *opts;
786
787	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
788	if (!opts)
789		return -ENOMEM;
790
791	opts->mode = S_IRWXUGO;
792
793	fc->fs_private = opts;
794	fc->ops = &bpf_context_ops;
795	return 0;
796}
797
798static struct file_system_type bpf_fs_type = {
799	.owner		= THIS_MODULE,
800	.name		= "bpf",
801	.init_fs_context = bpf_init_fs_context,
802	.parameters	= bpf_fs_parameters,
803	.kill_sb	= kill_litter_super,
804};
805
806static int __init bpf_init(void)
807{
808	int ret;
809
810	ret = sysfs_create_mount_point(fs_kobj, "bpf");
811	if (ret)
812		return ret;
813
814	ret = register_filesystem(&bpf_fs_type);
815	if (ret)
816		sysfs_remove_mount_point(fs_kobj, "bpf");
817
818	return ret;
819}
820fs_initcall(bpf_init);
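
The v6.2 listing above is the kernel side of bpf(2) object pinning: BPF_OBJ_PIN ends up in bpf_obj_pin_user() and BPF_OBJ_GET in bpf_obj_get_user(). Below is a minimal userspace sketch of that interface, assuming a bpffs mount at /sys/fs/bpf and calling the raw bpf(2) syscall directly rather than going through libbpf; the helper names sys_bpf(), obj_pin() and obj_get() are made up for illustration.

/*
 * Userspace sketch (not part of inode.c): pin a BPF object fd under a
 * mounted bpffs and re-open it later. If the target path is not on a
 * bpf filesystem, bpf_obj_do_pin() above rejects the request with -EPERM.
 */
#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

/* BPF_OBJ_PIN: creates the bpffs inode via bpf_obj_pin_user()/bpf_obj_do_pin(). */
static int obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bpf_fd = fd;
	attr.pathname = (uint64_t)(unsigned long)pathname;
	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

/* BPF_OBJ_GET: returns a new fd for the pinned object via bpf_obj_get_user(). */
static int obj_get(const char *pathname, uint32_t file_flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (uint64_t)(unsigned long)pathname;
	attr.file_flags = file_flags;	/* e.g. BPF_F_RDONLY to get a read-only map fd */
	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

libbpf exposes the same two commands as bpf_obj_pin() and bpf_obj_get(); the sketch only makes explicit which bpf_attr fields the kernel code above consumes. A map pinned this way can also be inspected with "cat /sys/fs/bpf/<name>" when its map type implements map_seq_show_elem(), which is the read path bpffs_map_fops provides.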
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Minimal file system backend for holding eBPF maps and programs,
  4 * used by bpf(2) object pinning.
  5 *
  6 * Authors:
  7 *
  8 *	Daniel Borkmann <daniel@iogearbox.net>
  9 */
 10
 11#include <linux/init.h>
 12#include <linux/magic.h>
 13#include <linux/major.h>
 14#include <linux/mount.h>
 15#include <linux/namei.h>
 16#include <linux/fs.h>
 17#include <linux/fs_context.h>
 18#include <linux/fs_parser.h>
 19#include <linux/kdev_t.h>
 20#include <linux/filter.h>
 21#include <linux/bpf.h>
 22#include <linux/bpf_trace.h>
 23#include "preload/bpf_preload.h"
 24
 25enum bpf_type {
 26	BPF_TYPE_UNSPEC	= 0,
 27	BPF_TYPE_PROG,
 28	BPF_TYPE_MAP,
 29	BPF_TYPE_LINK,
 30};
 31
 32static void *bpf_any_get(void *raw, enum bpf_type type)
 33{
 34	switch (type) {
 35	case BPF_TYPE_PROG:
 36		bpf_prog_inc(raw);
 37		break;
 38	case BPF_TYPE_MAP:
 39		bpf_map_inc_with_uref(raw);
 40		break;
 41	case BPF_TYPE_LINK:
 42		bpf_link_inc(raw);
 43		break;
 44	default:
 45		WARN_ON_ONCE(1);
 46		break;
 47	}
 48
 49	return raw;
 50}
 51
 52static void bpf_any_put(void *raw, enum bpf_type type)
 53{
 54	switch (type) {
 55	case BPF_TYPE_PROG:
 56		bpf_prog_put(raw);
 57		break;
 58	case BPF_TYPE_MAP:
 59		bpf_map_put_with_uref(raw);
 60		break;
 61	case BPF_TYPE_LINK:
 62		bpf_link_put(raw);
 63		break;
 64	default:
 65		WARN_ON_ONCE(1);
 66		break;
 67	}
 68}
 69
 70static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
 71{
 72	void *raw;
 73
 74	raw = bpf_map_get_with_uref(ufd);
 75	if (!IS_ERR(raw)) {
 76		*type = BPF_TYPE_MAP;
 77		return raw;
 78	}
 79
 80	raw = bpf_prog_get(ufd);
 81	if (!IS_ERR(raw)) {
 82		*type = BPF_TYPE_PROG;
 83		return raw;
 84	}
 85
 86	raw = bpf_link_get_from_fd(ufd);
 87	if (!IS_ERR(raw)) {
 88		*type = BPF_TYPE_LINK;
 89		return raw;
 90	}
 91
 92	return ERR_PTR(-EINVAL);
 93}
 94
 95static const struct inode_operations bpf_dir_iops;
 96
 97static const struct inode_operations bpf_prog_iops = { };
 98static const struct inode_operations bpf_map_iops  = { };
 99static const struct inode_operations bpf_link_iops  = { };
100
101static struct inode *bpf_get_inode(struct super_block *sb,
102				   const struct inode *dir,
103				   umode_t mode)
104{
105	struct inode *inode;
106
107	switch (mode & S_IFMT) {
108	case S_IFDIR:
109	case S_IFREG:
110	case S_IFLNK:
111		break;
112	default:
113		return ERR_PTR(-EINVAL);
114	}
115
116	inode = new_inode(sb);
117	if (!inode)
118		return ERR_PTR(-ENOSPC);
119
120	inode->i_ino = get_next_ino();
121	simple_inode_init_ts(inode);
122
123	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
124
125	return inode;
126}
127
128static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
129{
130	*type = BPF_TYPE_UNSPEC;
131	if (inode->i_op == &bpf_prog_iops)
132		*type = BPF_TYPE_PROG;
133	else if (inode->i_op == &bpf_map_iops)
134		*type = BPF_TYPE_MAP;
135	else if (inode->i_op == &bpf_link_iops)
136		*type = BPF_TYPE_LINK;
137	else
138		return -EACCES;
139
140	return 0;
141}
142
143static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
144				struct inode *dir)
145{
146	d_instantiate(dentry, inode);
147	dget(dentry);
148
149	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
150}
151
152static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
153		     struct dentry *dentry, umode_t mode)
154{
155	struct inode *inode;
156
157	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
158	if (IS_ERR(inode))
159		return PTR_ERR(inode);
160
161	inode->i_op = &bpf_dir_iops;
162	inode->i_fop = &simple_dir_operations;
163
164	inc_nlink(inode);
165	inc_nlink(dir);
166
167	bpf_dentry_finalize(dentry, inode, dir);
168	return 0;
169}
170
171struct map_iter {
172	void *key;
173	bool done;
174};
175
176static struct map_iter *map_iter(struct seq_file *m)
177{
178	return m->private;
179}
180
181static struct bpf_map *seq_file_to_map(struct seq_file *m)
182{
183	return file_inode(m->file)->i_private;
184}
185
186static void map_iter_free(struct map_iter *iter)
187{
188	if (iter) {
189		kfree(iter->key);
190		kfree(iter);
191	}
192}
193
194static struct map_iter *map_iter_alloc(struct bpf_map *map)
195{
196	struct map_iter *iter;
197
198	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
199	if (!iter)
200		goto error;
201
202	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
203	if (!iter->key)
204		goto error;
205
206	return iter;
207
208error:
209	map_iter_free(iter);
210	return NULL;
211}
212
213static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
214{
215	struct bpf_map *map = seq_file_to_map(m);
216	void *key = map_iter(m)->key;
217	void *prev_key;
218
219	(*pos)++;
220	if (map_iter(m)->done)
221		return NULL;
222
223	if (unlikely(v == SEQ_START_TOKEN))
224		prev_key = NULL;
225	else
226		prev_key = key;
227
228	rcu_read_lock();
229	if (map->ops->map_get_next_key(map, prev_key, key)) {
230		map_iter(m)->done = true;
231		key = NULL;
232	}
233	rcu_read_unlock();
234	return key;
235}
236
237static void *map_seq_start(struct seq_file *m, loff_t *pos)
238{
239	if (map_iter(m)->done)
240		return NULL;
241
242	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
243}
244
245static void map_seq_stop(struct seq_file *m, void *v)
246{
247}
248
249static int map_seq_show(struct seq_file *m, void *v)
250{
251	struct bpf_map *map = seq_file_to_map(m);
252	void *key = map_iter(m)->key;
253
254	if (unlikely(v == SEQ_START_TOKEN)) {
255		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
256		seq_puts(m, "# WARNING!! The output format will change\n");
257	} else {
258		map->ops->map_seq_show_elem(map, key, m);
259	}
260
261	return 0;
262}
263
264static const struct seq_operations bpffs_map_seq_ops = {
265	.start	= map_seq_start,
266	.next	= map_seq_next,
267	.show	= map_seq_show,
268	.stop	= map_seq_stop,
269};
270
271static int bpffs_map_open(struct inode *inode, struct file *file)
272{
273	struct bpf_map *map = inode->i_private;
274	struct map_iter *iter;
275	struct seq_file *m;
276	int err;
277
278	iter = map_iter_alloc(map);
279	if (!iter)
280		return -ENOMEM;
281
282	err = seq_open(file, &bpffs_map_seq_ops);
283	if (err) {
284		map_iter_free(iter);
285		return err;
286	}
287
288	m = file->private_data;
289	m->private = iter;
290
291	return 0;
292}
293
294static int bpffs_map_release(struct inode *inode, struct file *file)
295{
296	struct seq_file *m = file->private_data;
297
298	map_iter_free(map_iter(m));
299
300	return seq_release(inode, file);
301}
302
303/* bpffs_map_fops should only implement the basic
304 * read operation for a BPF map.  The purpose is to
305 * provide a simple user intuitive way to do
306 * "cat bpffs/pathto/a-pinned-map".
307 *
308 * Other operations (e.g. write, lookup...) should be realized by
309 * the userspace tools (e.g. bpftool) through the
310 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
311 * interface.
312 */
313static const struct file_operations bpffs_map_fops = {
314	.open		= bpffs_map_open,
315	.read		= seq_read,
316	.release	= bpffs_map_release,
317};
318
319static int bpffs_obj_open(struct inode *inode, struct file *file)
320{
321	return -EIO;
322}
323
324static const struct file_operations bpffs_obj_fops = {
325	.open		= bpffs_obj_open,
326};
327
328static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
329			 const struct inode_operations *iops,
330			 const struct file_operations *fops)
331{
332	struct inode *dir = dentry->d_parent->d_inode;
333	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
334	if (IS_ERR(inode))
335		return PTR_ERR(inode);
336
337	inode->i_op = iops;
338	inode->i_fop = fops;
339	inode->i_private = raw;
340
341	bpf_dentry_finalize(dentry, inode, dir);
342	return 0;
343}
344
345static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
346{
347	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
348			     &bpffs_obj_fops);
349}
350
351static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
352{
353	struct bpf_map *map = arg;
354
355	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
356			     bpf_map_support_seq_show(map) ?
357			     &bpffs_map_fops : &bpffs_obj_fops);
358}
359
360static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
361{
362	struct bpf_link *link = arg;
363
364	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
365			     bpf_link_is_iter(link) ?
366			     &bpf_iter_fops : &bpffs_obj_fops);
367}
368
369static struct dentry *
370bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
371{
372	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
373	 * extensions. That allows populate_bpffs() to create special files.
374	 */
375	if ((dir->i_mode & S_IALLUGO) &&
376	    strchr(dentry->d_name.name, '.'))
377		return ERR_PTR(-EPERM);
378
379	return simple_lookup(dir, dentry, flags);
380}
381
382static int bpf_symlink(struct mnt_idmap *idmap, struct inode *dir,
383		       struct dentry *dentry, const char *target)
384{
385	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
386	struct inode *inode;
387
388	if (!link)
389		return -ENOMEM;
390
391	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
392	if (IS_ERR(inode)) {
393		kfree(link);
394		return PTR_ERR(inode);
395	}
396
397	inode->i_op = &simple_symlink_inode_operations;
398	inode->i_link = link;
399
400	bpf_dentry_finalize(dentry, inode, dir);
401	return 0;
402}
403
404static const struct inode_operations bpf_dir_iops = {
405	.lookup		= bpf_lookup,
406	.mkdir		= bpf_mkdir,
407	.symlink	= bpf_symlink,
408	.rmdir		= simple_rmdir,
409	.rename		= simple_rename,
410	.link		= simple_link,
411	.unlink		= simple_unlink,
412};
413
414/* pin iterator link into bpffs */
415static int bpf_iter_link_pin_kernel(struct dentry *parent,
416				    const char *name, struct bpf_link *link)
417{
418	umode_t mode = S_IFREG | S_IRUSR;
419	struct dentry *dentry;
420	int ret;
421
422	inode_lock(parent->d_inode);
423	dentry = lookup_one_len(name, parent, strlen(name));
424	if (IS_ERR(dentry)) {
425		inode_unlock(parent->d_inode);
426		return PTR_ERR(dentry);
427	}
428	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
429			    &bpf_iter_fops);
430	dput(dentry);
431	inode_unlock(parent->d_inode);
432	return ret;
433}
434
435static int bpf_obj_do_pin(int path_fd, const char __user *pathname, void *raw,
436			  enum bpf_type type)
437{
438	struct dentry *dentry;
439	struct inode *dir;
440	struct path path;
441	umode_t mode;
442	int ret;
443
444	dentry = user_path_create(path_fd, pathname, &path, 0);
445	if (IS_ERR(dentry))
446		return PTR_ERR(dentry);
447
448	dir = d_inode(path.dentry);
449	if (dir->i_op != &bpf_dir_iops) {
450		ret = -EPERM;
451		goto out;
452	}
453
454	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
455	ret = security_path_mknod(&path, dentry, mode, 0);
456	if (ret)
457		goto out;
458
459	switch (type) {
460	case BPF_TYPE_PROG:
461		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
462		break;
463	case BPF_TYPE_MAP:
464		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
465		break;
466	case BPF_TYPE_LINK:
467		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
468		break;
469	default:
470		ret = -EPERM;
471	}
472out:
473	done_path_create(&path, dentry);
474	return ret;
475}
476
477int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname)
478{
479	enum bpf_type type;
480	void *raw;
481	int ret;
482
483	raw = bpf_fd_probe_obj(ufd, &type);
484	if (IS_ERR(raw))
485		return PTR_ERR(raw);
486
487	ret = bpf_obj_do_pin(path_fd, pathname, raw, type);
488	if (ret != 0)
489		bpf_any_put(raw, type);
490
491	return ret;
492}
493
494static void *bpf_obj_do_get(int path_fd, const char __user *pathname,
495			    enum bpf_type *type, int flags)
496{
497	struct inode *inode;
498	struct path path;
499	void *raw;
500	int ret;
501
502	ret = user_path_at(path_fd, pathname, LOOKUP_FOLLOW, &path);
503	if (ret)
504		return ERR_PTR(ret);
505
506	inode = d_backing_inode(path.dentry);
507	ret = path_permission(&path, ACC_MODE(flags));
508	if (ret)
509		goto out;
510
511	ret = bpf_inode_type(inode, type);
512	if (ret)
513		goto out;
514
515	raw = bpf_any_get(inode->i_private, *type);
516	if (!IS_ERR(raw))
517		touch_atime(&path);
518
519	path_put(&path);
520	return raw;
521out:
522	path_put(&path);
523	return ERR_PTR(ret);
524}
525
526int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags)
527{
528	enum bpf_type type = BPF_TYPE_UNSPEC;
529	int f_flags;
530	void *raw;
531	int ret;
532
533	f_flags = bpf_get_file_flag(flags);
534	if (f_flags < 0)
535		return f_flags;
536
537	raw = bpf_obj_do_get(path_fd, pathname, &type, f_flags);
538	if (IS_ERR(raw))
539		return PTR_ERR(raw);
540
541	if (type == BPF_TYPE_PROG)
542		ret = bpf_prog_new_fd(raw);
543	else if (type == BPF_TYPE_MAP)
544		ret = bpf_map_new_fd(raw, f_flags);
545	else if (type == BPF_TYPE_LINK)
546		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
547	else
548		return -ENOENT;
549
550	if (ret < 0)
551		bpf_any_put(raw, type);
552	return ret;
553}
554
555static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
556{
557	struct bpf_prog *prog;
558	int ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
559	if (ret)
560		return ERR_PTR(ret);
561
562	if (inode->i_op == &bpf_map_iops)
563		return ERR_PTR(-EINVAL);
564	if (inode->i_op == &bpf_link_iops)
565		return ERR_PTR(-EINVAL);
566	if (inode->i_op != &bpf_prog_iops)
567		return ERR_PTR(-EACCES);
568
569	prog = inode->i_private;
570
571	ret = security_bpf_prog(prog);
572	if (ret < 0)
573		return ERR_PTR(ret);
574
575	if (!bpf_prog_get_ok(prog, &type, false))
576		return ERR_PTR(-EINVAL);
577
578	bpf_prog_inc(prog);
579	return prog;
580}
581
582struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
583{
584	struct bpf_prog *prog;
585	struct path path;
586	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
587	if (ret)
588		return ERR_PTR(ret);
589	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
590	if (!IS_ERR(prog))
591		touch_atime(&path);
592	path_put(&path);
593	return prog;
594}
595EXPORT_SYMBOL(bpf_prog_get_type_path);
596
597/*
598 * Display the mount options in /proc/mounts.
599 */
600static int bpf_show_options(struct seq_file *m, struct dentry *root)
601{
602	struct inode *inode = d_inode(root);
603	umode_t mode = inode->i_mode & S_IALLUGO & ~S_ISVTX;
604
605	if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
606		seq_printf(m, ",uid=%u",
607			   from_kuid_munged(&init_user_ns, inode->i_uid));
608	if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
609		seq_printf(m, ",gid=%u",
610			   from_kgid_munged(&init_user_ns, inode->i_gid));
611	if (mode != S_IRWXUGO)
612		seq_printf(m, ",mode=%o", mode);
613	return 0;
614}
615
616static void bpf_free_inode(struct inode *inode)
617{
618	enum bpf_type type;
619
620	if (S_ISLNK(inode->i_mode))
621		kfree(inode->i_link);
622	if (!bpf_inode_type(inode, &type))
623		bpf_any_put(inode->i_private, type);
624	free_inode_nonrcu(inode);
625}
626
627static const struct super_operations bpf_super_ops = {
628	.statfs		= simple_statfs,
629	.drop_inode	= generic_delete_inode,
630	.show_options	= bpf_show_options,
631	.free_inode	= bpf_free_inode,
632};
633
634enum {
635	OPT_UID,
636	OPT_GID,
637	OPT_MODE,
638};
639
640static const struct fs_parameter_spec bpf_fs_parameters[] = {
641	fsparam_u32	("uid",				OPT_UID),
642	fsparam_u32	("gid",				OPT_GID),
643	fsparam_u32oct	("mode",			OPT_MODE),
644	{}
645};
646
647struct bpf_mount_opts {
648	kuid_t uid;
649	kgid_t gid;
650	umode_t mode;
651};
652
653static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
654{
655	struct bpf_mount_opts *opts = fc->fs_private;
656	struct fs_parse_result result;
657	kuid_t uid;
658	kgid_t gid;
659	int opt;
660
661	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
662	if (opt < 0) {
663		/* We might like to report bad mount options here, but
664		 * traditionally we've ignored all mount options, so we'd
665		 * better continue to ignore non-existing options for bpf.
666		 */
667		if (opt == -ENOPARAM) {
668			opt = vfs_parse_fs_param_source(fc, param);
669			if (opt != -ENOPARAM)
670				return opt;
671
672			return 0;
673		}
674
675		if (opt < 0)
676			return opt;
677	}
678
679	switch (opt) {
680	case OPT_UID:
681		uid = make_kuid(current_user_ns(), result.uint_32);
682		if (!uid_valid(uid))
683			goto bad_value;
684
685		/*
686		 * The requested uid must be representable in the
687		 * filesystem's idmapping.
688		 */
689		if (!kuid_has_mapping(fc->user_ns, uid))
690			goto bad_value;
691
692		opts->uid = uid;
693		break;
694	case OPT_GID:
695		gid = make_kgid(current_user_ns(), result.uint_32);
696		if (!gid_valid(gid))
697			goto bad_value;
698
699		/*
700		 * The requested gid must be representable in the
701		 * filesystem's idmapping.
702		 */
703		if (!kgid_has_mapping(fc->user_ns, gid))
704			goto bad_value;
705
706		opts->gid = gid;
707		break;
708	case OPT_MODE:
709		opts->mode = result.uint_32 & S_IALLUGO;
710		break;
711	}
712
713	return 0;
714bad_value:
715	return invalfc(fc, "Bad value for '%s'", param->key);
716}
717
718struct bpf_preload_ops *bpf_preload_ops;
719EXPORT_SYMBOL_GPL(bpf_preload_ops);
720
721static bool bpf_preload_mod_get(void)
722{
723	/* If bpf_preload.ko wasn't loaded earlier then load it now.
724	 * When bpf_preload is built into vmlinux the module's __init
725	 * function will populate it.
726	 */
727	if (!bpf_preload_ops) {
728		request_module("bpf_preload");
729		if (!bpf_preload_ops)
730			return false;
731	}
732	/* And grab the reference, so the module doesn't disappear while the
733	 * kernel is interacting with the kernel module and its UMD.
734	 */
735	if (!try_module_get(bpf_preload_ops->owner)) {
736		pr_err("bpf_preload module get failed.\n");
737		return false;
738	}
739	return true;
740}
741
742static void bpf_preload_mod_put(void)
743{
744	if (bpf_preload_ops)
745		/* now user can "rmmod bpf_preload" if necessary */
746		module_put(bpf_preload_ops->owner);
747}
748
749static DEFINE_MUTEX(bpf_preload_lock);
750
751static int populate_bpffs(struct dentry *parent)
752{
753	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
754	int err = 0, i;
755
756	/* grab the mutex to make sure the kernel interactions with bpf_preload
757	 * are serialized
758	 */
759	mutex_lock(&bpf_preload_lock);
760
761	/* if bpf_preload.ko wasn't built into vmlinux then load it */
762	if (!bpf_preload_mod_get())
763		goto out;
764
765	err = bpf_preload_ops->preload(objs);
766	if (err)
767		goto out_put;
768	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
769		bpf_link_inc(objs[i].link);
770		err = bpf_iter_link_pin_kernel(parent,
771					       objs[i].link_name, objs[i].link);
772		if (err) {
773			bpf_link_put(objs[i].link);
774			goto out_put;
775		}
776	}
777out_put:
778	bpf_preload_mod_put();
779out:
780	mutex_unlock(&bpf_preload_lock);
781	return err;
782}
783
784static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
785{
786	static const struct tree_descr bpf_rfiles[] = { { "" } };
787	struct bpf_mount_opts *opts = fc->fs_private;
788	struct inode *inode;
789	int ret;
790
791	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
792	if (ret)
793		return ret;
794
795	sb->s_op = &bpf_super_ops;
796
797	inode = sb->s_root->d_inode;
798	inode->i_uid = opts->uid;
799	inode->i_gid = opts->gid;
800	inode->i_op = &bpf_dir_iops;
801	inode->i_mode &= ~S_IALLUGO;
802	populate_bpffs(sb->s_root);
803	inode->i_mode |= S_ISVTX | opts->mode;
804	return 0;
805}
806
807static int bpf_get_tree(struct fs_context *fc)
808{
809	return get_tree_nodev(fc, bpf_fill_super);
810}
811
812static void bpf_free_fc(struct fs_context *fc)
813{
814	kfree(fc->fs_private);
815}
816
817static const struct fs_context_operations bpf_context_ops = {
818	.free		= bpf_free_fc,
819	.parse_param	= bpf_parse_param,
820	.get_tree	= bpf_get_tree,
821};
822
823/*
824 * Set up the filesystem mount context.
825 */
826static int bpf_init_fs_context(struct fs_context *fc)
827{
828	struct bpf_mount_opts *opts;
829
830	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
831	if (!opts)
832		return -ENOMEM;
833
834	opts->mode = S_IRWXUGO;
835	opts->uid = current_fsuid();
836	opts->gid = current_fsgid();
837
838	fc->fs_private = opts;
839	fc->ops = &bpf_context_ops;
840	return 0;
841}
842
843static struct file_system_type bpf_fs_type = {
844	.owner		= THIS_MODULE,
845	.name		= "bpf",
846	.init_fs_context = bpf_init_fs_context,
847	.parameters	= bpf_fs_parameters,
848	.kill_sb	= kill_litter_super,
849};
850
851static int __init bpf_init(void)
852{
853	int ret;
854
855	ret = sysfs_create_mount_point(fs_kobj, "bpf");
856	if (ret)
857		return ret;
858
859	ret = register_filesystem(&bpf_fs_type);
860	if (ret)
861		sysfs_remove_mount_point(fs_kobj, "bpf");
862
863	return ret;
864}
865fs_initcall(bpf_init);
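
Relative to v6.2, the v6.8 version threads a path_fd argument through bpf_obj_pin_user()/bpf_obj_get_user(), accepts uid= and gid= mount options, and moves to mnt_idmap plus the newer inode timestamp helpers. A hedged userspace sketch of path_fd-relative pinning follows, assuming the installed linux/bpf.h already carries the path_fd attribute and the BPF_F_PATH_FD flag (added around v6.5); obj_pin_at() is a made-up helper name.

/*
 * Userspace sketch: pin "name" relative to an already opened bpffs
 * directory fd. Without BPF_F_PATH_FD the kernel ignores path_fd and
 * behaves like the v6.2 code, resolving the path from AT_FDCWD.
 */
#include <fcntl.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int obj_pin_at(int dir_fd, const char *name, int bpf_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bpf_fd = bpf_fd;
	attr.path_fd = dir_fd;				/* consumed only with BPF_F_PATH_FD */
	attr.file_flags = BPF_F_PATH_FD;
	attr.pathname = (uint64_t)(unsigned long)name;	/* interpreted relative to dir_fd */
	return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}

A typical caller opens the directory once, e.g. dir_fd = open("/sys/fs/bpf", O_RDONLY | O_DIRECTORY), and then pins several objects relative to it.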