xref: /openbmc/linux/kernel/bpf/inode.c (revision 0bf49ffb)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Authors:
 *
 *	Daniel Borkmann <daniel@iogearbox.net>
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include "preload/bpf_preload.h"

enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
	BPF_TYPE_LINK,
};

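/* bpf_any_get()/bpf_any_put() take and drop a reference on the prog, map or
 * link behind @raw, according to its bpffs object type.
 */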
static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_inc_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_inc(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_put(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

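/* Figure out which kind of BPF object an fd refers to by trying to take a
 * map, then a prog, then a link reference. On success *type is set and a
 * referenced object is returned; otherwise -EINVAL.
 */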
static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	raw = bpf_map_get_with_uref(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_MAP;
		return raw;
	}

	raw = bpf_prog_get(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		return raw;
	}

	raw = bpf_link_get_from_fd(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_LINK;
		return raw;
	}

	return ERR_PTR(-EINVAL);
}

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops  = { };

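/* Allocate a new bpffs inode of the given mode (directory, regular file or
 * symlink only) and initialize its timestamps and ownership.
 */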
static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	inode_init_owner(&init_user_ns, inode, dir, mode);

	return inode;
}

static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else if (inode->i_op == &bpf_link_iops)
		*type = BPF_TYPE_LINK;
	else
		return -EACCES;

	return 0;
}

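/* Instantiate @dentry with @inode, grab an extra dentry reference and update
 * the parent directory's mtime/ctime.
 */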
static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}

static int bpf_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
		     struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

struct map_iter {
	void *key;
	bool done;
};

static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}

static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}

static void map_iter_free(struct map_iter *iter)
{
	if (iter) {
		kfree(iter->key);
		kfree(iter);
	}
}

static struct map_iter *map_iter_alloc(struct bpf_map *map)
{
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	if (!iter)
		goto error;

	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
	if (!iter->key)
		goto error;

	return iter;

error:
	map_iter_free(iter);
	return NULL;
}

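/* seq_file iteration over the map: each step advances the cursor key via
 * map->ops->map_get_next_key() under RCU; iteration ends once no further
 * key is found.
 */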
static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	rcu_read_lock();
	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		key = NULL;
	}
	rcu_read_unlock();
	return key;
}

static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}

static void map_seq_stop(struct seq_file *m, void *v)
{
}

static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}

static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};

static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}

static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}

/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map.  The purpose is to
 * provide a simple, intuitive way for users to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be handled by
 * userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interfaces.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};
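/* Illustrative usage from userspace (map id and path below are examples
 * only; reading works for map types that implement map_seq_show_elem(),
 * see bpf_mkmap() below):
 *
 *	# bpftool map pin id 7 /sys/fs/bpf/my_map
 *	# cat /sys/fs/bpf/my_map
 */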

static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};

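/* Create a bpffs inode with the given mode, wire up the supplied inode and
 * file operations, stash the BPF object in i_private and instantiate the
 * dentry.
 */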
static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}

static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}

static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_link *link = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
			     bpf_link_is_iter(link) ?
			     &bpf_iter_fops : &bpffs_obj_fops);
}

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions. That allows populate_bpffs() to create special files.
	 */
	if ((dir->i_mode & S_IALLUGO) &&
	    strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}

static int bpf_symlink(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};

/* pin iterator link into bpffs */
static int bpf_iter_link_pin_kernel(struct dentry *parent,
				    const char *name, struct bpf_link *link)
{
	umode_t mode = S_IFREG | S_IRUSR;
	struct dentry *dentry;
	int ret;

	inode_lock(parent->d_inode);
	dentry = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(dentry)) {
		inode_unlock(parent->d_inode);
		return PTR_ERR(dentry);
	}
	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
			    &bpf_iter_fops);
	dput(dentry);
	inode_unlock(parent->d_inode);
	return ret;
}

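/* Create the pinning node at @pathname: resolve the parent, make sure it is
 * a bpffs directory, and create a prog/map/link object inode there.
 */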
static int bpf_obj_do_pin(const char __user *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = user_path_create(AT_FDCWD, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());

	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	case BPF_TYPE_LINK:
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}

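/* Entry point for the BPF_OBJ_PIN command: take a reference on the object
 * behind @ufd and pin it at @pathname; the reference is dropped again on
 * failure.
 *
 * Illustrative userspace invocation (fd and path are examples only; attr
 * layout per include/uapi/linux/bpf.h):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.bpf_fd   = prog_fd;
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 */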
int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	enum bpf_type type;
	void *raw;
	int ret;

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	ret = bpf_obj_do_pin(pathname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);

	return ret;
}

static void *bpf_obj_do_get(const char __user *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = path_permission(&path, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}

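/* Entry point for the BPF_OBJ_GET command: look up the pinned object at
 * @pathname, take a reference and install a new fd for it; the reference is
 * dropped if fd allocation fails.
 */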
int bpf_obj_get_user(const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	int f_flags;
	void *raw;
	int ret;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	raw = bpf_obj_do_get(pathname, &type, f_flags);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else if (type == BPF_TYPE_LINK)
		ret = bpf_link_new_fd(raw);
	else
		return -ENOENT;

	if (ret < 0)
		bpf_any_put(raw, type);
	return ret;
}

static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(&init_user_ns, inode, MAY_READ);
	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}

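/* In-kernel helper: look up a pinned program by kernel path, verify that it
 * is of the expected @type and return it with an extra reference held.
 */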
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);

/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}

static void bpf_free_inode(struct inode *inode)
{
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,
};

enum {
	OPT_MODE,
};

static const struct fs_parameter_spec bpf_fs_parameters[] = {
	fsparam_u32oct	("mode",			OPT_MODE),
	{}
};
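/* The only mount option is "mode", the octal permission mode of the bpffs
 * root. Illustrative mount invocation (mountpoint and mode are examples):
 *
 *	# mount -t bpf -o mode=0700 bpffs /sys/fs/bpf
 */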

struct bpf_mount_opts {
	umode_t mode;
};

static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct bpf_mount_opts *opts = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
	if (opt < 0)
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		return opt == -ENOPARAM ? 0 : opt;

	switch (opt) {
	case OPT_MODE:
		opts->mode = result.uint_32 & S_IALLUGO;
		break;
	}

	return 0;
}

struct bpf_preload_ops *bpf_preload_ops;
EXPORT_SYMBOL_GPL(bpf_preload_ops);

static bool bpf_preload_mod_get(void)
{
	/* If bpf_preload.ko wasn't loaded earlier then load it now.
	 * When bpf_preload is built into vmlinux the module's __init
	 * function will have set bpf_preload_ops already.
	 */
	if (!bpf_preload_ops) {
		request_module("bpf_preload");
		if (!bpf_preload_ops)
			return false;
	}
	/* Grab a reference so the module doesn't disappear while the
	 * kernel is interacting with it and its UMD.
	 */
	if (!try_module_get(bpf_preload_ops->owner)) {
		pr_err("bpf_preload module get failed.\n");
		return false;
	}
	return true;
}

static void bpf_preload_mod_put(void)
{
	if (bpf_preload_ops)
		/* now user can "rmmod bpf_preload" if necessary */
		module_put(bpf_preload_ops->owner);
}

static DEFINE_MUTEX(bpf_preload_lock);

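/* Pre-create the BPF iterator links provided by the bpf_preload UMD (or its
 * built-in equivalent) and pin them in the root of a freshly mounted bpffs.
 */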
static int populate_bpffs(struct dentry *parent)
{
	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
	struct bpf_link *links[BPF_PRELOAD_LINKS] = {};
	int err = 0, i;

	/* grab the mutex to make sure that kernel interactions with the
	 * bpf_preload UMD are serialized
	 */
	mutex_lock(&bpf_preload_lock);

	/* if bpf_preload.ko wasn't built into vmlinux then load it */
	if (!bpf_preload_mod_get())
		goto out;

	if (!bpf_preload_ops->info.tgid) {
		/* preload() will start the UMD, which will load the BPF iterator programs */
		err = bpf_preload_ops->preload(objs);
		if (err)
			goto out_put;
		for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
			links[i] = bpf_link_by_id(objs[i].link_id);
			if (IS_ERR(links[i])) {
				err = PTR_ERR(links[i]);
				goto out_put;
			}
		}
		for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
			err = bpf_iter_link_pin_kernel(parent,
						       objs[i].link_name, links[i]);
			if (err)
				goto out_put;
			/* do not unlink successfully pinned links even
			 * if a later link fails to pin
			 */
			links[i] = NULL;
		}
		/* finish() will tell the UMD process to exit */
		err = bpf_preload_ops->finish();
		if (err)
			goto out_put;
	}
out_put:
	bpf_preload_mod_put();
out:
	mutex_unlock(&bpf_preload_lock);
	for (i = 0; i < BPF_PRELOAD_LINKS && err; i++)
		if (!IS_ERR_OR_NULL(links[i]))
			bpf_link_put(links[i]);
	return err;
}

static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = fc->fs_private;
	struct inode *inode;
	int ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	populate_bpffs(sb->s_root);
	inode->i_mode |= S_ISVTX | opts->mode;
	return 0;
}

static int bpf_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, bpf_fill_super);
}

static void bpf_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations bpf_context_ops = {
	.free		= bpf_free_fc,
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int bpf_init_fs_context(struct fs_context *fc)
{
	struct bpf_mount_opts *opts;

	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	opts->mode = S_IRWXUGO;

	fc->fs_private = opts;
	fc->ops = &bpf_context_ops;
	return 0;
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.init_fs_context = bpf_init_fs_context,
	.parameters	= bpf_fs_parameters,
	.kill_sb	= kill_litter_super,
};

static int __init bpf_init(void)
{
	int ret;

	mutex_init(&bpf_preload_lock);

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);