// SPDX-License-Identifier: GPL-2.0-only
/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Authors:
 *
 *	Daniel Borkmann <daniel@iogearbox.net>
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include "preload/bpf_preload.h"

enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
	BPF_TYPE_LINK,
};

static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_inc_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_inc(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_put(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

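/* Given an fd of unknown flavour, try the map, program and link getters in
 * turn and report the matching BPF object type back through @type.
 */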
static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	raw = bpf_map_get_with_uref(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_MAP;
		return raw;
	}

	raw = bpf_prog_get(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		return raw;
	}

	raw = bpf_link_get_from_fd(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_LINK;
		return raw;
	}

	return ERR_PTR(-EINVAL);
}

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops = { };

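/* Allocate a fresh inode on the bpffs superblock; only directories, regular
 * files and symlinks are supported.
 */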
static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = inode_set_ctime_current(inode);
	inode->i_mtime = inode->i_atime;

	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);

	return inode;
}

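/* Map an inode back to the type of BPF object pinned in it, based on which
 * inode_operations table it was created with.
 */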
static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else if (inode->i_op == &bpf_link_iops)
		*type = BPF_TYPE_LINK;
	else
		return -EACCES;

	return 0;
}

static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = inode_set_ctime_current(dir);
}

static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		     struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

struct map_iter {
	void *key;
	bool done;
};

static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}

static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}

static void map_iter_free(struct map_iter *iter)
{
	if (iter) {
		kfree(iter->key);
		kfree(iter);
	}
}

static struct map_iter *map_iter_alloc(struct bpf_map *map)
{
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	if (!iter)
		goto error;

	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
	if (!iter->key)
		goto error;

	return iter;

error:
	map_iter_free(iter);
	return NULL;
}

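/* seq_file iteration over a pinned map: each step asks the map for the key
 * following the previous one via ->map_get_next_key() under RCU and stops
 * once the map reports no further key.
 */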
static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	rcu_read_lock();
	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		key = NULL;
	}
	rcu_read_unlock();
	return key;
}

static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}

static void map_seq_stop(struct seq_file *m, void *v)
{
}

static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}

static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};

static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}

static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}

/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map. The purpose is to
 * provide a simple, intuitive way for the user to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be realized by
 * the userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interface.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};

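/* Fallback file_operations for pinned objects that cannot be read as a
 * seq_file: open() simply fails with -EIO. The object itself is instead
 * retrieved through the BPF_OBJ_GET command (see bpf_obj_get_user()).
 */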
static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};

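/* Common pinning helper: create an inode in the parent directory of @dentry,
 * attach the given inode/file operations and stash the raw object pointer in
 * i_private.
 */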
static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}

static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}

static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_link *link = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
			     bpf_link_is_iter(link) ?
			     &bpf_iter_fops : &bpffs_obj_fops);
}

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions. That allows populate_bpffs() to create special files.
	 */
	if ((dir->i_mode & S_IALLUGO) &&
	    strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}

static int bpf_symlink(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};

/* pin iterator link into bpffs */
static int bpf_iter_link_pin_kernel(struct dentry *parent,
				    const char *name, struct bpf_link *link)
{
	umode_t mode = S_IFREG | S_IRUSR;
	struct dentry *dentry;
	int ret;

	inode_lock(parent->d_inode);
	dentry = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(dentry)) {
		inode_unlock(parent->d_inode);
		return PTR_ERR(dentry);
	}
	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
			    &bpf_iter_fops);
	dput(dentry);
	inode_unlock(parent->d_inode);
	return ret;
}

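/* Back end of BPF_OBJ_PIN: resolve the user-supplied path, refuse to create
 * the node anywhere but inside a bpffs directory, and pin the object with
 * mode 0600 adjusted by the caller's umask.
 */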
static int bpf_obj_do_pin(int path_fd, const char __user *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = user_path_create(path_fd, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	case BPF_TYPE_LINK:
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}

int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname)
{
	enum bpf_type type;
	void *raw;
	int ret;

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	ret = bpf_obj_do_pin(path_fd, pathname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);

	return ret;
}

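/* Back end of BPF_OBJ_GET: look up the pinned inode, check permissions for
 * the requested access mode and take a new reference on the object stored in
 * i_private.
 */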
static void *bpf_obj_do_get(int path_fd, const char __user *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = user_path_at(path_fd, pathname, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = path_permission(&path, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}

int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	int f_flags;
	void *raw;
	int ret;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	raw = bpf_obj_do_get(path_fd, pathname, &type, f_flags);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else if (type == BPF_TYPE_LINK)
		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
	else
		return -ENOENT;

	if (ret < 0)
		bpf_any_put(raw, type);
	return ret;
}

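/* In-kernel, path-based program lookup: verify that the inode pins a program
 * of the expected type and return it with an extra reference held.
 */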
static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);

/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}

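/* Releasing a pinned inode drops the reference that pinning took on the
 * underlying program, map or link, and frees a symlink's target string.
 */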
static void bpf_free_inode(struct inode *inode)
{
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,
};

enum {
	OPT_MODE,
};

static const struct fs_parameter_spec bpf_fs_parameters[] = {
	fsparam_u32oct	("mode",	OPT_MODE),
	{}
};

struct bpf_mount_opts {
	umode_t mode;
};

static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct bpf_mount_opts *opts = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
	if (opt < 0) {
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		if (opt == -ENOPARAM) {
			opt = vfs_parse_fs_param_source(fc, param);
			if (opt != -ENOPARAM)
				return opt;

			return 0;
		}

		if (opt < 0)
			return opt;
	}

	switch (opt) {
	case OPT_MODE:
		opts->mode = result.uint_32 & S_IALLUGO;
		break;
	}

	return 0;
}

struct bpf_preload_ops *bpf_preload_ops;
EXPORT_SYMBOL_GPL(bpf_preload_ops);

static bool bpf_preload_mod_get(void)
{
	/* If bpf_preload.ko wasn't loaded earlier then load it now.
	 * When bpf_preload is built into vmlinux the module's __init
	 * function will populate it.
	 */
	if (!bpf_preload_ops) {
		request_module("bpf_preload");
		if (!bpf_preload_ops)
			return false;
	}
	/* And grab the reference, so the module doesn't disappear while the
	 * kernel is interacting with the kernel module and its UMD.
	 */
	if (!try_module_get(bpf_preload_ops->owner)) {
		pr_err("bpf_preload module get failed.\n");
		return false;
	}
	return true;
}

static void bpf_preload_mod_put(void)
{
	if (bpf_preload_ops)
		/* now user can "rmmod bpf_preload" if necessary */
		module_put(bpf_preload_ops->owner);
}

static DEFINE_MUTEX(bpf_preload_lock);

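/* Ask the bpf_preload handler for its iterator links and pin them as special
 * files in the root directory of a newly mounted bpffs instance.
 */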
static int populate_bpffs(struct dentry *parent)
{
	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
	int err = 0, i;

	/* grab the mutex to make sure the kernel interactions with bpf_preload
	 * are serialized
	 */
	mutex_lock(&bpf_preload_lock);

	/* if bpf_preload.ko wasn't built into vmlinux then load it */
	if (!bpf_preload_mod_get())
		goto out;

	err = bpf_preload_ops->preload(objs);
	if (err)
		goto out_put;
	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
		bpf_link_inc(objs[i].link);
		err = bpf_iter_link_pin_kernel(parent,
					       objs[i].link_name, objs[i].link);
		if (err) {
			bpf_link_put(objs[i].link);
			goto out_put;
		}
	}
out_put:
	bpf_preload_mod_put();
out:
	mutex_unlock(&bpf_preload_lock);
	return err;
}

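/* Fill in the superblock for a new bpffs mount: install the bpffs directory
 * operations on the root inode, populate the preload iterators and then apply
 * the requested mode plus the sticky bit to the root directory.
 */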
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = fc->fs_private;
	struct inode *inode;
	int ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	populate_bpffs(sb->s_root);
	inode->i_mode |= S_ISVTX | opts->mode;
	return 0;
}

static int bpf_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, bpf_fill_super);
}

static void bpf_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations bpf_context_ops = {
	.free		= bpf_free_fc,
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int bpf_init_fs_context(struct fs_context *fc)
{
	struct bpf_mount_opts *opts;

	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	opts->mode = S_IRWXUGO;

	fc->fs_private = opts;
	fc->ops = &bpf_context_ops;
	return 0;
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.init_fs_context = bpf_init_fs_context,
	.parameters	= bpf_fs_parameters,
	.kill_sb	= kill_litter_super,
};

static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);