// SPDX-License-Identifier: GPL-2.0-only
/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Authors:
 *
 *	Daniel Borkmann <daniel@iogearbox.net>
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include "preload/bpf_preload.h"

enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
	BPF_TYPE_LINK,
};

static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_inc_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_inc(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_put(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	raw = bpf_map_get_with_uref(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_MAP;
		return raw;
	}

	raw = bpf_prog_get(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		return raw;
	}

	raw = bpf_link_get_from_fd(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_LINK;
		return raw;
	}

	return ERR_PTR(-EINVAL);
}

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops = { };

static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	inode_init_owner(inode, dir, mode);

	return inode;
}

static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else if (inode->i_op == &bpf_link_iops)
		*type = BPF_TYPE_LINK;
	else
		return -EACCES;

	return 0;
}

static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}

static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

struct map_iter {
	void *key;
	bool done;
};

static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}

static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}

static void map_iter_free(struct map_iter *iter)
{
	if (iter) {
		kfree(iter->key);
		kfree(iter);
	}
}

static struct map_iter *map_iter_alloc(struct bpf_map *map)
{
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	if (!iter)
		goto error;

	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
	if (!iter->key)
		goto error;

	return iter;

error:
	map_iter_free(iter);
	return NULL;
}

static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	rcu_read_lock();
	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		key = NULL;
	}
	rcu_read_unlock();
	return key;
}

static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}

static void map_seq_stop(struct seq_file *m, void *v)
{
}

static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}

static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};

static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}

static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}

/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map.  The purpose is to
 * provide a simple user intuitive way to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be realized by
 * the userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interface.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};
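
/* Illustrative only: for a map whose ops implement map_seq_show_elem(),
 * the read-only view above means a pinned map can be inspected straight
 * from the shell. The map id and path below are made-up placeholders:
 *
 *	# mount -t bpf bpf /sys/fs/bpf
 *	# bpftool map pin id 7 /sys/fs/bpf/my_map
 *	# cat /sys/fs/bpf/my_map
 *
 * Structured dumps and updates should still go through bpftool
 * (e.g. "bpftool map dump"), which uses the bpf(2) lookup/update
 * interface rather than this debug-oriented seq_file output.
 */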

static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};

static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}

static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}

static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_link *link = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
			     bpf_link_is_iter(link) ?
			     &bpf_iter_fops : &bpffs_obj_fops);
}

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions. That allows populate_bpffs() to create special files.
	 */
	if ((dir->i_mode & S_IALLUGO) &&
	    strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}
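
/* Illustrative sketch of the dot restriction above (object id and paths
 * are made-up placeholders): with the default mount mode, pinning to a
 * name containing a dot is expected to fail with -EPERM, while the same
 * name without the dot is fine:
 *
 *	# bpftool prog pin id 7 /sys/fs/bpf/my.prog		(fails: EPERM)
 *	# bpftool prog pin id 7 /sys/fs/bpf/my_prog		(ok)
 */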

static int bpf_symlink(struct inode *dir, struct dentry *dentry,
		       const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};

/* pin iterator link into bpffs */
static int bpf_iter_link_pin_kernel(struct dentry *parent,
				    const char *name, struct bpf_link *link)
{
	umode_t mode = S_IFREG | S_IRUSR;
	struct dentry *dentry;
	int ret;

	inode_lock(parent->d_inode);
	dentry = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(dentry)) {
		inode_unlock(parent->d_inode);
		return PTR_ERR(dentry);
	}
	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
			    &bpf_iter_fops);
	dput(dentry);
	inode_unlock(parent->d_inode);
	return ret;
}

static int bpf_obj_do_pin(const char __user *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = user_path_create(AT_FDCWD, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());

	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	case BPF_TYPE_LINK:
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}

int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	enum bpf_type type;
	void *raw;
	int ret;

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	ret = bpf_obj_do_pin(pathname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);

	return ret;
}

static void *bpf_obj_do_get(const char __user *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = inode_permission(inode, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}

int bpf_obj_get_user(const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	int f_flags;
	void *raw;
	int ret;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	raw = bpf_obj_do_get(pathname, &type, f_flags);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else if (type == BPF_TYPE_LINK)
		ret = bpf_link_new_fd(raw);
	else
		return -ENOENT;

	if (ret < 0)
		bpf_any_put(raw, type);
	return ret;
}
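
/* Rough userspace sketch of how the two entry points above are reached
 * (assumes libbpf; the path and the way the prog fd is obtained are
 * placeholders, not defined by this file):
 *
 *	int prog_fd = ...;	// e.g. bpf_program__fd() after bpf_object__load()
 *
 *	bpf_obj_pin(prog_fd, "/sys/fs/bpf/my_prog");	// BPF_OBJ_PIN -> bpf_obj_pin_user()
 *	...
 *	int fd = bpf_obj_get("/sys/fs/bpf/my_prog");	// BPF_OBJ_GET -> bpf_obj_get_user()
 */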

static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(inode, MAY_READ);
	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);

/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}

static void bpf_free_inode(struct inode *inode)
{
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,
};

enum {
	OPT_MODE,
};

static const struct fs_parameter_spec bpf_fs_parameters[] = {
	fsparam_u32oct	("mode", OPT_MODE),
	{}
};

struct bpf_mount_opts {
	umode_t mode;
};

static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct bpf_mount_opts *opts = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
	if (opt < 0)
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		return opt == -ENOPARAM ? 0 : opt;

	switch (opt) {
	case OPT_MODE:
		opts->mode = result.uint_32 & S_IALLUGO;
		break;
	}

	return 0;
}
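
/* Illustrative only: "mode" is the single recognized mount option (parsed
 * as octal above); anything else is silently ignored, as the comment in
 * bpf_parse_param() explains. For example:
 *
 *	# mount -t bpf -o mode=0700 bpf /sys/fs/bpf
 *
 * restricts the bpffs root to its owner instead of the default 0777
 * (plus the sticky bit applied in bpf_fill_super()).
 */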

struct bpf_preload_ops *bpf_preload_ops;
EXPORT_SYMBOL_GPL(bpf_preload_ops);

static bool bpf_preload_mod_get(void)
{
	/* If bpf_preload.ko wasn't loaded earlier then load it now.
	 * When bpf_preload is built into vmlinux the module's __init
	 * function will populate it.
	 */
	if (!bpf_preload_ops) {
		request_module("bpf_preload");
		if (!bpf_preload_ops)
			return false;
	}
	/* And grab the reference, so the module doesn't disappear while the
	 * kernel is interacting with the kernel module and its UMD.
	 */
	if (!try_module_get(bpf_preload_ops->owner)) {
		pr_err("bpf_preload module get failed.\n");
		return false;
	}
	return true;
}

static void bpf_preload_mod_put(void)
{
	if (bpf_preload_ops)
		/* now user can "rmmod bpf_preload" if necessary */
		module_put(bpf_preload_ops->owner);
}

static DEFINE_MUTEX(bpf_preload_lock);

static int populate_bpffs(struct dentry *parent)
{
	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
	struct bpf_link *links[BPF_PRELOAD_LINKS] = {};
	int err = 0, i;

	/* grab the mutex to make sure the kernel interactions with bpf_preload
	 * UMD are serialized
	 */
	mutex_lock(&bpf_preload_lock);

	/* if bpf_preload.ko wasn't built into vmlinux then load it */
	if (!bpf_preload_mod_get())
		goto out;

	if (!bpf_preload_ops->info.tgid) {
		/* preload() will start UMD that will load BPF iterator programs */
		err = bpf_preload_ops->preload(objs);
		if (err)
			goto out_put;
		for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
			links[i] = bpf_link_by_id(objs[i].link_id);
			if (IS_ERR(links[i])) {
				err = PTR_ERR(links[i]);
				goto out_put;
			}
		}
		for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
			err = bpf_iter_link_pin_kernel(parent,
						       objs[i].link_name,
						       links[i]);
			if (err)
				goto out_put;
			/* do not unlink successfully pinned links even
			 * if later link fails to pin
			 */
			links[i] = NULL;
		}
		/* finish() will tell UMD process to exit */
		err = bpf_preload_ops->finish();
		if (err)
			goto out_put;
	}
out_put:
	bpf_preload_mod_put();
out:
	mutex_unlock(&bpf_preload_lock);
	for (i = 0; i < BPF_PRELOAD_LINKS && err; i++)
		if (!IS_ERR_OR_NULL(links[i]))
			bpf_link_put(links[i]);
	return err;
}
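
/* Illustrative note: with CONFIG_BPF_PRELOAD enabled, the links pinned by
 * populate_bpffs() are BPF iterators that the preload UMD names (in current
 * trees) "progs.debug" and "maps.debug", so after mounting bpffs one can,
 * for instance:
 *
 *	# cat /sys/fs/bpf/progs.debug
 *
 * to get a human-readable listing of loaded programs. The exact file names
 * are chosen by the bpf_preload UMD, not by this file.
 */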

static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = fc->fs_private;
	struct inode *inode;
	int ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	populate_bpffs(sb->s_root);
	inode->i_mode |= S_ISVTX | opts->mode;
	return 0;
}

static int bpf_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, bpf_fill_super);
}

static void bpf_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations bpf_context_ops = {
	.free		= bpf_free_fc,
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int bpf_init_fs_context(struct fs_context *fc)
{
	struct bpf_mount_opts *opts;

	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	opts->mode = S_IRWXUGO;

	fc->fs_private = opts;
	fc->ops = &bpf_context_ops;
	return 0;
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.init_fs_context = bpf_init_fs_context,
	.parameters	= bpf_fs_parameters,
	.kill_sb	= kill_litter_super,
};

static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);