/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/pid_namespace.h>
#include <uapi/linux/magic.h>

MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, const struct kernel_param *kp);

unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");

#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)

#ifdef CONFIG_BLOCK
static struct file_system_type fuseblk_fs_type;
#endif

struct fuse_forget_link *fuse_alloc_forget(void)
{
	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
}

static struct inode *fuse_alloc_inode(struct super_block *sb)
{
	struct fuse_inode *fi;

	fi = alloc_inode_sb(sb, fuse_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;

	fi->i_time = 0;
	fi->inval_mask = ~0;
	fi->nodeid = 0;
	fi->nlookup = 0;
	fi->attr_version = 0;
	fi->orig_ino = 0;
	fi->state = 0;
	mutex_init(&fi->mutex);
	spin_lock_init(&fi->lock);
	fi->forget = fuse_alloc_forget();
	if (!fi->forget)
		goto out_free;

	if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
		goto out_free_forget;

	return &fi->inode;

out_free_forget:
	kfree(fi->forget);
out_free:
	kmem_cache_free(fuse_inode_cachep, fi);
	return NULL;
}

static void fuse_free_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	mutex_destroy(&fi->mutex);
	kfree(fi->forget);
#ifdef CONFIG_FUSE_DAX
	kfree(fi->dax);
#endif
	kmem_cache_free(fuse_inode_cachep, fi);
}

static void fuse_evict_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Will write inode on close/munmap and in all other dirtiers */
	WARN_ON(inode->i_state & I_DIRTY_INODE);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (inode->i_sb->s_flags & SB_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);

		if (FUSE_IS_DAX(inode))
			fuse_dax_inode_cleanup(inode);
		if (fi->nlookup) {
			fuse_queue_forget(fc, fi->forget, fi->nodeid,
					  fi->nlookup);
			fi->forget = NULL;
		}
	}
	if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
		WARN_ON(!list_empty(&fi->write_files));
		WARN_ON(!list_empty(&fi->queued_writes));
	}
}

static int fuse_reconfigure(struct fs_context *fsc)
{
	struct super_block *sb = fsc->root->d_sb;

	sync_filesystem(sb);
	if (fsc->sb_flags & SB_MANDLOCK)
		return -EINVAL;

	return 0;
}

/*
 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t fuse_squash_ino(u64 ino64)
{
	ino_t ino = (ino_t) ino64;
	if (sizeof(ino_t) < sizeof(u64))
		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
	return ino;
}

void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   u64 attr_valid, u32 cache_mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	lockdep_assert_held(&fi->lock);

	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	fi->i_time = attr_valid;
	/* Clear basic stats from invalid mask */
	set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0);

	inode->i_ino = fuse_squash_ino(attr->ino);
	inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid = make_kuid(fc->user_ns, attr->uid);
	inode->i_gid = make_kgid(fc->user_ns, attr->gid);
	inode->i_blocks = attr->blocks;

	/* Sanitize nsecs */
	attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
	attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
	attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);

	inode->i_atime.tv_sec = attr->atime;
	inode->i_atime.tv_nsec = attr->atimensec;
	/* mtime from server may be stale due to local buffered write */
	if (!(cache_mask & STATX_MTIME)) {
		inode->i_mtime.tv_sec = attr->mtime;
		inode->i_mtime.tv_nsec = attr->mtimensec;
	}
	if (!(cache_mask & STATX_CTIME)) {
		inode->i_ctime.tv_sec = attr->ctime;
		inode->i_ctime.tv_nsec = attr->ctimensec;
	}

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions. This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!fc->default_permissions)
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;

	/*
	 * We are refreshing inode data and it is possible that another
	 * client set suid/sgid or security.capability xattr. So clear
	 * S_NOSEC. Ideally, we could have cleared it only if suid/sgid
	 * was set or if security.capability xattr was set. But we don't
	 * know if security.capability has been set or not. So clear it
	 * anyway. It's less efficient but should be safe.
	 */
	inode->i_flags &= ~S_NOSEC;
}

u32 fuse_get_cache_mask(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fc->writeback_cache || !S_ISREG(inode->i_mode))
		return 0;

	return STATX_MTIME | STATX_CTIME | STATX_SIZE;
}

void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    u64 attr_valid, u64 attr_version)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	u32 cache_mask;
	loff_t oldsize;
	struct timespec64 old_mtime;

	spin_lock(&fi->lock);
	/*
	 * In case of writeback_cache enabled, writes update mtime, ctime and
	 * may update i_size. In these cases trust the cached value in the
	 * inode.
	 */
	cache_mask = fuse_get_cache_mask(inode);
	if (cache_mask & STATX_SIZE)
		attr->size = i_size_read(inode);

	if (cache_mask & STATX_MTIME) {
		attr->mtime = inode->i_mtime.tv_sec;
		attr->mtimensec = inode->i_mtime.tv_nsec;
	}
	if (cache_mask & STATX_CTIME) {
		attr->ctime = inode->i_ctime.tv_sec;
		attr->ctimensec = inode->i_ctime.tv_nsec;
	}

	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fi->lock);
		return;
	}

	old_mtime = inode->i_mtime;
	fuse_change_attributes_common(inode, attr, attr_valid, cache_mask);

	oldsize = inode->i_size;
	/*
	 * In case of writeback_cache enabled, the cached writes beyond EOF
	 * extend local i_size without keeping userspace server in sync. So,
	 * attr->size coming from server can be stale. We cannot trust it.
	 */
	if (!(cache_mask & STATX_SIZE))
		i_size_write(inode, attr->size);
	spin_unlock(&fi->lock);

	if (!cache_mask && S_ISREG(inode->i_mode)) {
		bool inval = false;

		if (oldsize != attr->size) {
			truncate_pagecache(inode, attr->size);
			if (!fc->explicit_inval_data)
				inval = true;
		} else if (fc->auto_inval_data) {
			struct timespec64 new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			};

			/*
			 * Auto inval mode also checks and invalidates if mtime
			 * has changed.
			 */
			if (!timespec64_equal(&old_mtime, &new_mtime))
				inval = true;
		}

		if (inval)
			invalidate_inode_pages2(inode->i_mapping);
	}

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_dontcache(inode, attr->flags);
}

static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_conn *fc)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	inode->i_mtime.tv_sec = attr->mtime;
	inode->i_mtime.tv_nsec = attr->mtimensec;
	inode->i_ctime.tv_sec = attr->ctime;
	inode->i_ctime.tv_nsec = attr->ctimensec;
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode, attr->flags);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	} else
		BUG();
	/*
	 * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
	 * so they see the exact same behavior as before.
	 */
	if (!fc->posix_acl)
		inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
}

static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	if (get_node_id(inode) == nodeid)
		return 1;
	else
		return 0;
}

static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	get_fuse_inode(inode)->nodeid = nodeid;
	return 0;
}

struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	/*
	 * Auto mount points get their node id from the submount root, which is
	 * not a unique identifier within this filesystem.
	 *
	 * To avoid conflicts, do not place submount points into the inode hash
	 * table.
	 */
	if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
	    S_ISDIR(attr->mode)) {
		inode = new_inode(sb);
		if (!inode)
			return NULL;

		fuse_init_inode(inode, attr, fc);
		get_fuse_inode(inode)->nodeid = nodeid;
		inode->i_flags |= S_AUTOMOUNT;
		goto done;
	}

retry:
	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME;
		if (!fc->writeback_cache || !S_ISREG(attr->mode))
			inode->i_flags |= S_NOCMTIME;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr, fc);
		unlock_new_inode(inode);
	} else if (fuse_stale_inode(inode, generation, attr)) {
		/* nodeid was reused, any I/O on the old inode should fail */
		fuse_make_bad(inode);
		iput(inode);
		goto retry;
	}
done:
	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->nlookup++;
	spin_unlock(&fi->lock);
	fuse_change_attributes(inode, attr, attr_valid, attr_version);

	return inode;
}

struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_mount **fm)
{
	struct fuse_mount *fm_iter;
	struct inode *inode;

	WARN_ON(!rwsem_is_locked(&fc->killsb));
	list_for_each_entry(fm_iter, &fc->mounts, fc_entry) {
		if (!fm_iter->sb)
			continue;

		inode = ilookup5(fm_iter->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			if (fm)
				*fm = fm_iter;
			return inode;
		}
	}

	return NULL;
}

int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct fuse_inode *fi;
	struct inode *inode;
	pgoff_t pg_start;
	pgoff_t pg_end;

	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		return -ENOENT;

	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	spin_unlock(&fi->lock);

	fuse_invalidate_attr(inode);
	forget_all_cached_acls(inode);
	if (offset >= 0) {
		pg_start = offset >> PAGE_SHIFT;
		if (len <= 0)
			pg_end = -1;
		else
			pg_end = (offset + len - 1) >> PAGE_SHIFT;
		invalidate_inode_pages2_range(inode->i_mapping,
					      pg_start, pg_end);
	}
	iput(inode);
	return 0;
}

bool fuse_lock_inode(struct inode *inode)
{
	bool locked = false;

	if (!get_fuse_conn(inode)->parallel_dirops) {
		mutex_lock(&get_fuse_inode(inode)->mutex);
		locked = true;
	}

	return locked;
}

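/*
 * Usage sketch (illustrative, not taken from a specific caller): directory
 * operations typically pair the two helpers as
 *
 *	bool locked = fuse_lock_inode(dir);
 *	...issue the request for the directory...
 *	fuse_unlock_inode(dir, locked);
 *
 * so the per-inode mutex is only taken when the server has not negotiated
 * FUSE_PARALLEL_DIROPS.
 */
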
void fuse_unlock_inode(struct inode *inode, bool locked)
{
	if (locked)
		mutex_unlock(&get_fuse_inode(inode)->mutex);
}

static void fuse_umount_begin(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->no_force_umount)
		return;

	fuse_abort_conn(fc);

	// Only retire block-device-based superblocks.
	if (sb->s_bdev != NULL)
		retire_super(sb);
}

static void fuse_send_destroy(struct fuse_mount *fm)
{
	if (fm->fc->conn_init) {
		FUSE_ARGS(args);

		args.opcode = FUSE_DESTROY;
		args.force = true;
		args.nocreds = true;
		fuse_simple_request(fm, &args);
	}
}

static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
	stbuf->f_type    = FUSE_SUPER_MAGIC;
	stbuf->f_bsize   = attr->bsize;
	stbuf->f_frsize  = attr->frsize;
	stbuf->f_blocks  = attr->blocks;
	stbuf->f_bfree   = attr->bfree;
	stbuf->f_bavail  = attr->bavail;
	stbuf->f_files   = attr->files;
	stbuf->f_ffree   = attr->ffree;
	stbuf->f_namelen = attr->namelen;
	/* fsid is left zero */
}

static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	FUSE_ARGS(args);
	struct fuse_statfs_out outarg;
	int err;

	if (!fuse_allow_current_process(fm->fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;
		return 0;
	}

	memset(&outarg, 0, sizeof(outarg));
	args.in_numargs = 0;
	args.opcode = FUSE_STATFS;
	args.nodeid = get_node_id(d_inode(dentry));
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		convert_fuse_statfs(buf, &outarg.st);
	return err;
}

static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void)
{
	struct fuse_sync_bucket *bucket;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
	if (bucket) {
		init_waitqueue_head(&bucket->waitq);
		/* Initial active count */
		atomic_set(&bucket->count, 1);
	}
	return bucket;
}

static void fuse_sync_fs_writes(struct fuse_conn *fc)
{
	struct fuse_sync_bucket *bucket, *new_bucket;
	int count;

	new_bucket = fuse_sync_bucket_alloc();
	spin_lock(&fc->lock);
	bucket = rcu_dereference_protected(fc->curr_bucket, 1);
	count = atomic_read(&bucket->count);
	WARN_ON(count < 1);
	/* No outstanding writes? */
	if (count == 1) {
		spin_unlock(&fc->lock);
		kfree(new_bucket);
		return;
	}

	/*
	 * Completion of new bucket depends on completion of this bucket, so add
	 * one more count.
	 */
	atomic_inc(&new_bucket->count);
	rcu_assign_pointer(fc->curr_bucket, new_bucket);
	spin_unlock(&fc->lock);
	/*
	 * Drop initial active count. At this point if all writes in this and
	 * ancestor buckets complete, the count will go to zero and this task
	 * will be woken up.
	 */
	atomic_dec(&bucket->count);

	wait_event(bucket->waitq, atomic_read(&bucket->count) == 0);

	/* Drop temp count on descendant bucket */
	fuse_sync_bucket_dec(new_bucket);
	kfree_rcu(bucket, rcu);
}

static int fuse_sync_fs(struct super_block *sb, int wait)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct fuse_syncfs_in inarg;
	FUSE_ARGS(args);
	int err;

	/*
	 * Userspace cannot handle the wait == 0 case. Avoid a
	 * gratuitous roundtrip.
	 */
	if (!wait)
		return 0;

	/* The filesystem is being unmounted. Nothing to do. */
	if (!sb->s_root)
		return 0;

	if (!fc->sync_fs)
		return 0;

	fuse_sync_fs_writes(fc);

	memset(&inarg, 0, sizeof(inarg));
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.opcode = FUSE_SYNCFS;
	args.nodeid = get_node_id(sb->s_root->d_inode);
	args.out_numargs = 0;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->sync_fs = 0;
		err = 0;
	}

	return err;
}

enum {
	OPT_SOURCE,
	OPT_SUBTYPE,
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_BLKSIZE,
	OPT_ERR
};

static const struct fs_parameter_spec fuse_fs_parameters[] = {
	fsparam_string	("source",		OPT_SOURCE),
	fsparam_u32	("fd",			OPT_FD),
	fsparam_u32oct	("rootmode",		OPT_ROOTMODE),
	fsparam_u32	("user_id",		OPT_USER_ID),
	fsparam_u32	("group_id",		OPT_GROUP_ID),
	fsparam_flag	("default_permissions",	OPT_DEFAULT_PERMISSIONS),
	fsparam_flag	("allow_other",		OPT_ALLOW_OTHER),
	fsparam_u32	("max_read",		OPT_MAX_READ),
	fsparam_u32	("blksize",		OPT_BLKSIZE),
	fsparam_string	("subtype",		OPT_SUBTYPE),
	{}
};

static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;

	if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
		/*
		 * Ignore options coming from mount(MS_REMOUNT) for backward
		 * compatibility.
		 */
		if (fsc->oldapi)
			return 0;

		return invalfc(fsc, "No changes allowed in reconfigure");
	}

	opt = fs_parse(fsc, fuse_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_SOURCE:
		if (fsc->source)
			return invalfc(fsc, "Multiple sources specified");
		fsc->source = param->string;
		param->string = NULL;
		break;

	case OPT_SUBTYPE:
		if (ctx->subtype)
			return invalfc(fsc, "Multiple subtypes specified");
		ctx->subtype = param->string;
		param->string = NULL;
		return 0;

	case OPT_FD:
		ctx->fd = result.uint_32;
		ctx->fd_present = true;
		break;

	case OPT_ROOTMODE:
		if (!fuse_valid_type(result.uint_32))
			return invalfc(fsc, "Invalid rootmode");
		ctx->rootmode = result.uint_32;
		ctx->rootmode_present = true;
		break;

	case OPT_USER_ID:
		ctx->user_id = make_kuid(fsc->user_ns, result.uint_32);
		if (!uid_valid(ctx->user_id))
			return invalfc(fsc, "Invalid user_id");
		ctx->user_id_present = true;
		break;

	case OPT_GROUP_ID:
		ctx->group_id = make_kgid(fsc->user_ns, result.uint_32);
		if (!gid_valid(ctx->group_id))
			return invalfc(fsc, "Invalid group_id");
		ctx->group_id_present = true;
		break;

	case OPT_DEFAULT_PERMISSIONS:
		ctx->default_permissions = true;
		break;

	case OPT_ALLOW_OTHER:
		ctx->allow_other = true;
		break;

	case OPT_MAX_READ:
		ctx->max_read = result.uint_32;
		break;

	case OPT_BLKSIZE:
		if (!ctx->is_bdev)
			return invalfc(fsc, "blksize only supported for fuseblk");
		ctx->blksize = result.uint_32;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void fuse_free_fsc(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;

	if (ctx) {
		kfree(ctx->subtype);
		kfree(ctx);
	}
}

static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->legacy_opts_show) {
		seq_printf(m, ",user_id=%u",
			   from_kuid_munged(fc->user_ns, fc->user_id));
		seq_printf(m, ",group_id=%u",
			   from_kgid_munged(fc->user_ns, fc->group_id));
		if (fc->default_permissions)
			seq_puts(m, ",default_permissions");
		if (fc->allow_other)
			seq_puts(m, ",allow_other");
		if (fc->max_read != ~0)
			seq_printf(m, ",max_read=%u", fc->max_read);
		if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
			seq_printf(m, ",blksize=%lu", sb->s_blocksize);
	}
#ifdef CONFIG_FUSE_DAX
	if (fc->dax_mode == FUSE_DAX_ALWAYS)
		seq_puts(m, ",dax=always");
	else if (fc->dax_mode == FUSE_DAX_NEVER)
		seq_puts(m, ",dax=never");
	else if (fc->dax_mode == FUSE_DAX_INODE_USER)
		seq_puts(m, ",dax=inode");
#endif

	return 0;
}

static void fuse_iqueue_init(struct fuse_iqueue *fiq,
			     const struct fuse_iqueue_ops *ops,
			     void *priv)
{
	memset(fiq, 0, sizeof(struct fuse_iqueue));
	spin_lock_init(&fiq->lock);
	init_waitqueue_head(&fiq->waitq);
	INIT_LIST_HEAD(&fiq->pending);
	INIT_LIST_HEAD(&fiq->interrupts);
	fiq->forget_list_tail = &fiq->forget_list_head;
	fiq->connected = 1;
	fiq->ops = ops;
	fiq->priv = priv;
}

static void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
	unsigned int i;

	spin_lock_init(&fpq->lock);
	for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
		INIT_LIST_HEAD(&fpq->processing[i]);
	INIT_LIST_HEAD(&fpq->io);
	fpq->connected = 1;
}

void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
		    struct user_namespace *user_ns,
		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
{
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	spin_lock_init(&fc->bg_lock);
	init_rwsem(&fc->killsb);
	refcount_set(&fc->count, 1);
	atomic_set(&fc->dev_count, 1);
	init_waitqueue_head(&fc->blocked_waitq);
	fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	INIT_LIST_HEAD(&fc->devices);
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	atomic64_set(&fc->khctr, 0);
	fc->polled_files = RB_ROOT;
	fc->blocked = 0;
	fc->initialized = 0;
	fc->connected = 1;
	atomic64_set(&fc->attr_version, 1);
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
	fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
	fc->user_ns = get_user_ns(user_ns);
	fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
	fc->max_pages_limit = FUSE_MAX_MAX_PAGES;

	INIT_LIST_HEAD(&fc->mounts);
	list_add(&fm->fc_entry, &fc->mounts);
	fm->fc = fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);

void fuse_conn_put(struct fuse_conn *fc)
{
	if (refcount_dec_and_test(&fc->count)) {
		struct fuse_iqueue *fiq = &fc->iq;
		struct fuse_sync_bucket *bucket;

		if (IS_ENABLED(CONFIG_FUSE_DAX))
			fuse_dax_conn_free(fc);
		if (fiq->ops->release)
			fiq->ops->release(fiq);
		put_pid_ns(fc->pid_ns);
		put_user_ns(fc->user_ns);
		bucket = rcu_dereference_protected(fc->curr_bucket, 1);
		if (bucket) {
			WARN_ON(atomic_read(&bucket->count) != 1);
			kfree(bucket);
		}
		fc->release(fc);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);

struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
	refcount_inc(&fc->count);
	return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);

static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
{
	struct fuse_attr attr;
	memset(&attr, 0, sizeof(attr));

	attr.mode = mode;
	attr.ino = FUSE_ROOT_ID;
	attr.nlink = 1;
	return fuse_iget(sb, 1, 0, &attr, 0, 0);
}

struct fuse_inode_handle {
	u64 nodeid;
	u32 generation;
};

static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
			goto out_err;

		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		err = -EIO;
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	err = -ESTALE;
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}

static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
{
	int len = parent ? 6 : 3;
	u64 nodeid;
	u32 generation;

	if (*max_len < len) {
		*max_len = len;
		return FILEID_INVALID;
	}

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;

	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);
	fh[2] = generation;

	if (parent) {
		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;

		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);
		fh[5] = generation;
	}

	*max_len = len;
	return parent ? 0x82 : 0x81;
}

static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct fuse_inode_handle handle;

	if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
		return NULL;

	handle.nodeid = (u64) fid->raw[0] << 32;
	handle.nodeid |= (u64) fid->raw[1];
	handle.generation = fid->raw[2];
	return fuse_get_dentry(sb, &handle);
}

static struct dentry *fuse_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct fuse_inode_handle parent;

	if (fh_type != 0x82 || fh_len < 6)
		return NULL;

	parent.nodeid = (u64) fid->raw[3] << 32;
	parent.nodeid |= (u64) fid->raw[4];
	parent.generation = fid->raw[5];
	return fuse_get_dentry(sb, &parent);
}

static struct dentry *fuse_get_parent(struct dentry *child)
{
	struct inode *child_inode = d_inode(child);
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct inode *inode;
	struct dentry *parent;
	struct fuse_entry_out outarg;
	int err;

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &dotdot_name, &outarg, &inode);
	if (err) {
		if (err == -ENOENT)
			return ERR_PTR(-ESTALE);
		return ERR_PTR(err);
	}

	parent = d_obtain_alias(inode);
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);

	return parent;
}

static const struct export_operations fuse_export_operations = {
	.fh_to_dentry	= fuse_fh_to_dentry,
	.fh_to_parent	= fuse_fh_to_parent,
	.encode_fh	= fuse_encode_fh,
	.get_parent	= fuse_get_parent,
};

static const struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.free_inode	= fuse_free_inode,
	.evict_inode	= fuse_evict_inode,
	.write_inode	= fuse_write_inode,
	.drop_inode	= generic_delete_inode,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.sync_fs	= fuse_sync_fs,
	.show_options	= fuse_show_options,
};

static void sanitize_global_limit(unsigned *limit)
{
	/*
	 * The default maximum number of async requests is calculated to consume
	 * 1/2^13 of the total memory, assuming 392 bytes per request.
	 */
	if (*limit == 0)
		*limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;

	if (*limit >= 1 << 16)
		*limit = (1 << 16) - 1;
}

static int set_global_limit(const char *val, const struct kernel_param *kp)
{
	int rv;

	rv = param_set_uint(val, kp);
	if (rv)
		return rv;

	sanitize_global_limit((unsigned *)kp->arg);

	return 0;
}

static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	if (arg->minor < 13)
		return;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	spin_lock(&fc->bg_lock);
	if (arg->max_background) {
		fc->max_background = arg->max_background;

		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	}
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;

		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
	}
	spin_unlock(&fc->bg_lock);
}

struct fuse_init_args {
	struct fuse_args args;
	struct fuse_init_in in;
	struct fuse_init_out out;
};

static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
	struct fuse_init_out *arg = &ia->out;
	bool ok = true;

	if (error || arg->major != FUSE_KERNEL_VERSION)
		ok = false;
	else {
		unsigned long ra_pages;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			u64 flags = arg->flags;

			if (flags & FUSE_INIT_EXT)
				flags |= (u64) arg->flags2 << 32;

			ra_pages = arg->max_readahead / PAGE_SIZE;
			if (flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
			if (arg->minor >= 17) {
				if (!(flags & FUSE_FLOCK_LOCKS))
					fc->no_flock = 1;
			} else {
				if (!(flags & FUSE_POSIX_LOCKS))
					fc->no_flock = 1;
			}
			if (flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			}
			if (flags & FUSE_BIG_WRITES)
				fc->big_writes = 1;
			if (flags & FUSE_DONT_MASK)
				fc->dont_mask = 1;
			if (flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			else if (flags & FUSE_EXPLICIT_INVAL_DATA)
				fc->explicit_inval_data = 1;
			if (flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			}
			if (flags & FUSE_ASYNC_DIO)
				fc->async_dio = 1;
			if (flags & FUSE_WRITEBACK_CACHE)
				fc->writeback_cache = 1;
			if (flags & FUSE_PARALLEL_DIROPS)
				fc->parallel_dirops = 1;
			if (flags & FUSE_HANDLE_KILLPRIV)
				fc->handle_killpriv = 1;
			if (arg->time_gran && arg->time_gran <= 1000000000)
				fm->sb->s_time_gran = arg->time_gran;
			if ((flags & FUSE_POSIX_ACL)) {
				fc->default_permissions = 1;
				fc->posix_acl = 1;
			}
			if (flags & FUSE_CACHE_SYMLINKS)
				fc->cache_symlinks = 1;
			if (flags & FUSE_ABORT_ERROR)
				fc->abort_err = 1;
			if (flags & FUSE_MAX_PAGES) {
				fc->max_pages =
					min_t(unsigned int, fc->max_pages_limit,
					max_t(unsigned int, arg->max_pages, 1));
			}
			if (IS_ENABLED(CONFIG_FUSE_DAX)) {
				if (flags & FUSE_MAP_ALIGNMENT &&
				    !fuse_dax_check_alignment(fc, arg->map_alignment)) {
					ok = false;
				}
				if (flags & FUSE_HAS_INODE_DAX)
					fc->inode_dax = 1;
			}
			if (flags & FUSE_HANDLE_KILLPRIV_V2) {
				fc->handle_killpriv_v2 = 1;
				fm->sb->s_flags |= SB_NOSEC;
			}
			if (flags & FUSE_SETXATTR_EXT)
				fc->setxattr_ext = 1;
			if (flags & FUSE_SECURITY_CTX)
				fc->init_security = 1;
			if (flags & FUSE_CREATE_SUPP_GROUP)
				fc->create_supp_group = 1;
			if (flags & FUSE_DIRECT_IO_RELAX)
				fc->direct_io_relax = 1;
		} else {
			ra_pages = fc->max_read / PAGE_SIZE;
			fc->no_lock = 1;
			fc->no_flock = 1;
		}

		fm->sb->s_bdi->ra_pages =
				min(fm->sb->s_bdi->ra_pages, ra_pages);
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
		fc->max_write = max_t(unsigned, 4096, fc->max_write);
		fc->conn_init = 1;
	}
	kfree(ia);

	if (!ok) {
		fc->conn_init = 0;
		fc->conn_error = 1;
	}

	fuse_set_initialized(fc);
	wake_up_all(&fc->blocked_waitq);
}

void fuse_send_init(struct fuse_mount *fm)
{
	struct fuse_init_args *ia;
	u64 flags;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);

	ia->in.major = FUSE_KERNEL_VERSION;
	ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
	ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
	flags =
		FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
		FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
		FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
		FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
		FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
		FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_RELAX;
#ifdef CONFIG_FUSE_DAX
	if (fm->fc->dax)
		flags |= FUSE_MAP_ALIGNMENT;
	if (fuse_is_inode_dax_mode(fm->fc->dax_mode))
		flags |= FUSE_HAS_INODE_DAX;
#endif
	if (fm->fc->auto_submounts)
		flags |= FUSE_SUBMOUNTS;

	ia->in.flags = flags;
	ia->in.flags2 = flags >> 32;

	ia->args.opcode = FUSE_INIT;
	ia->args.in_numargs = 1;
	ia->args.in_args[0].size = sizeof(ia->in);
	ia->args.in_args[0].value = &ia->in;
	ia->args.out_numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5.  Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	ia->args.out_argvar = true;
	ia->args.out_args[0].size = sizeof(ia->out);
	ia->args.out_args[0].value = &ia->out;
	ia->args.force = true;
	ia->args.nocreds = true;
	ia->args.end = process_init_reply;

	if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
		process_init_reply(fm, &ia->args, -ENOTCONN);
}
EXPORT_SYMBOL_GPL(fuse_send_init);

void fuse_free_conn(struct fuse_conn *fc)
{
	WARN_ON(!list_empty(&fc->devices));
	kfree_rcu(fc, rcu);
}
EXPORT_SYMBOL_GPL(fuse_free_conn);

static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
	int err;
	char *suffix = "";

	if (sb->s_bdev) {
		suffix = "-fuseblk";
		/*
		 * sb->s_bdi points to blkdev's bdi however we want to redirect
		 * it to our private bdi...
		 */
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
	err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
				   MINOR(fc->dev), suffix);
	if (err)
		return err;

	/* fuse does its own writeback accounting */
	sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
	sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;

	/*
	 * For a single fuse filesystem use max 1% of dirty +
	 * writeback threshold.
	 *
	 * This gives about 1M of write buffer for memory maps on a
	 * machine with 1G and 10% dirty_ratio, which should be more
	 * than enough.
	 *
	 * Privileged users can raise it by writing to
	 *
	 *    /sys/class/bdi/<bdi>/max_ratio
	 */
	bdi_set_max_ratio(sb->s_bdi, 1);

	return 0;
}

struct fuse_dev *fuse_dev_alloc(void)
{
	struct fuse_dev *fud;
	struct list_head *pq;

	fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
	if (!fud)
		return NULL;

	pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
	if (!pq) {
		kfree(fud);
		return NULL;
	}

	fud->pq.processing = pq;
	fuse_pqueue_init(&fud->pq);

	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc);

void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
{
	fud->fc = fuse_conn_get(fc);
	spin_lock(&fc->lock);
	list_add_tail(&fud->entry, &fc->devices);
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_dev_install);

struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
{
	struct fuse_dev *fud;

	fud = fuse_dev_alloc();
	if (!fud)
		return NULL;

	fuse_dev_install(fud, fc);
	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);

void fuse_dev_free(struct fuse_dev *fud)
{
	struct fuse_conn *fc = fud->fc;

	if (fc) {
		spin_lock(&fc->lock);
		list_del(&fud->entry);
		spin_unlock(&fc->lock);

		fuse_conn_put(fc);
	}
	kfree(fud->pq.processing);
	kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);

static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
				      const struct fuse_inode *fi)
{
	*attr = (struct fuse_attr){
		.ino		= fi->inode.i_ino,
		.size		= fi->inode.i_size,
		.blocks		= fi->inode.i_blocks,
		.atime		= fi->inode.i_atime.tv_sec,
		.mtime		= fi->inode.i_mtime.tv_sec,
		.ctime		= fi->inode.i_ctime.tv_sec,
		.atimensec	= fi->inode.i_atime.tv_nsec,
		.mtimensec	= fi->inode.i_mtime.tv_nsec,
		.ctimensec	= fi->inode.i_ctime.tv_nsec,
		.mode		= fi->inode.i_mode,
		.nlink		= fi->inode.i_nlink,
		.uid		= fi->inode.i_uid.val,
		.gid		= fi->inode.i_gid.val,
		.rdev		= fi->inode.i_rdev,
		.blksize	= 1u << fi->inode.i_blkbits,
	};
}

static void fuse_sb_defaults(struct super_block *sb)
{
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_xattr = fuse_xattr_handlers;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;
	sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
	if (sb->s_user_ns != &init_user_ns)
		sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
	sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
}

static int fuse_fill_super_submount(struct super_block *sb,
				    struct fuse_inode *parent_fi)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct super_block *parent_sb = parent_fi->inode.i_sb;
	struct fuse_attr root_attr;
	struct inode *root;

	fuse_sb_defaults(sb);
	fm->sb = sb;

	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi_get(parent_sb->s_bdi);

	sb->s_xattr = parent_sb->s_xattr;
	sb->s_time_gran = parent_sb->s_time_gran;
	sb->s_blocksize = parent_sb->s_blocksize;
	sb->s_blocksize_bits = parent_sb->s_blocksize_bits;
	sb->s_subtype = kstrdup(parent_sb->s_subtype, GFP_KERNEL);
	if (parent_sb->s_subtype && !sb->s_subtype)
		return -ENOMEM;

	fuse_fill_attr_from_inode(&root_attr, parent_fi);
	root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0);
	/*
	 * This inode is just a duplicate, so it is not looked up and
	 * its nlookup should not be incremented.  fuse_iget() does
	 * that, though, so undo it here.
	 */
	get_fuse_inode(root)->nlookup--;
	sb->s_d_op = &fuse_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;

	return 0;
}

/* Filesystem context private data holds the FUSE inode of the mount point */
static int fuse_get_tree_submount(struct fs_context *fsc)
{
	struct fuse_mount *fm;
	struct fuse_inode *mp_fi = fsc->fs_private;
	struct fuse_conn *fc = get_fuse_conn(&mp_fi->inode);
	struct super_block *sb;
	int err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		return -ENOMEM;

	fm->fc = fuse_conn_get(fc);
	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, NULL, set_anon_super_fc);
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* Initialize superblock, making @mp_fi its root */
	err = fuse_fill_super_submount(sb, mp_fi);
	if (err) {
		deactivate_locked_super(sb);
		return err;
	}

	down_write(&fc->killsb);
	list_add_tail(&fm->fc_entry, &fc->mounts);
	up_write(&fc->killsb);

	sb->s_flags |= SB_ACTIVE;
	fsc->root = dget(sb->s_root);

	return 0;
}

static const struct fs_context_operations fuse_context_submount_ops = {
	.get_tree	= fuse_get_tree_submount,
};

int fuse_init_fs_context_submount(struct fs_context *fsc)
{
	fsc->ops = &fuse_context_submount_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_init_fs_context_submount);

int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
	struct fuse_dev *fud = NULL;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct inode *root;
	struct dentry *root_dentry;
	int err;

	err = -EINVAL;
	if (sb->s_flags & SB_MANDLOCK)
		goto err;

	rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc());
	fuse_sb_defaults(sb);

	if (ctx->is_bdev) {
#ifdef CONFIG_BLOCK
		err = -EINVAL;
		if (!sb_set_blocksize(sb, ctx->blksize))
			goto err;
#endif
	} else {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;
	}

	sb->s_subtype = ctx->subtype;
	ctx->subtype = NULL;
	if (IS_ENABLED(CONFIG_FUSE_DAX)) {
		err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev);
		if (err)
			goto err;
	}

	if (ctx->fudptr) {
		err = -ENOMEM;
		fud = fuse_dev_alloc_install(fc);
		if (!fud)
			goto err_free_dax;
	}

	fc->dev = sb->s_dev;
	fm->sb = sb;
	err = fuse_bdi_init(fc, sb);
	if (err)
		goto err_dev_free;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & SB_POSIXACL)
		fc->dont_mask = 1;
	sb->s_flags |= SB_POSIXACL;

	fc->default_permissions = ctx->default_permissions;
	fc->allow_other = ctx->allow_other;
	fc->user_id = ctx->user_id;
	fc->group_id = ctx->group_id;
	fc->legacy_opts_show = ctx->legacy_opts_show;
	fc->max_read = max_t(unsigned int, 4096, ctx->max_read);
	fc->destroy = ctx->destroy;
	fc->no_control = ctx->no_control;
	fc->no_force_umount = ctx->no_force_umount;

	err = -ENOMEM;
	root = fuse_get_root_inode(sb, ctx->rootmode);
	sb->s_d_op = &fuse_root_dentry_operations;
	root_dentry = d_make_root(root);
	if (!root_dentry)
		goto err_dev_free;
	/* Root dentry doesn't have .d_revalidate */
	sb->s_d_op = &fuse_dentry_operations;

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	if (ctx->fudptr && *ctx->fudptr)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	if (ctx->fudptr)
		*ctx->fudptr = fud;
	mutex_unlock(&fuse_mutex);
	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
	dput(root_dentry);
 err_dev_free:
	if (fud)
		fuse_dev_free(fud);
 err_free_dax:
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_conn_free(fc);
 err:
	return err;
}
EXPORT_SYMBOL_GPL(fuse_fill_super_common);

static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	int err;

	if (!ctx->file || !ctx->rootmode_present ||
	    !ctx->user_id_present || !ctx->group_id_present)
		return -EINVAL;

	/*
	 * Require mount to happen from the same user namespace which
	 * opened /dev/fuse to prevent potential attacks.
	 */
	if ((ctx->file->f_op != &fuse_dev_operations) ||
	    (ctx->file->f_cred->user_ns != sb->s_user_ns))
		return -EINVAL;
	ctx->fudptr = &ctx->file->private_data;

	err = fuse_fill_super_common(sb, ctx);
	if (err)
		return err;
	/* file->private_data shall be visible on all CPUs after this */
	smp_mb();
	fuse_send_init(get_fuse_mount_super(sb));
	return 0;
}

/*
 * This is the path where user supplied an already initialized fuse dev.  In
 * this case never create a new super if the old one is gone.
 */
static int fuse_set_no_super(struct super_block *sb, struct fs_context *fsc)
{
	return -ENOTCONN;
}

static int fuse_test_super(struct super_block *sb, struct fs_context *fsc)
{

	return fsc->sget_key == get_fuse_conn_super(sb);
}

static int fuse_get_tree(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	struct fuse_dev *fud;
	struct fuse_conn *fc;
	struct fuse_mount *fm;
	struct super_block *sb;
	int err;

	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		kfree(fc);
		return -ENOMEM;
	}

	fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
	fc->release = fuse_free_conn;

	fsc->s_fs_info = fm;

	if (ctx->fd_present)
		ctx->file = fget(ctx->fd);

	if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
		err = get_tree_bdev(fsc, fuse_fill_super);
		goto out;
	}
	/*
	 * While block dev mount can be initialized with a dummy device fd
	 * (found by device name), normal fuse mounts can't
	 */
	err = -EINVAL;
	if (!ctx->file)
		goto out;

	/*
	 * Allow creating a fuse mount with an already initialized fuse
	 * connection
	 */
	fud = READ_ONCE(ctx->file->private_data);
	if (ctx->file->f_op == &fuse_dev_operations && fud) {
		fsc->sget_key = fud->fc;
		sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
		err = PTR_ERR_OR_ZERO(sb);
		if (!IS_ERR(sb))
			fsc->root = dget(sb->s_root);
	} else {
		err = get_tree_nodev(fsc, fuse_fill_super);
	}
out:
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (ctx->file)
		fput(ctx->file);
	return err;
}

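/*
 * Mount flow sketch (illustrative, not part of this file): a userspace
 * daemon such as libfuse typically opens /dev/fuse and then mounts with
 * the fd it got back, e.g.
 *
 *	fd = open("/dev/fuse", O_RDWR);
 *	mount("srcname", mountpoint, "fuse", 0,
 *	      "fd=<fd>,rootmode=40000,user_id=<uid>,group_id=<gid>");
 *
 * which reaches fuse_init_fs_context() -> fuse_parse_param() ->
 * fuse_get_tree() -> fuse_fill_super() above, and fails with -EINVAL if
 * fd, rootmode, user_id or group_id is missing.
 */
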
static const struct fs_context_operations fuse_context_ops = {
	.free		= fuse_free_fsc,
	.parse_param	= fuse_parse_param,
	.reconfigure	= fuse_reconfigure,
	.get_tree	= fuse_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int fuse_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_read = ~0;
	ctx->blksize = FUSE_DEFAULT_BLKSIZE;
	ctx->legacy_opts_show = true;

#ifdef CONFIG_BLOCK
	if (fsc->fs_type == &fuseblk_fs_type) {
		ctx->is_bdev = true;
		ctx->destroy = true;
	}
#endif

	fsc->fs_private = ctx;
	fsc->ops = &fuse_context_ops;
	return 0;
}

bool fuse_mount_remove(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	bool last = false;

	down_write(&fc->killsb);
	list_del_init(&fm->fc_entry);
	if (list_empty(&fc->mounts))
		last = true;
	up_write(&fc->killsb);

	return last;
}
EXPORT_SYMBOL_GPL(fuse_mount_remove);

void fuse_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;

	if (fc->destroy)
		fuse_send_destroy(fm);

	fuse_abort_conn(fc);
	fuse_wait_aborted(fc);

	if (!list_empty(&fc->entry)) {
		mutex_lock(&fuse_mutex);
		list_del(&fc->entry);
		fuse_ctl_remove_conn(fc);
		mutex_unlock(&fuse_mutex);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_destroy);

static void fuse_sb_destroy(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			fuse_conn_destroy(fm);
	}
}

void fuse_mount_destroy(struct fuse_mount *fm)
{
	fuse_conn_put(fm->fc);
	kfree(fm);
}
EXPORT_SYMBOL(fuse_mount_destroy);

static void fuse_kill_sb_anon(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_anon_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}

static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.fs_flags	= FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");

#ifdef CONFIG_BLOCK
static void fuse_kill_sb_blk(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_block_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}

static struct file_system_type fuseblk_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuseblk",
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_blk,
	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
MODULE_ALIAS_FS("fuseblk");

static inline int register_fuseblk(void)
{
	return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
	unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
	return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif

static void fuse_inode_init_once(void *foo)
{
	struct inode *inode = foo;

	inode_init_once(inode);
}

static int __init fuse_fs_init(void)
{
	int err;

	fuse_inode_cachep = kmem_cache_create("fuse_inode",
			sizeof(struct fuse_inode), 0,
			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
			fuse_inode_init_once);
	err = -ENOMEM;
	if (!fuse_inode_cachep)
		goto out;

	err = register_fuseblk();
	if (err)
		goto out2;

	err = register_filesystem(&fuse_fs_type);
	if (err)
		goto out3;

	return 0;

 out3:
	unregister_fuseblk();
 out2:
	kmem_cache_destroy(fuse_inode_cachep);
 out:
	return err;
}

static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(fuse_inode_cachep);
}

static struct kobject *fuse_kobj;

static int fuse_sysfs_init(void)
{
	int err;

	fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
	if (!fuse_kobj) {
		err = -ENOMEM;
		goto out_err;
	}

	err = sysfs_create_mount_point(fuse_kobj, "connections");
	if (err)
		goto out_fuse_unregister;

	return 0;

 out_fuse_unregister:
	kobject_put(fuse_kobj);
 out_err:
	return err;
}

static void fuse_sysfs_cleanup(void)
{
	sysfs_remove_mount_point(fuse_kobj, "connections");
	kobject_put(fuse_kobj);
}

static int __init fuse_init(void)
{
	int res;

	pr_info("init (API version %i.%i)\n",
		FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}

static void __exit fuse_exit(void)
{
	pr_debug("exit\n");

	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);
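
/*
 * Illustrative check (not part of this file): after "modprobe fuse" the
 * filesystem types registered above should be visible, e.g.
 *
 *	grep fuse /proc/filesystems	# lists "fuse" (and "fuseblk" with CONFIG_BLOCK)
 *	ls /sys/fs/fuse/connections	# mount point created by fuse_sysfs_init()
 */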