/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/pid_namespace.h>
#include <uapi/linux/magic.h>

MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, const struct kernel_param *kp);

unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");

#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)

#ifdef CONFIG_BLOCK
static struct file_system_type fuseblk_fs_type;
#endif

struct fuse_forget_link *fuse_alloc_forget(void)
{
	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
}

static struct inode *fuse_alloc_inode(struct super_block *sb)
{
	struct fuse_inode *fi;

	fi = alloc_inode_sb(sb, fuse_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;

	fi->i_time = 0;
	fi->inval_mask = ~0;
	fi->nodeid = 0;
	fi->nlookup = 0;
	fi->attr_version = 0;
	fi->orig_ino = 0;
	fi->state = 0;
	mutex_init(&fi->mutex);
	spin_lock_init(&fi->lock);
	fi->forget = fuse_alloc_forget();
	if (!fi->forget)
		goto out_free;

	if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
		goto out_free_forget;

	return &fi->inode;

out_free_forget:
	kfree(fi->forget);
out_free:
	kmem_cache_free(fuse_inode_cachep, fi);
	return NULL;
}

static void fuse_free_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	mutex_destroy(&fi->mutex);
	kfree(fi->forget);
#ifdef CONFIG_FUSE_DAX
	kfree(fi->dax);
#endif
	kmem_cache_free(fuse_inode_cachep, fi);
}

static void fuse_evict_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Will write inode on close/munmap and in all other dirtiers */
	WARN_ON(inode->i_state & I_DIRTY_INODE);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (inode->i_sb->s_flags & SB_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);

		if (FUSE_IS_DAX(inode))
			fuse_dax_inode_cleanup(inode);
		if (fi->nlookup) {
			fuse_queue_forget(fc, fi->forget, fi->nodeid,
					  fi->nlookup);
			fi->forget = NULL;
		}
	}
	if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
		WARN_ON(!list_empty(&fi->write_files));
		WARN_ON(!list_empty(&fi->queued_writes));
	}
}

static int fuse_reconfigure(struct fs_context *fsc)
{
	struct super_block *sb = fsc->root->d_sb;

	sync_filesystem(sb);
	if (fsc->sb_flags & SB_MANDLOCK)
		return -EINVAL;

	return 0;
}

/*
 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t fuse_squash_ino(u64 ino64)
{
	ino_t ino = (ino_t) ino64;
	if (sizeof(ino_t) < sizeof(u64))
		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
	return ino;
}
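/*
 * Worked example (added commentary, values illustrative): on a 32-bit arch
 * the 64-bit nodeid 0x123456789abcdef0 is squashed by folding the high word
 * into the low word:
 *
 *	(ino_t) 0x123456789abcdef0	-> 0x9abcdef0
 *	0x9abcdef0 ^ 0x12345678		-> 0x88888888
 *
 * Distinct 64-bit inode numbers thus still spread over the smaller ino_t
 * space, at the cost of possible collisions.
 */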
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   struct fuse_statx *sx,
				   u64 attr_valid, u32 cache_mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	lockdep_assert_held(&fi->lock);

	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	fi->i_time = attr_valid;
	/* Clear basic stats from invalid mask */
	set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0);

	inode->i_ino = fuse_squash_ino(attr->ino);
	inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid = make_kuid(fc->user_ns, attr->uid);
	inode->i_gid = make_kgid(fc->user_ns, attr->gid);
	inode->i_blocks = attr->blocks;

	/* Sanitize nsecs */
	attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
	attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
	attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);

	inode->i_atime.tv_sec = attr->atime;
	inode->i_atime.tv_nsec = attr->atimensec;
	/* mtime from server may be stale due to local buffered write */
	if (!(cache_mask & STATX_MTIME)) {
		inode->i_mtime.tv_sec = attr->mtime;
		inode->i_mtime.tv_nsec = attr->mtimensec;
	}
	if (!(cache_mask & STATX_CTIME)) {
		inode_set_ctime(inode, attr->ctime, attr->ctimensec);
	}
	if (sx) {
		/* Sanitize nsecs */
		sx->btime.tv_nsec =
			min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);

		/*
		 * Btime has been queried, so the cache is valid (whether or
		 * not btime itself is available); clear STATX_BTIME from
		 * inval_mask.
		 *
		 * Availability of the btime attribute is indicated in
		 * FUSE_I_BTIME
		 */
		set_mask_bits(&fi->inval_mask, STATX_BTIME, 0);
		if (sx->mask & STATX_BTIME) {
			set_bit(FUSE_I_BTIME, &fi->state);
			fi->i_btime.tv_sec = sx->btime.tv_sec;
			fi->i_btime.tv_nsec = sx->btime.tv_nsec;
		}
	}

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions. This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!fc->default_permissions)
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;

	/*
	 * We are refreshing inode data and it is possible that another
	 * client set suid/sgid or security.capability xattr. So clear
	 * S_NOSEC. Ideally, we could have cleared it only if suid/sgid
	 * was set or if security.capability xattr was set. But we don't
	 * know if security.capability has been set or not. So clear it
	 * anyway. It's less efficient but should be safe.
	 */
	inode->i_flags &= ~S_NOSEC;
}

u32 fuse_get_cache_mask(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fc->writeback_cache || !S_ISREG(inode->i_mode))
		return 0;

	return STATX_MTIME | STATX_CTIME | STATX_SIZE;
}

void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_statx *sx,
			    u64 attr_valid, u64 attr_version)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	u32 cache_mask;
	loff_t oldsize;
	struct timespec64 old_mtime;

	spin_lock(&fi->lock);
	/*
	 * In case of writeback_cache enabled, writes update mtime, ctime and
	 * may update i_size. In these cases trust the cached value in the
	 * inode.
	 */
	cache_mask = fuse_get_cache_mask(inode);
	if (cache_mask & STATX_SIZE)
		attr->size = i_size_read(inode);

	if (cache_mask & STATX_MTIME) {
		attr->mtime = inode->i_mtime.tv_sec;
		attr->mtimensec = inode->i_mtime.tv_nsec;
	}
	if (cache_mask & STATX_CTIME) {
		attr->ctime = inode_get_ctime(inode).tv_sec;
		attr->ctimensec = inode_get_ctime(inode).tv_nsec;
	}

	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fi->lock);
		return;
	}

	old_mtime = inode->i_mtime;
	fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask);

	oldsize = inode->i_size;
	/*
	 * In case of writeback_cache enabled, the cached writes beyond EOF
	 * extend local i_size without keeping userspace server in sync. So,
	 * attr->size coming from server can be stale. We cannot trust it.
	 */
	if (!(cache_mask & STATX_SIZE))
		i_size_write(inode, attr->size);
	spin_unlock(&fi->lock);

	if (!cache_mask && S_ISREG(inode->i_mode)) {
		bool inval = false;

		if (oldsize != attr->size) {
			truncate_pagecache(inode, attr->size);
			if (!fc->explicit_inval_data)
				inval = true;
		} else if (fc->auto_inval_data) {
			struct timespec64 new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			};

			/*
			 * Auto inval mode also checks and invalidates if mtime
			 * has changed.
			 */
			if (!timespec64_equal(&old_mtime, &new_mtime))
				inval = true;
		}

		if (inval)
			invalidate_inode_pages2(inode->i_mapping);
	}

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_dontcache(inode, attr->flags);
}
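/*
 * Note (added commentary): the attr_version check in fuse_change_attributes()
 * means a reply that raced with a newer attribute update, or with a local
 * size change in progress (FUSE_I_SIZE_UNSTABLE), is simply dropped rather
 * than applied out of order.
 */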
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_conn *fc)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	inode->i_mtime.tv_sec = attr->mtime;
	inode->i_mtime.tv_nsec = attr->mtimensec;
	inode_set_ctime(inode, attr->ctime, attr->ctimensec);
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode, attr->flags);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	} else
		BUG();
	/*
	 * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
	 * so they see the exact same behavior as before.
	 */
	if (!fc->posix_acl)
		inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
}

static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	if (get_node_id(inode) == nodeid)
		return 1;
	else
		return 0;
}

static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	get_fuse_inode(inode)->nodeid = nodeid;
	return 0;
}

struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	/*
	 * Auto mount points get their node id from the submount root, which is
	 * not a unique identifier within this filesystem.
	 *
	 * To avoid conflicts, do not place submount points into the inode hash
	 * table.
	 */
	if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
	    S_ISDIR(attr->mode)) {
		inode = new_inode(sb);
		if (!inode)
			return NULL;

		fuse_init_inode(inode, attr, fc);
		get_fuse_inode(inode)->nodeid = nodeid;
		inode->i_flags |= S_AUTOMOUNT;
		goto done;
	}

retry:
	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME;
		if (!fc->writeback_cache || !S_ISREG(attr->mode))
			inode->i_flags |= S_NOCMTIME;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr, fc);
		unlock_new_inode(inode);
	} else if (fuse_stale_inode(inode, generation, attr)) {
		/* nodeid was reused, any I/O on the old inode should fail */
		fuse_make_bad(inode);
		iput(inode);
		goto retry;
	}
done:
	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->nlookup++;
	spin_unlock(&fi->lock);
	fuse_change_attributes(inode, attr, NULL, attr_valid, attr_version);

	return inode;
}
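/*
 * Note (added commentary): every successful fuse_iget() increments nlookup,
 * and the accumulated count is handed back to the server in the FUSE_FORGET
 * queued from fuse_evict_inode(), keeping the daemon's lookup count balanced.
 */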
struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_mount **fm)
{
	struct fuse_mount *fm_iter;
	struct inode *inode;

	WARN_ON(!rwsem_is_locked(&fc->killsb));
	list_for_each_entry(fm_iter, &fc->mounts, fc_entry) {
		if (!fm_iter->sb)
			continue;

		inode = ilookup5(fm_iter->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			if (fm)
				*fm = fm_iter;
			return inode;
		}
	}

	return NULL;
}

int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct fuse_inode *fi;
	struct inode *inode;
	pgoff_t pg_start;
	pgoff_t pg_end;

	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		return -ENOENT;

	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	spin_unlock(&fi->lock);

	fuse_invalidate_attr(inode);
	forget_all_cached_acls(inode);
	if (offset >= 0) {
		pg_start = offset >> PAGE_SHIFT;
		if (len <= 0)
			pg_end = -1;
		else
			pg_end = (offset + len - 1) >> PAGE_SHIFT;
		invalidate_inode_pages2_range(inode->i_mapping,
					      pg_start, pg_end);
	}
	iput(inode);
	return 0;
}

bool fuse_lock_inode(struct inode *inode)
{
	bool locked = false;

	if (!get_fuse_conn(inode)->parallel_dirops) {
		mutex_lock(&get_fuse_inode(inode)->mutex);
		locked = true;
	}

	return locked;
}

void fuse_unlock_inode(struct inode *inode, bool locked)
{
	if (locked)
		mutex_unlock(&get_fuse_inode(inode)->mutex);
}

static void fuse_umount_begin(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->no_force_umount)
		return;

	fuse_abort_conn(fc);

	// Only retire block-device-based superblocks.
	if (sb->s_bdev != NULL)
		retire_super(sb);
}

static void fuse_send_destroy(struct fuse_mount *fm)
{
	if (fm->fc->conn_init) {
		FUSE_ARGS(args);

		args.opcode = FUSE_DESTROY;
		args.force = true;
		args.nocreds = true;
		fuse_simple_request(fm, &args);
	}
}

static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
	stbuf->f_type = FUSE_SUPER_MAGIC;
	stbuf->f_bsize = attr->bsize;
	stbuf->f_frsize = attr->frsize;
	stbuf->f_blocks = attr->blocks;
	stbuf->f_bfree = attr->bfree;
	stbuf->f_bavail = attr->bavail;
	stbuf->f_files = attr->files;
	stbuf->f_ffree = attr->ffree;
	stbuf->f_namelen = attr->namelen;
	/* fsid is left zero */
}

static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	FUSE_ARGS(args);
	struct fuse_statfs_out outarg;
	int err;

	if (!fuse_allow_current_process(fm->fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;
		return 0;
	}

	memset(&outarg, 0, sizeof(outarg));
	args.in_numargs = 0;
	args.opcode = FUSE_STATFS;
	args.nodeid = get_node_id(d_inode(dentry));
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		convert_fuse_statfs(buf, &outarg.st);
	return err;
}

static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void)
{
	struct fuse_sync_bucket *bucket;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
	if (bucket) {
		init_waitqueue_head(&bucket->waitq);
		/* Initial active count */
		atomic_set(&bucket->count, 1);
	}
	return bucket;
}

static void fuse_sync_fs_writes(struct fuse_conn *fc)
{
	struct fuse_sync_bucket *bucket, *new_bucket;
	int count;

	new_bucket = fuse_sync_bucket_alloc();
	spin_lock(&fc->lock);
	bucket = rcu_dereference_protected(fc->curr_bucket, 1);
	count = atomic_read(&bucket->count);
	WARN_ON(count < 1);
	/* No outstanding writes? */
	if (count == 1) {
		spin_unlock(&fc->lock);
		kfree(new_bucket);
		return;
	}

	/*
	 * Completion of new bucket depends on completion of this bucket, so add
	 * one more count.
	 */
	atomic_inc(&new_bucket->count);
	rcu_assign_pointer(fc->curr_bucket, new_bucket);
	spin_unlock(&fc->lock);
	/*
	 * Drop initial active count. At this point if all writes in this and
	 * ancestor buckets complete, the count will go to zero and this task
	 * will be woken up.
	 */
	atomic_dec(&bucket->count);

	wait_event(bucket->waitq, atomic_read(&bucket->count) == 0);

	/* Drop temp count on descendant bucket */
	fuse_sync_bucket_dec(new_bucket);
	kfree_rcu(bucket, rcu);
}
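/*
 * Counting sketch for the buckets above (added commentary): a bucket starts
 * with count == 1, its "active" reference, and each in-flight write holds
 * one more, so count == 1 means no outstanding writes.  syncfs swaps in a
 * fresh bucket, chains the new bucket's completion to the old one with an
 * extra reference, drops the old bucket's active reference and sleeps until
 * the old count hits zero.
 */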
static int fuse_sync_fs(struct super_block *sb, int wait)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct fuse_syncfs_in inarg;
	FUSE_ARGS(args);
	int err;

	/*
	 * Userspace cannot handle the wait == 0 case. Avoid a
	 * gratuitous roundtrip.
	 */
	if (!wait)
		return 0;

	/* The filesystem is being unmounted. Nothing to do. */
	if (!sb->s_root)
		return 0;

	if (!fc->sync_fs)
		return 0;

	fuse_sync_fs_writes(fc);

	memset(&inarg, 0, sizeof(inarg));
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.opcode = FUSE_SYNCFS;
	args.nodeid = get_node_id(sb->s_root->d_inode);
	args.out_numargs = 0;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->sync_fs = 0;
		err = 0;
	}

	return err;
}

enum {
	OPT_SOURCE,
	OPT_SUBTYPE,
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_BLKSIZE,
	OPT_ERR
};

static const struct fs_parameter_spec fuse_fs_parameters[] = {
	fsparam_string	("source",		OPT_SOURCE),
	fsparam_u32	("fd",			OPT_FD),
	fsparam_u32oct	("rootmode",		OPT_ROOTMODE),
	fsparam_u32	("user_id",		OPT_USER_ID),
	fsparam_u32	("group_id",		OPT_GROUP_ID),
	fsparam_flag	("default_permissions",	OPT_DEFAULT_PERMISSIONS),
	fsparam_flag	("allow_other",		OPT_ALLOW_OTHER),
	fsparam_u32	("max_read",		OPT_MAX_READ),
	fsparam_u32	("blksize",		OPT_BLKSIZE),
	fsparam_string	("subtype",		OPT_SUBTYPE),
	{}
};
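/*
 * Illustrative example (added commentary, values hypothetical): a typical
 * unprivileged mount performed by a libfuse helper passes an option string
 * like
 *
 *	fd=4,rootmode=40000,user_id=1000,group_id=1000,default_permissions
 *
 * fd, rootmode (octal), user_id and group_id are mandatory for plain "fuse"
 * mounts; fuse_fill_super() below rejects the mount if any is missing.
 */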
static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;

	if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
		/*
		 * Ignore options coming from mount(MS_REMOUNT) for backward
		 * compatibility.
		 */
		if (fsc->oldapi)
			return 0;

		return invalfc(fsc, "No changes allowed in reconfigure");
	}

	opt = fs_parse(fsc, fuse_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_SOURCE:
		if (fsc->source)
			return invalfc(fsc, "Multiple sources specified");
		fsc->source = param->string;
		param->string = NULL;
		break;

	case OPT_SUBTYPE:
		if (ctx->subtype)
			return invalfc(fsc, "Multiple subtypes specified");
		ctx->subtype = param->string;
		param->string = NULL;
		return 0;

	case OPT_FD:
		ctx->fd = result.uint_32;
		ctx->fd_present = true;
		break;

	case OPT_ROOTMODE:
		if (!fuse_valid_type(result.uint_32))
			return invalfc(fsc, "Invalid rootmode");
		ctx->rootmode = result.uint_32;
		ctx->rootmode_present = true;
		break;

	case OPT_USER_ID:
		ctx->user_id = make_kuid(fsc->user_ns, result.uint_32);
		if (!uid_valid(ctx->user_id))
			return invalfc(fsc, "Invalid user_id");
		ctx->user_id_present = true;
		break;

	case OPT_GROUP_ID:
		ctx->group_id = make_kgid(fsc->user_ns, result.uint_32);
		if (!gid_valid(ctx->group_id))
			return invalfc(fsc, "Invalid group_id");
		ctx->group_id_present = true;
		break;

	case OPT_DEFAULT_PERMISSIONS:
		ctx->default_permissions = true;
		break;

	case OPT_ALLOW_OTHER:
		ctx->allow_other = true;
		break;

	case OPT_MAX_READ:
		ctx->max_read = result.uint_32;
		break;

	case OPT_BLKSIZE:
		if (!ctx->is_bdev)
			return invalfc(fsc, "blksize only supported for fuseblk");
		ctx->blksize = result.uint_32;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void fuse_free_fsc(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;

	if (ctx) {
		kfree(ctx->subtype);
		kfree(ctx);
	}
}
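/*
 * Note (added commentary): fuse_free_fsc() only needs to free ctx->subtype.
 * In fuse_parse_param() both OPT_SOURCE and OPT_SUBTYPE steal the parameter
 * string by NULLing param->string; the source string is handed to
 * fsc->source, which the VFS releases along with the context.
 */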
static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->legacy_opts_show) {
		seq_printf(m, ",user_id=%u",
			   from_kuid_munged(fc->user_ns, fc->user_id));
		seq_printf(m, ",group_id=%u",
			   from_kgid_munged(fc->user_ns, fc->group_id));
		if (fc->default_permissions)
			seq_puts(m, ",default_permissions");
		if (fc->allow_other)
			seq_puts(m, ",allow_other");
		if (fc->max_read != ~0)
			seq_printf(m, ",max_read=%u", fc->max_read);
		if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
			seq_printf(m, ",blksize=%lu", sb->s_blocksize);
	}
#ifdef CONFIG_FUSE_DAX
	if (fc->dax_mode == FUSE_DAX_ALWAYS)
		seq_puts(m, ",dax=always");
	else if (fc->dax_mode == FUSE_DAX_NEVER)
		seq_puts(m, ",dax=never");
	else if (fc->dax_mode == FUSE_DAX_INODE_USER)
		seq_puts(m, ",dax=inode");
#endif

	return 0;
}

static void fuse_iqueue_init(struct fuse_iqueue *fiq,
			     const struct fuse_iqueue_ops *ops,
			     void *priv)
{
	memset(fiq, 0, sizeof(struct fuse_iqueue));
	spin_lock_init(&fiq->lock);
	init_waitqueue_head(&fiq->waitq);
	INIT_LIST_HEAD(&fiq->pending);
	INIT_LIST_HEAD(&fiq->interrupts);
	fiq->forget_list_tail = &fiq->forget_list_head;
	fiq->connected = 1;
	fiq->ops = ops;
	fiq->priv = priv;
}

static void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
	unsigned int i;

	spin_lock_init(&fpq->lock);
	for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
		INIT_LIST_HEAD(&fpq->processing[i]);
	INIT_LIST_HEAD(&fpq->io);
	fpq->connected = 1;
}

void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
		    struct user_namespace *user_ns,
		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
{
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	spin_lock_init(&fc->bg_lock);
	init_rwsem(&fc->killsb);
	refcount_set(&fc->count, 1);
	atomic_set(&fc->dev_count, 1);
	init_waitqueue_head(&fc->blocked_waitq);
	fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	INIT_LIST_HEAD(&fc->devices);
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	atomic64_set(&fc->khctr, 0);
	fc->polled_files = RB_ROOT;
	fc->blocked = 0;
	fc->initialized = 0;
	fc->connected = 1;
	atomic64_set(&fc->attr_version, 1);
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
	fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
	fc->user_ns = get_user_ns(user_ns);
	fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
	fc->max_pages_limit = FUSE_MAX_MAX_PAGES;

	INIT_LIST_HEAD(&fc->mounts);
	list_add(&fm->fc_entry, &fc->mounts);
	fm->fc = fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);

void fuse_conn_put(struct fuse_conn *fc)
{
	if (refcount_dec_and_test(&fc->count)) {
		struct fuse_iqueue *fiq = &fc->iq;
		struct fuse_sync_bucket *bucket;

		if (IS_ENABLED(CONFIG_FUSE_DAX))
			fuse_dax_conn_free(fc);
		if (fiq->ops->release)
			fiq->ops->release(fiq);
		put_pid_ns(fc->pid_ns);
		put_user_ns(fc->user_ns);
		bucket = rcu_dereference_protected(fc->curr_bucket, 1);
		if (bucket) {
			WARN_ON(atomic_read(&bucket->count) != 1);
			kfree(bucket);
		}
		fc->release(fc);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);
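/*
 * Reference lifecycle (added commentary): fuse_conn_init() starts fc->count
 * at 1, owned by the initial fuse_mount.  Devices (fuse_dev_install()) and
 * submounts take additional references via fuse_conn_get(); the final
 * fuse_conn_put() releases the pid/user namespaces and calls fc->release().
 */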
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
	refcount_inc(&fc->count);
	return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);

static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
{
	struct fuse_attr attr;
	memset(&attr, 0, sizeof(attr));

	attr.mode = mode;
	attr.ino = FUSE_ROOT_ID;
	attr.nlink = 1;
	return fuse_iget(sb, 1, 0, &attr, 0, 0);
}

struct fuse_inode_handle {
	u64 nodeid;
	u32 generation;
};

static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
			goto out_err;

		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		err = -EIO;
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	err = -ESTALE;
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}

static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
{
	int len = parent ? 6 : 3;
	u64 nodeid;
	u32 generation;

	if (*max_len < len) {
		*max_len = len;
		return FILEID_INVALID;
	}

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;

	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);
	fh[2] = generation;

	if (parent) {
		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;

		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);
		fh[5] = generation;
	}

	*max_len = len;
	return parent ? 0x82 : 0x81;
}
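/*
 * Resulting handle layout (added commentary, 32-bit words):
 *
 *	fh[0], fh[1]	nodeid (high, low)
 *	fh[2]		generation
 *	fh[3], fh[4]	parent nodeid (high, low)	(type 0x82 only)
 *	fh[5]		parent generation		(type 0x82 only)
 *
 * Type 0x81 encodes just the inode, 0x82 the inode plus its parent.
 */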
static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
					struct fid *fid, int fh_len, int fh_type)
{
	struct fuse_inode_handle handle;

	if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
		return NULL;

	handle.nodeid = (u64) fid->raw[0] << 32;
	handle.nodeid |= (u64) fid->raw[1];
	handle.generation = fid->raw[2];
	return fuse_get_dentry(sb, &handle);
}

static struct dentry *fuse_fh_to_parent(struct super_block *sb,
					struct fid *fid, int fh_len, int fh_type)
{
	struct fuse_inode_handle parent;

	if (fh_type != 0x82 || fh_len < 6)
		return NULL;

	parent.nodeid = (u64) fid->raw[3] << 32;
	parent.nodeid |= (u64) fid->raw[4];
	parent.generation = fid->raw[5];
	return fuse_get_dentry(sb, &parent);
}

static struct dentry *fuse_get_parent(struct dentry *child)
{
	struct inode *child_inode = d_inode(child);
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct inode *inode;
	struct dentry *parent;
	struct fuse_entry_out outarg;
	int err;

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &dotdot_name, &outarg, &inode);
	if (err) {
		if (err == -ENOENT)
			return ERR_PTR(-ESTALE);
		return ERR_PTR(err);
	}

	parent = d_obtain_alias(inode);
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);

	return parent;
}

static const struct export_operations fuse_export_operations = {
	.fh_to_dentry	= fuse_fh_to_dentry,
	.fh_to_parent	= fuse_fh_to_parent,
	.encode_fh	= fuse_encode_fh,
	.get_parent	= fuse_get_parent,
};

static const struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.free_inode	= fuse_free_inode,
	.evict_inode	= fuse_evict_inode,
	.write_inode	= fuse_write_inode,
	.drop_inode	= generic_delete_inode,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.sync_fs	= fuse_sync_fs,
	.show_options	= fuse_show_options,
};

static void sanitize_global_limit(unsigned *limit)
{
	/*
	 * The default maximum number of async requests is calculated to
	 * consume 1/2^13 of the total memory, assuming 392 bytes per request.
	 */
	if (*limit == 0)
		*limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;

	if (*limit >= 1 << 16)
		*limit = (1 << 16) - 1;
}
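/*
 * Worked example (added commentary, illustrative): on a machine with 4 GiB
 * of RAM the default becomes (4 GiB >> 13) / 392 = 524288 / 392 ~= 1337
 * requests, and the clamp keeps any configured value below 2^16.
 */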
static int set_global_limit(const char *val, const struct kernel_param *kp)
{
	int rv;

	rv = param_set_uint(val, kp);
	if (rv)
		return rv;

	sanitize_global_limit((unsigned *)kp->arg);

	return 0;
}

static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	if (arg->minor < 13)
		return;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	spin_lock(&fc->bg_lock);
	if (arg->max_background) {
		fc->max_background = arg->max_background;

		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	}
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;

		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
	}
	spin_unlock(&fc->bg_lock);
}

struct fuse_init_args {
	struct fuse_args args;
	struct fuse_init_in in;
	struct fuse_init_out out;
};

static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
	struct fuse_init_out *arg = &ia->out;
	bool ok = true;

	if (error || arg->major != FUSE_KERNEL_VERSION)
		ok = false;
	else {
		unsigned long ra_pages;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			u64 flags = arg->flags;

			if (flags & FUSE_INIT_EXT)
				flags |= (u64) arg->flags2 << 32;

			ra_pages = arg->max_readahead / PAGE_SIZE;
			if (flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
			if (arg->minor >= 17) {
				if (!(flags & FUSE_FLOCK_LOCKS))
					fc->no_flock = 1;
			} else {
				if (!(flags & FUSE_POSIX_LOCKS))
					fc->no_flock = 1;
			}
			if (flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			}
			if (flags & FUSE_BIG_WRITES)
				fc->big_writes = 1;
			if (flags & FUSE_DONT_MASK)
				fc->dont_mask = 1;
			if (flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			else if (flags & FUSE_EXPLICIT_INVAL_DATA)
				fc->explicit_inval_data = 1;
			if (flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			}
			if (flags & FUSE_ASYNC_DIO)
				fc->async_dio = 1;
			if (flags & FUSE_WRITEBACK_CACHE)
				fc->writeback_cache = 1;
			if (flags & FUSE_PARALLEL_DIROPS)
				fc->parallel_dirops = 1;
			if (flags & FUSE_HANDLE_KILLPRIV)
				fc->handle_killpriv = 1;
			if (arg->time_gran && arg->time_gran <= 1000000000)
				fm->sb->s_time_gran = arg->time_gran;
			if ((flags & FUSE_POSIX_ACL)) {
				fc->default_permissions = 1;
				fc->posix_acl = 1;
			}
			if (flags & FUSE_CACHE_SYMLINKS)
				fc->cache_symlinks = 1;
			if (flags & FUSE_ABORT_ERROR)
				fc->abort_err = 1;
			if (flags & FUSE_MAX_PAGES) {
				fc->max_pages =
					min_t(unsigned int, fc->max_pages_limit,
					max_t(unsigned int, arg->max_pages, 1));
			}
			if (IS_ENABLED(CONFIG_FUSE_DAX)) {
				if (flags & FUSE_MAP_ALIGNMENT &&
				    !fuse_dax_check_alignment(fc, arg->map_alignment)) {
					ok = false;
				}
				if (flags & FUSE_HAS_INODE_DAX)
					fc->inode_dax = 1;
			}
			if (flags & FUSE_HANDLE_KILLPRIV_V2) {
				fc->handle_killpriv_v2 = 1;
				fm->sb->s_flags |= SB_NOSEC;
			}
			if (flags & FUSE_SETXATTR_EXT)
				fc->setxattr_ext = 1;
			if (flags & FUSE_SECURITY_CTX)
				fc->init_security = 1;
			if (flags & FUSE_CREATE_SUPP_GROUP)
				fc->create_supp_group = 1;
			if (flags & FUSE_DIRECT_IO_RELAX)
				fc->direct_io_relax = 1;
		} else {
			ra_pages = fc->max_read / PAGE_SIZE;
			fc->no_lock = 1;
			fc->no_flock = 1;
		}

		fm->sb->s_bdi->ra_pages =
				min(fm->sb->s_bdi->ra_pages, ra_pages);
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
		fc->max_write = max_t(unsigned, 4096, fc->max_write);
		fc->conn_init = 1;
	}
	kfree(ia);

	if (!ok) {
		fc->conn_init = 0;
		fc->conn_error = 1;
	}

	fuse_set_initialized(fc);
	wake_up_all(&fc->blocked_waitq);
}
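/*
 * Protocol note (added commentary): the negotiated flag word outgrew 32
 * bits, so when the server echoes FUSE_INIT_EXT the second word (flags2)
 * supplies bits 32-63, mirroring the flags/flags2 split performed in
 * fuse_send_init() below.
 */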
void fuse_send_init(struct fuse_mount *fm)
{
	struct fuse_init_args *ia;
	u64 flags;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);

	ia->in.major = FUSE_KERNEL_VERSION;
	ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
	ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
	flags =
		FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
		FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
		FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
		FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
		FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
		FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_RELAX;
#ifdef CONFIG_FUSE_DAX
	if (fm->fc->dax)
		flags |= FUSE_MAP_ALIGNMENT;
	if (fuse_is_inode_dax_mode(fm->fc->dax_mode))
		flags |= FUSE_HAS_INODE_DAX;
#endif
	if (fm->fc->auto_submounts)
		flags |= FUSE_SUBMOUNTS;

	ia->in.flags = flags;
	ia->in.flags2 = flags >> 32;

	ia->args.opcode = FUSE_INIT;
	ia->args.in_numargs = 1;
	ia->args.in_args[0].size = sizeof(ia->in);
	ia->args.in_args[0].value = &ia->in;
	ia->args.out_numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5.  Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	ia->args.out_argvar = true;
	ia->args.out_args[0].size = sizeof(ia->out);
	ia->args.out_args[0].value = &ia->out;
	ia->args.force = true;
	ia->args.nocreds = true;
	ia->args.end = process_init_reply;

	if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
		process_init_reply(fm, &ia->args, -ENOTCONN);
}
EXPORT_SYMBOL_GPL(fuse_send_init);

void fuse_free_conn(struct fuse_conn *fc)
{
	WARN_ON(!list_empty(&fc->devices));
	kfree_rcu(fc, rcu);
}
EXPORT_SYMBOL_GPL(fuse_free_conn);

static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
	int err;
	char *suffix = "";

	if (sb->s_bdev) {
		suffix = "-fuseblk";
		/*
		 * sb->s_bdi points to blkdev's bdi however we want to redirect
		 * it to our private bdi...
		 */
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
	err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
				   MINOR(fc->dev), suffix);
	if (err)
		return err;

	/* fuse does its own writeback accounting */
	sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
	sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;

	/*
	 * For a single fuse filesystem use max 1% of dirty +
	 * writeback threshold.
	 *
	 * This gives about 1M of write buffer for memory maps on a
	 * machine with 1G and 10% dirty_ratio, which should be more
	 * than enough.
	 *
	 * Privileged users can raise it by writing to
	 *
	 *    /sys/class/bdi/<bdi>/max_ratio
	 */
	bdi_set_max_ratio(sb->s_bdi, 1);

	return 0;
}
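/*
 * Worked example for the 1% cap above (added commentary, illustrative): with
 * 1 GiB of RAM and dirty_ratio = 10%, the global dirty threshold is roughly
 * 100 MiB, so this bdi may dirty about 1 MiB, matching the estimate in the
 * comment.
 */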
struct fuse_dev *fuse_dev_alloc(void)
{
	struct fuse_dev *fud;
	struct list_head *pq;

	fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
	if (!fud)
		return NULL;

	pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
	if (!pq) {
		kfree(fud);
		return NULL;
	}

	fud->pq.processing = pq;
	fuse_pqueue_init(&fud->pq);

	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc);

void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
{
	fud->fc = fuse_conn_get(fc);
	spin_lock(&fc->lock);
	list_add_tail(&fud->entry, &fc->devices);
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_dev_install);

struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
{
	struct fuse_dev *fud;

	fud = fuse_dev_alloc();
	if (!fud)
		return NULL;

	fuse_dev_install(fud, fc);
	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);

void fuse_dev_free(struct fuse_dev *fud)
{
	struct fuse_conn *fc = fud->fc;

	if (fc) {
		spin_lock(&fc->lock);
		list_del(&fud->entry);
		spin_unlock(&fc->lock);

		fuse_conn_put(fc);
	}
	kfree(fud->pq.processing);
	kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);

static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
				      const struct fuse_inode *fi)
{
	struct timespec64 ctime = inode_get_ctime(&fi->inode);

	*attr = (struct fuse_attr){
		.ino		= fi->inode.i_ino,
		.size		= fi->inode.i_size,
		.blocks		= fi->inode.i_blocks,
		.atime		= fi->inode.i_atime.tv_sec,
		.mtime		= fi->inode.i_mtime.tv_sec,
		.ctime		= ctime.tv_sec,
		.atimensec	= fi->inode.i_atime.tv_nsec,
		.mtimensec	= fi->inode.i_mtime.tv_nsec,
		.ctimensec	= ctime.tv_nsec,
		.mode		= fi->inode.i_mode,
		.nlink		= fi->inode.i_nlink,
		.uid		= fi->inode.i_uid.val,
		.gid		= fi->inode.i_gid.val,
		.rdev		= fi->inode.i_rdev,
		.blksize	= 1u << fi->inode.i_blkbits,
	};
}
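/*
 * Note (added commentary): this is only used for submounts, where the new
 * superblock's root attributes are synthesized from the already-cached
 * parent inode instead of being re-queried from the server (see
 * fuse_fill_super_submount()).
 */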
static void fuse_sb_defaults(struct super_block *sb)
{
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_xattr = fuse_xattr_handlers;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;
	sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
	if (sb->s_user_ns != &init_user_ns)
		sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
	sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
}

static int fuse_fill_super_submount(struct super_block *sb,
				    struct fuse_inode *parent_fi)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct super_block *parent_sb = parent_fi->inode.i_sb;
	struct fuse_attr root_attr;
	struct inode *root;

	fuse_sb_defaults(sb);
	fm->sb = sb;

	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi_get(parent_sb->s_bdi);

	sb->s_xattr = parent_sb->s_xattr;
	sb->s_time_gran = parent_sb->s_time_gran;
	sb->s_blocksize = parent_sb->s_blocksize;
	sb->s_blocksize_bits = parent_sb->s_blocksize_bits;
	sb->s_subtype = kstrdup(parent_sb->s_subtype, GFP_KERNEL);
	if (parent_sb->s_subtype && !sb->s_subtype)
		return -ENOMEM;

	fuse_fill_attr_from_inode(&root_attr, parent_fi);
	root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0);
	/*
	 * This inode is just a duplicate, so it is not looked up and
	 * its nlookup should not be incremented. fuse_iget() does
	 * that, though, so undo it here.
	 */
	get_fuse_inode(root)->nlookup--;
	sb->s_d_op = &fuse_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;

	return 0;
}
/* Filesystem context private data holds the FUSE inode of the mount point */
static int fuse_get_tree_submount(struct fs_context *fsc)
{
	struct fuse_mount *fm;
	struct fuse_inode *mp_fi = fsc->fs_private;
	struct fuse_conn *fc = get_fuse_conn(&mp_fi->inode);
	struct super_block *sb;
	int err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		return -ENOMEM;

	fm->fc = fuse_conn_get(fc);
	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, NULL, set_anon_super_fc);
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* Initialize superblock, making @mp_fi its root */
	err = fuse_fill_super_submount(sb, mp_fi);
	if (err) {
		deactivate_locked_super(sb);
		return err;
	}

	down_write(&fc->killsb);
	list_add_tail(&fm->fc_entry, &fc->mounts);
	up_write(&fc->killsb);

	sb->s_flags |= SB_ACTIVE;
	fsc->root = dget(sb->s_root);

	return 0;
}

static const struct fs_context_operations fuse_context_submount_ops = {
	.get_tree	= fuse_get_tree_submount,
};

int fuse_init_fs_context_submount(struct fs_context *fsc)
{
	fsc->ops = &fuse_context_submount_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_init_fs_context_submount);
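/*
 * Note on the sget_fc() pattern above (added commentary): when sget_fc()
 * creates a new superblock it consumes fsc->s_fs_info and clears it, so a
 * non-NULL s_fs_info afterwards means an existing superblock was reused and
 * the speculatively allocated fuse_mount must be destroyed.
 */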
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
	struct fuse_dev *fud = NULL;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct inode *root;
	struct dentry *root_dentry;
	int err;

	err = -EINVAL;
	if (sb->s_flags & SB_MANDLOCK)
		goto err;

	rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc());
	fuse_sb_defaults(sb);

	if (ctx->is_bdev) {
#ifdef CONFIG_BLOCK
		err = -EINVAL;
		if (!sb_set_blocksize(sb, ctx->blksize))
			goto err;
#endif
	} else {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;
	}

	sb->s_subtype = ctx->subtype;
	ctx->subtype = NULL;
	if (IS_ENABLED(CONFIG_FUSE_DAX)) {
		err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev);
		if (err)
			goto err;
	}

	if (ctx->fudptr) {
		err = -ENOMEM;
		fud = fuse_dev_alloc_install(fc);
		if (!fud)
			goto err_free_dax;
	}

	fc->dev = sb->s_dev;
	fm->sb = sb;
	err = fuse_bdi_init(fc, sb);
	if (err)
		goto err_dev_free;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & SB_POSIXACL)
		fc->dont_mask = 1;
	sb->s_flags |= SB_POSIXACL;

	fc->default_permissions = ctx->default_permissions;
	fc->allow_other = ctx->allow_other;
	fc->user_id = ctx->user_id;
	fc->group_id = ctx->group_id;
	fc->legacy_opts_show = ctx->legacy_opts_show;
	fc->max_read = max_t(unsigned int, 4096, ctx->max_read);
	fc->destroy = ctx->destroy;
	fc->no_control = ctx->no_control;
	fc->no_force_umount = ctx->no_force_umount;

	err = -ENOMEM;
	root = fuse_get_root_inode(sb, ctx->rootmode);
	sb->s_d_op = &fuse_root_dentry_operations;
	root_dentry = d_make_root(root);
	if (!root_dentry)
		goto err_dev_free;
	/* Root dentry doesn't have .d_revalidate */
	sb->s_d_op = &fuse_dentry_operations;

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	if (ctx->fudptr && *ctx->fudptr)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	if (ctx->fudptr)
		*ctx->fudptr = fud;
	mutex_unlock(&fuse_mutex);
	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
	dput(root_dentry);
 err_dev_free:
	if (fud)
		fuse_dev_free(fud);
 err_free_dax:
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_conn_free(fc);
 err:
	return err;
}
EXPORT_SYMBOL_GPL(fuse_fill_super_common);

static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	int err;

	if (!ctx->file || !ctx->rootmode_present ||
	    !ctx->user_id_present || !ctx->group_id_present)
		return -EINVAL;

	/*
	 * Require mount to happen from the same user namespace which
	 * opened /dev/fuse to prevent potential attacks.
	 */
	if ((ctx->file->f_op != &fuse_dev_operations) ||
	    (ctx->file->f_cred->user_ns != sb->s_user_ns))
		return -EINVAL;
	ctx->fudptr = &ctx->file->private_data;

	err = fuse_fill_super_common(sb, ctx);
	if (err)
		return err;
	/* file->private_data shall be visible on all CPUs after this */
	smp_mb();
	fuse_send_init(get_fuse_mount_super(sb));
	return 0;
}
/*
 * This is the path where user supplied an already initialized fuse dev. In
 * this case never create a new super if the old one is gone.
 */
static int fuse_set_no_super(struct super_block *sb, struct fs_context *fsc)
{
	return -ENOTCONN;
}

static int fuse_test_super(struct super_block *sb, struct fs_context *fsc)
{
	return fsc->sget_key == get_fuse_conn_super(sb);
}

static int fuse_get_tree(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	struct fuse_dev *fud;
	struct fuse_conn *fc;
	struct fuse_mount *fm;
	struct super_block *sb;
	int err;

	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		kfree(fc);
		return -ENOMEM;
	}

	fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
	fc->release = fuse_free_conn;

	fsc->s_fs_info = fm;

	if (ctx->fd_present)
		ctx->file = fget(ctx->fd);

	if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
		err = get_tree_bdev(fsc, fuse_fill_super);
		goto out;
	}
	/*
	 * While block dev mount can be initialized with a dummy device fd
	 * (found by device name), normal fuse mounts can't
	 */
	err = -EINVAL;
	if (!ctx->file)
		goto out;

	/*
	 * Allow creating a fuse mount with an already initialized fuse
	 * connection
	 */
	fud = READ_ONCE(ctx->file->private_data);
	if (ctx->file->f_op == &fuse_dev_operations && fud) {
		fsc->sget_key = fud->fc;
		sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
		err = PTR_ERR_OR_ZERO(sb);
		if (!IS_ERR(sb))
			fsc->root = dget(sb->s_root);
	} else {
		err = get_tree_nodev(fsc, fuse_fill_super);
	}
out:
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (ctx->file)
		fput(ctx->file);
	return err;
}
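/*
 * Summary of the three mount paths above (added commentary): fuseblk goes
 * through get_tree_bdev(); a /dev/fuse fd whose connection is already
 * initialized reuses the existing superblock via sget_fc() with
 * fuse_test_super(); everything else creates a fresh nodev superblock.
 */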
static const struct fs_context_operations fuse_context_ops = {
	.free		= fuse_free_fsc,
	.parse_param	= fuse_parse_param,
	.reconfigure	= fuse_reconfigure,
	.get_tree	= fuse_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int fuse_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_read = ~0;
	ctx->blksize = FUSE_DEFAULT_BLKSIZE;
	ctx->legacy_opts_show = true;

#ifdef CONFIG_BLOCK
	if (fsc->fs_type == &fuseblk_fs_type) {
		ctx->is_bdev = true;
		ctx->destroy = true;
	}
#endif

	fsc->fs_private = ctx;
	fsc->ops = &fuse_context_ops;
	return 0;
}

bool fuse_mount_remove(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	bool last = false;

	down_write(&fc->killsb);
	list_del_init(&fm->fc_entry);
	if (list_empty(&fc->mounts))
		last = true;
	up_write(&fc->killsb);

	return last;
}
EXPORT_SYMBOL_GPL(fuse_mount_remove);

void fuse_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;

	if (fc->destroy)
		fuse_send_destroy(fm);

	fuse_abort_conn(fc);
	fuse_wait_aborted(fc);

	if (!list_empty(&fc->entry)) {
		mutex_lock(&fuse_mutex);
		list_del(&fc->entry);
		fuse_ctl_remove_conn(fc);
		mutex_unlock(&fuse_mutex);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_destroy);

static void fuse_sb_destroy(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			fuse_conn_destroy(fm);
	}
}

void fuse_mount_destroy(struct fuse_mount *fm)
{
	fuse_conn_put(fm->fc);
	kfree(fm);
}
EXPORT_SYMBOL(fuse_mount_destroy);

static void fuse_kill_sb_anon(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_anon_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}

static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.fs_flags	= FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");

#ifdef CONFIG_BLOCK
static void fuse_kill_sb_blk(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_block_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}

static struct file_system_type fuseblk_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuseblk",
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_blk,
	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
MODULE_ALIAS_FS("fuseblk");

static inline int register_fuseblk(void)
{
	return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
	unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
	return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif
static void fuse_inode_init_once(void *foo)
{
	struct inode *inode = foo;

	inode_init_once(inode);
}

static int __init fuse_fs_init(void)
{
	int err;

	fuse_inode_cachep = kmem_cache_create("fuse_inode",
			sizeof(struct fuse_inode), 0,
			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
			fuse_inode_init_once);
	err = -ENOMEM;
	if (!fuse_inode_cachep)
		goto out;

	err = register_fuseblk();
	if (err)
		goto out2;

	err = register_filesystem(&fuse_fs_type);
	if (err)
		goto out3;

	return 0;

 out3:
	unregister_fuseblk();
 out2:
	kmem_cache_destroy(fuse_inode_cachep);
 out:
	return err;
}

static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(fuse_inode_cachep);
}

static struct kobject *fuse_kobj;

static int fuse_sysfs_init(void)
{
	int err;

	fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
	if (!fuse_kobj) {
		err = -ENOMEM;
		goto out_err;
	}

	err = sysfs_create_mount_point(fuse_kobj, "connections");
	if (err)
		goto out_fuse_unregister;

	return 0;

 out_fuse_unregister:
	kobject_put(fuse_kobj);
 out_err:
	return err;
}

static void fuse_sysfs_cleanup(void)
{
	sysfs_remove_mount_point(fuse_kobj, "connections");
	kobject_put(fuse_kobj);
}

static int __init fuse_init(void)
{
	int res;

	pr_info("init (API version %i.%i)\n",
		FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}

static void __exit fuse_exit(void)
{
	pr_debug("exit\n");

	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);