// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!trylock_super(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	up_read(&sb->s_umount);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * We don't call trylock_super() here as it is a scalability bottleneck,
	 * so we're exposed to partial setup state. The shrinker rwsem does not
	 * protect filesystem operations backing list_lru_shrink_count() or
	 * s_op->nr_cached_objects(). Counts can change between
	 * super_cache_count and super_cache_scan, so we really don't need locks
	 * here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  trylock_super() uses a SB_BORN check to
	 * avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	up_write(&s->s_umount);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	free_prealloced_shrinker(&s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 * @user_ns: User namespace for the super_block
 *
 * Allocates and initializes a new &struct super_block.  alloc_super()
 * returns a pointer to a new superblock or %NULL if allocation failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	init_waitqueue_head(&s->s_writers.wait_unfrozen);
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink.seeks = DEFAULT_SEEKS;
	s->s_shrink.scan_objects = super_cache_scan;
	s->s_shrink.count_objects = super_cache_count;
	s->s_shrink.batch = 1024;
	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
	if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
		goto fail;
	if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		security_sb_free(s);
		fscrypt_destroy_keyring(s);
		put_user_ns(s->s_user_ns);
		kfree(s->s_subtype);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees the superblock if there are no
 * references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}


/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left.  In that case we
 * tell the fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		unregister_shrinker(&s->s_shrink);
		fs->kill_sb(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller.  If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference.  grab_super() is used when we
 * had just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference.  grab_super()
 * is called with sb_lock held and drops it.  Returns 1 in case of
 * success, 0 if we had failed (superblock contents were already dead or
 * dying when grab_super() had been called).  Note that this is only
 * called for superblocks not in rundown mode (== ones still on ->fs_supers
 * of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}

/*
 * trylock_super - try to grab ->s_umount shared
 * @sb: reference we are trying to grab
 *
 * Try to prevent fs shutdown.  This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * filesystem is not shut down while we are working on it. It returns
 * false if we cannot acquire s_umount or if we lose the race and
 * filesystem already got into shutdown, and returns true with the s_umount
 * lock held in read mode in case of success. On successful return,
 * the caller must drop the s_umount lock when done.
 *
 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
 * The reason why it's safe is that we are OK with doing trylock instead
 * of down_read().  There are a couple of places that are OK with that, but
 * it's very much not a general-purpose interface.
 */
bool trylock_super(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!hlist_unhashed(&sb->s_instances) &&
		    sb->s_root && (sb->s_flags & SB_BORN))
			return true;
		up_read(&sb->s_umount);
	}

	return false;
}

/**
 * retire_super - prevents superblock from being reused
 * @sb: superblock to retire
 *
 * The function marks the superblock to be ignored in the superblock test,
 * which prevents it from being reused for any new mounts.  If the superblock
 * has a private bdi, it also unregisters it, but doesn't reduce the refcount
 * of the superblock to prevent potential races.  The refcount is reduced
 * by generic_shutdown_super().  The function cannot be called
 * concurrently with generic_shutdown_super().  It is safe to call the
 * function multiple times, subsequent calls have no effect.
 *
 * The marker will affect the re-use only for block-device-based
 * superblocks.  Other superblocks will still get marked if this function
 * is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	down_write(&sb->s_umount);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	up_write(&sb->s_umount);
}
EXPORT_SYMBOL(retire_super);
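
/*
 * Illustrative sketch (not part of this file): a block-device-based
 * filesystem that hits an unrecoverable error might retire its superblock
 * before tearing things down, so that a later mount of the same device gets
 * a fresh superblock rather than reusing this one.
 * examplefs_force_shutdown() is a hypothetical helper.
 *
 *	static void examplefs_force_shutdown(struct super_block *sb)
 *	{
 *		retire_super(sb);
 *		// hypothetical fs-specific shutdown work follows
 *	}
 */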

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects.  Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount();

		/* evict all inodes with zero refcount */
		evict_inodes(sb);
		/* only nonzero refcount inodes can have marks */
		fsnotify_sb_delete(sb);
		fscrypt_destroy_keyring(sb);
		security_sb_delete(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc: Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Find or create a superblock using the parameters stored in the filesystem
 * context and the two callback functions.
 *
 * If an extant superblock is matched, then that will be returned with an
 * elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be set and
 * the set() callback will be invoked), the superblock will be published and it
 * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
 * as yet unset.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	register_shrinker_prepared(&s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);
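
/*
 * Illustrative sketch (not part of this file): roughly how a filesystem's
 * ->get_tree() might drive sget_fc() directly.  The names example_get_tree()
 * and example_fill_super() are hypothetical; most filesystems would instead
 * use the get_tree_nodev()/get_tree_bdev() helpers further down, which wrap
 * this same pattern.
 *
 *	static int example_fill_super(struct super_block *sb,
 *				      struct fs_context *fc)
 *	{
 *		// hypothetical: set s_op/s_magic, allocate the root inode,
 *		// set sb->s_root, ...
 *		return 0;
 *	}
 *
 *	static int example_get_tree(struct fs_context *fc)
 *	{
 *		struct super_block *sb;
 *		int err;
 *
 *		sb = sget_fc(fc, NULL, set_anon_super_fc);
 *		if (IS_ERR(sb))
 *			return PTR_ERR(sb);
 *
 *		if (!sb->s_root) {
 *			err = example_fill_super(sb, fc);
 *			if (err) {
 *				deactivate_locked_super(sb);
 *				return err;
 *			}
 *			sb->s_flags |= SB_ACTIVE;
 *		}
 *
 *		fc->root = dget(sb->s_root);
 *		return 0;
 *	}
 */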

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & SB_SUBMOUNT)
		user_ns = &init_user_ns;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker_prepared(&s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	up_write(&sb->s_umount);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

static void __iterate_supers(void (*f)(struct super_block *))
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		f(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & SB_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & SB_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);
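
/*
 * Illustrative sketch (not part of this file): how a caller might use
 * iterate_supers().  The callback runs with s_umount held shared and only
 * for superblocks that are fully born, so it may safely inspect the
 * superblock.  example_count_writable() and example_count_writable_supers()
 * are hypothetical.
 *
 *	static void example_count_writable(struct super_block *sb, void *arg)
 *	{
 *		unsigned long *count = arg;
 *
 *		if (!sb_rdonly(sb))
 *			(*count)++;
 *	}
 *
 *	static unsigned long example_count_writable_supers(void)
 *	{
 *		unsigned long count = 0;
 *
 *		iterate_supers(example_count_writable, &count);
 *		return count;
 *	}
 */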

/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & SB_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				goto restart;
			up_write(&sb->s_umount);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			if (excl)
				down_write(&sb->s_umount);
			else
				down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & SB_BORN))
				return sb;
			if (excl)
				up_write(&sb->s_umount);
			else
				up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
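
/*
 * Illustrative sketch (not part of this file): the usual pattern for the
 * lookup helpers above.  get_super() returns the superblock with s_umount
 * held shared and an elevated s_count; drop_super() undoes both.
 * example_inspect_bdev_sb() is hypothetical.
 *
 *	static void example_inspect_bdev_sb(struct block_device *bdev)
 *	{
 *		struct super_block *sb = get_super(bdev);
 *
 *		if (!sb)
 *			return;
 *		// sb->s_root is valid and SB_BORN is set here
 *		pr_info("%s is mounted\n", sb->s_id);
 *		drop_super(sb);
 *	}
 */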

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif

		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			up_write(&sb->s_umount);
			group_pin_kill(&sb->s_pins);
			down_write(&sb->s_umount);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb->s_readonly_remount = 1;
			smp_wmb();
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	/* Needs to be ordered wrt mnt_is_readonly() */
	smp_wmb();
	sb->s_readonly_remount = 0;

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (e.g. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb->s_readonly_remount = 0;
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb)
{
	down_write(&sb->s_umount);
	if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
	    !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb)
{
	down_write(&sb->s_umount);
	if (sb->s_root && sb->s_flags & SB_BORN) {
		emergency_thaw_bdev(sb);
		thaw_super_locked(sb);
	} else {
		up_write(&sb->s_umount);
	}
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc, bool reconf,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
		fc->root = dget(sb->s_root);
	} else {
		fc->root = dget(sb->s_root);
		if (reconf) {
			err = reconfigure_super(fc);
			if (err < 0) {
				dput(fc->root);
				fc->root = NULL;
				goto error;
			}
		}
	}

	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, false, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, false, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_single_reconf(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, true, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single_reconf);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, false, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
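
/*
 * Illustrative sketch (not part of this file): how a filesystem typically
 * wires the helpers above into its fs_context_operations.  The examplefs_*
 * names are hypothetical.
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					struct fs_context *fc)
 *	{
 *		// hypothetical: set s_op, s_magic, create the root inode
 *		// and dentry, ...
 *		return 0;
 *	}
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_nodev(fc, examplefs_fill_super);
 *	}
 *
 *	static const struct fs_context_operations examplefs_context_ops = {
 *		.get_tree	= examplefs_get_tree,
 *	};
 */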

#ifdef CONFIG_BLOCK

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	s->s_bdi = bdi_get(s->s_bdev->bd_disk->bdi);

	if (bdev_stable_writes(s->s_bdev))
		s->s_iflags |= SB_I_STABLE_WRITES;
	return 0;
}

static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) && s->s_bdev == fc->sget_key;
}

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(fc->sb_flags & SB_RDONLY))
		mode |= FMODE_WRITE;

	if (!fc->source)
		return invalf(fc, "No source specified");

	bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
	if (IS_ERR(bdev)) {
		errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev);
	}

	/* Once the superblock is inserted into the list by sget_fc(), s_umount
	 * will protect the lockfs code from trying to start a snapshot while
	 * we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		blkdev_put(bdev, mode);
		return -EBUSY;
	}

	fc->sb_flags |= SB_NOSEC;
	fc->sget_key = bdev;
	s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s)) {
		blkdev_put(bdev, mode);
		return PTR_ERR(s);
	}

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", bdev);
			deactivate_locked_super(s);
			blkdev_put(bdev, mode);
			return -EBUSY;
		}

		/*
		 * s_umount nests inside open_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * open_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		s->s_mode = mode;
		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
		shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
					fc->fs_type->name, s->s_id);
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}

		s->s_flags |= SB_ACTIVE;
		bdev->bd_super = s;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL(get_tree_bdev);

static int test_bdev_super(struct super_block *s, void *data)
{
	return !(s->s_iflags & SB_I_RETIRED) && (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & SB_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
		 bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside open_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * open_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		s->s_mode = mode;
		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
		shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
					fs_type->name, s->s_id);
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= SB_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
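
/*
 * Illustrative sketch (not part of this file): how a legacy block-device
 * filesystem plugs mount_bdev() into its file_system_type.  The examplefs_*
 * names are hypothetical.
 *
 *	static int examplefs_bdev_fill_super(struct super_block *sb, void *data,
 *					     int silent)
 *	{
 *		// hypothetical: read the on-disk superblock, set up s_op,
 *		// s_root, ...
 *		return 0;
 *	}
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *					      int flags, const char *dev_name,
 *					      void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  examplefs_bdev_fill_super);
 *	}
 *
 *	static struct file_system_type examplefs_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "examplefs",
 *		.mount		= examplefs_mount,
 *		.kill_sb	= kill_block_super,
 *		.fs_flags	= FS_REQUIRES_DEV,
 *	};
 */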

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

int reconfigure_single(struct super_block *s,
		       int flags, void *data)
{
	struct fs_context *fc;
	int ret;

	/* The caller really needs to be passing fc down into mount_single(),
	 * then a chunk of this can be removed.  [Bollocks -- AV]
	 * Better yet, reconfiguration shouldn't happen, but rather the second
	 * mount should be rejected if the parameters are not compatible.
	 */
	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	ret = parse_monolithic_mount_data(fc, data);
	if (ret < 0)
		goto out;

	ret = reconfigure_super(fc);
out:
	put_fs_context(fc);
	return ret;
}

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (!error)
			s->s_flags |= SB_ACTIVE;
	} else {
		error = reconfigure_single(s, flags, data);
	}
	if (unlikely(error)) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then later
 * be used for mounting.  The filesystem places a pointer to the root to be
 * used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;

	/* Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;

	if (!fc->root) {
		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
		       fc->fs_type->name);
		/* We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
		BUG();
	}

	sb = fc->root->d_sb;
	WARN_ON(!sb->s_bdi);

	/*
	 * Write barrier is for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is the
	 * superblock structure contents that we just set up, not the SB_BORN
	 * flag.
	 */
	smp_wmb();
	sb->s_flags |= SB_BORN;

	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);

	return 0;
}
EXPORT_SYMBOL(vfs_get_tree);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	sb->s_iflags |= SB_I_PERSB_BDI;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);
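
/*
 * Illustrative sketch (not part of this file): a filesystem that does its
 * own writeback typically sets up a private BDI from its fill_super, e.g.
 * via super_setup_bdi_name() with an instance-unique name.  The examplefs_*
 * names and the instance id are hypothetical.
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					struct fs_context *fc)
 *	{
 *		unsigned int instance = 0;	// hypothetical per-instance id
 *		int err;
 *
 *		err = super_setup_bdi_name(sb, "examplefs-%u", instance);
 *		if (err)
 *			return err;
 *		// rest of superblock setup ...
 *		return 0;
 *	}
 */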

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb, int level)
{
	for (level--; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_UNFROZEN) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (!(sb->s_flags & SB_BORN)) {
		up_write(&sb->s_umount);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		up_write(&sb->s_umount);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	up_write(&sb->s_umount);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	down_write(&sb->s_umount);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	ret = sync_filesystem(sb);
	if (ret) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
		wake_up(&sb->s_writers.wait_unfrozen);
		deactivate_locked_super(sb);
		return ret;
	}

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb, SB_FREEZE_FS);
			wake_up(&sb->s_writers.wait_unfrozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	lockdep_sb_freeze_release(sb);
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);

static int thaw_super_locked(struct super_block *sb)
{
	int error;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		goto out;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS:Filesystem thaw failed\n");
			lockdep_sb_freeze_release(sb);
			up_write(&sb->s_umount);
			return error;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	sb_freeze_unlock(sb, SB_FREEZE_FS);
out:
	wake_up(&sb->s_writers.wait_unfrozen);
	deactivate_locked_super(sb);
	return 0;
}

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	down_write(&sb->s_umount);
	return thaw_super_locked(sb);
}
EXPORT_SYMBOL(thaw_super);
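
/*
 * Illustrative sketch (not part of this file): a caller that needs the
 * filesystem quiesced, e.g. before taking a device snapshot, brackets the
 * operation with freeze_super()/thaw_super().  The caller is assumed to
 * hold an active reference to @sb (for instance from get_active_super());
 * example_with_fs_frozen() and the work callback are hypothetical.
 *
 *	static int example_with_fs_frozen(struct super_block *sb,
 *					  int (*work)(struct super_block *sb))
 *	{
 *		int err;
 *
 *		err = freeze_super(sb);
 *		if (err)
 *			return err;
 *
 *		err = work(sb);		// fs is consistent, writes blocked
 *
 *		thaw_super(sb);		// always thaw, even if work() failed
 *		return err;
 *	}
 */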