/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include "internal.h"


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
{
	struct super_block *sb;
	int	fs_objects = 0;
	int	total_objects;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;

	if (!grab_super_passive(sb))
		return -1;

	if (sb->s_op && sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb);

	total_objects = sb->s_nr_dentry_unused +
			sb->s_nr_inodes_unused + fs_objects + 1;

	if (sc->nr_to_scan) {
		int	dentries;
		int	inodes;

		/* proportion the scan between the caches */
		dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
							total_objects;
		inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
							total_objects;
		if (fs_objects)
			fs_objects = (sc->nr_to_scan * fs_objects) /
							total_objects;
		/*
		 * prune the dcache first as the icache is pinned by it, then
		 * prune the icache, followed by the filesystem specific caches
		 */
		prune_dcache_sb(sb, dentries);
		prune_icache_sb(sb, inodes);

		if (fs_objects && sb->s_op->free_cached_objects) {
			sb->s_op->free_cached_objects(sb, fs_objects);
			fs_objects = sb->s_op->nr_cached_objects(sb);
		}
		total_objects = sb->s_nr_dentry_unused +
				sb->s_nr_inodes_unused + fs_objects;
	}

	total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
	drop_super(sb);
	return total_objects;
}
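/*
 * Worked example of the proportioning above (illustrative, not part of
 * the original file): with sc->nr_to_scan = 128, 300 unused dentries,
 * 100 unused inodes and no fs-private objects, total_objects = 401, so
 *
 *	dentries = 128 * 300 / 401 = 95
 *	inodes   = 128 * 100 / 401 = 31
 *
 * i.e. each cache is scanned in proportion to its share of the unused
 * objects; the "+ 1" in total_objects merely guards against dividing
 * by zero when every count is 0.
 */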
/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to a new superblock or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			kfree(s);
			s = NULL;
			goto out;
		}
#ifdef CONFIG_SMP
		s->s_files = alloc_percpu(struct list_head);
		if (!s->s_files) {
			security_sb_free(s);
			kfree(s);
			s = NULL;
			goto out;
		} else {
			int i;

			for_each_possible_cpu(i)
				INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
		}
#else
		INIT_LIST_HEAD(&s->s_files);
#endif
		s->s_bdi = &default_backing_dev_info;
		INIT_LIST_HEAD(&s->s_instances);
		INIT_HLIST_BL_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		INIT_LIST_HEAD(&s->s_dentry_lru);
		INIT_LIST_HEAD(&s->s_inode_lru);
		spin_lock_init(&s->s_inode_lru_lock);
		init_rwsem(&s->s_umount);
		mutex_init(&s->s_lock);
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * The locking rules for s_lock are up to the
		 * filesystem. For example ext3fs has different
		 * lock ordering than usbfs:
		 */
		lockdep_set_class(&s->s_lock, &type->s_lock_key);
		/*
		 * sget() can have s_umount recursion.
		 *
		 * When it cannot find a suitable sb, it allocates a new
		 * one (this one), and tries again to find a suitable old
		 * one.
		 *
		 * In case that succeeds, it will acquire the s_umount
		 * lock of the old one. Since these are clearly distinct
		 * locks, and this object isn't exposed yet, there's no
		 * risk of deadlocks.
		 *
		 * Annotate this by putting this lock in a different
		 * subclass.
		 */
		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
		s->s_count = 1;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		init_waitqueue_head(&s->s_wait_unfrozen);
		s->s_maxbytes = MAX_NON_LFS;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;
		s->cleancache_poolid = -1;

		s->s_shrink.seeks = DEFAULT_SEEKS;
		s->s_shrink.shrink = prune_super;
		s->s_shrink.batch = 1024;
	}
out:
	return s;
}

/**
 *	destroy_super	-	frees a superblock
 *	@s: superblock to free
 *
 *	Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
#ifdef CONFIG_SMP
	free_percpu(s->s_files);
#endif
	security_sb_free(s);
	kfree(s->s_subtype);
	kfree(s->s_options);
	kfree(s);
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
void __put_super(struct super_block *sb)
{
	if (!--sb->s_count) {
		list_del_init(&sb->s_list);
		destroy_super(sb);
	}
}
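/*
 * Two reference counts are in play here (summary added for clarity, not
 * from the original file): s_active keeps the filesystem instance alive
 * and mounted, while s_count only pins the super_block structure itself.
 * The typical passive-reference pattern, as used by the list walkers
 * further down, looks like:
 *
 *	spin_lock(&sb_lock);
 *	sb->s_count++;
 *	spin_unlock(&sb_lock);
 *	... sb cannot be freed here, but may be shut down ...
 *	put_super(sb);
 */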
/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there are no
 *	references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}


/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_flush_fs(s);
		fs->kill_sb(s);

		/* caches are now gone, we can safely kill the shrinker now */
		unregister_shrinker(&s->s_shrink);

		/*
		 * We need to call rcu_barrier so all the delayed rcu free
		 * inodes are flushed before we release the fs module.
		 */
		rcu_barrier();
		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 on failure (the superblock was already dead or dying
 *	when grab_super() was called).
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	if (atomic_inc_not_zero(&s->s_active)) {
		spin_unlock(&sb_lock);
		return 1;
	}
	/* it's going away */
	s->s_count++;
	spin_unlock(&sb_lock);
	/* wait for it to die */
	down_write(&s->s_umount);
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}

/*
 *	grab_super_passive - acquire a passive reference
 *	@s: reference we are trying to grab
 *
 *	Tries to acquire a passive reference.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	superblock does not go away while we are working on it.  It returns
 *	false if a reference was not gained, and returns true with the s_umount
 *	lock held in read mode if a reference is gained.  On successful return,
 *	the caller must drop the s_umount lock and the passive reference when
 *	done.
 */
bool grab_super_passive(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
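/*
 * Usage sketch for grab_super_passive() (illustrative; this mirrors how
 * prune_super() above drives it):
 *
 *	if (!grab_super_passive(sb))
 *		return -1;
 *	... sb->s_root is valid and s_umount is held shared here ...
 *	drop_super(sb);
 *
 * drop_super() below releases both the shared s_umount lock and the
 * passive reference in a single call.
 */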
/*
 * Superblock locking.  We really ought to get rid of these two.
 */
void lock_super(struct super_block * sb)
{
	mutex_lock(&sb->s_lock);
}

void unlock_super(struct super_block * sb)
{
	mutex_unlock(&sb->s_lock);
}

EXPORT_SYMBOL(lock_super);
EXPORT_SYMBOL(unlock_super);

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~MS_ACTIVE;

		fsnotify_unmount_inodes(&sb->s_inodes);

		evict_inodes(sb);

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	list_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);
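/*
 * Sketch of a typical ->kill_sb() built on the helper above (illustrative
 * only; "myfs" and its fields are hypothetical, not part of this file):
 *
 *	static void myfs_kill_sb(struct super_block *sb)
 *	{
 *		struct myfs_sb_info *sbi = sb->s_fs_info;
 *
 *		generic_shutdown_super(sb);
 *		kfree(sbi);
 *	}
 *
 * The fs-private state is picked out first, the generic teardown runs,
 * and only then is the private state released, matching the contract
 * described in the comment above.
 */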
/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@data:	argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		list_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (!grab_super(old))
				goto retry;
			if (s) {
				up_write(&s->s_umount);
				destroy_super(s);
				s = NULL;
			}
			down_write(&old->s_umount);
			if (unlikely(!(old->s_flags & MS_BORN))) {
				deactivate_locked_super(old);
				goto retry;
			}
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		up_write(&s->s_umount);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	list_add(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker(&s->s_shrink);
	return s;
}

EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 *	sync_supers - helper for periodic superblock writeback
 *
 *	Call the write_super method if present on all dirty superblocks in
 *	the system.  This is for the periodic writeback used by most older
 *	filesystems.  For data integrity superblock writeback use
 *	sync_filesystems() instead.
 *
 *	Note: check the dirty flag before waiting, so we don't
 *	hold up the sync while mounting a device.  (The newly
 *	mounted device won't need syncing.)
 */
void sync_supers(void)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_op->write_super && sb->s_dirt) {
			sb->s_count++;
			spin_unlock(&sb_lock);

			down_read(&sb->s_umount);
			if (sb->s_root && sb->s_dirt)
				sb->s_op->write_super(sb);
			up_read(&sb->s_umount);

			spin_lock(&sb_lock);
			if (p)
				__put_super(p);
			p = sb;
		}
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root)
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
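/*
 * Usage sketch for iterate_supers() (illustrative; the callback name is
 * hypothetical).  The callback runs with s_umount held shared and a live
 * sb->s_root, so it may inspect or sync the superblock but must not
 * unmount it:
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!(sb->s_flags & MS_RDONLY))
 *			sync_filesystem(sb);
 *	}
 *
 *	iterate_supers(sync_one_sb, NULL);
 */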
/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root)
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given. %NULL is returned if no match is found.
 */

struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);
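/*
 * Usage sketch for get_super() (illustrative; this is the pattern used
 * by callers such as fsync_bdev()):
 *
 *	struct super_block *sb = get_super(bdev);
 *	if (sb) {
 *		sync_filesystem(sb);
 *		drop_super(sb);
 *	}
 *
 * On success the superblock comes back with s_umount held shared, which
 * is why drop_super() is the matching release.
 */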
/**
 *	get_active_super - get an active reference to the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given.  Returns the superblock with an active
 *	reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (grab_super(sb)) /* drops sb_lock */
				return sb;
			else
				goto restart;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 *	do_remount_sb - asks filesystem to change mount options.
 *	@sb:	superblock in question
 *	@flags:	numeric part of options
 *	@data:	the rest of options
 *	@force:	whether or not to force the change
 *
 *	Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_ro;

	if (sb->s_frozen != SB_UNFROZEN)
		return -EBUSY;

#ifdef CONFIG_BLOCK
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif

	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	sync_filesystem(sb);

	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
		if (force)
			mark_files_ro(sb);
		else if (!fs_may_remount_ro(sb))
			return -EBUSY;
	}

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval)
			return retval;
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (e.g. use a private mapping, or directories in
	 * pagecache, etc).  Also file data modifications go via their own
	 * mappings.  So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;
}
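/*
 * Worked example of the flag update above (illustrative): only the bits
 * in MS_RMT_MASK may change on remount.  Remounting a read-write mount
 * with flags = MS_RDONLY therefore yields
 *
 *	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (MS_RDONLY & MS_RMT_MASK)
 *
 * so MS_RDONLY is taken from the new flags, while bits outside the mask,
 * such as MS_ACTIVE, are preserved from the old s_flags.
 */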
static void do_emergency_remount(struct work_struct *work)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
			/*
			 * What lock protects sb->s_flags??
			 */
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
		}
		up_write(&sb->s_umount);
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
	int dev;
	int error;

retry:
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	*p = MKDEV(0, dev & MINORMASK);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	int slot = MINOR(dev);
	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	int error = get_anon_bdev(&s->s_dev);
	if (!error)
		s->s_bdi = &noop_backing_dev_info;
	return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
	void *data, int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;

	sb = sget(fs_type, ns_test_super, ns_set_super, data);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		int err;
		sb->s_flags = flags;
		err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}

		sb->s_flags |= MS_ACTIVE;
	}

	return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);
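/*
 * Usage sketch for mount_ns() (illustrative; the "myns" names are
 * hypothetical).  A namespace-backed filesystem passes its namespace
 * pointer as @data, so every mount of the same namespace shares one
 * superblock, matched via ns_test_super() on s_fs_info:
 *
 *	static struct dentry *myns_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		struct myns_namespace *ns = get_current_myns();
 *
 *		return mount_ns(fs_type, flags, ns, myns_fill_super);
 *	}
 */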
#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue's backing_dev_info; file
	 * systems can override this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_flags = flags | MS_NOSEC;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif
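/*
 * Usage sketch for mount_bdev() (illustrative; the "myfs" names are
 * hypothetical).  A block-device-backed filesystem's ->mount typically
 * just forwards here with its fill_super callback, and pairs it with
 * kill_block_super() as its ->kill_sb:
 *
 *	static struct dentry *myfs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  myfs_fill_super);
 *	}
 */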
struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	s->s_flags = flags;

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= MS_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		s->s_flags = flags;
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= MS_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct dentry *root;
	struct super_block *sb;
	char *secdata = NULL;
	int error = -ENOMEM;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	root = type->mount(type, flags, name, data);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		goto out_free_secdata;
	}
	sb = root->d_sb;
	BUG_ON(!sb);
	WARN_ON(!sb->s_bdi);
	WARN_ON(sb->s_bdi == &default_backing_dev_info);
	sb->s_flags |= MS_BORN;

	error = security_sb_kern_mount(sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, sb->s_maxbytes);

	up_write(&sb->s_umount);
	free_secdata(secdata);
	return root;
out_sb:
	dput(root);
	deactivate_locked_super(sb);
out_free_secdata:
	free_secdata(secdata);
out:
	return ERR_PTR(error);
}
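/*
 * Usage sketch contrasting the helpers above (illustrative; the "myfs"
 * names are hypothetical).  mount_single() gives every mount the same
 * superblock instance, which suits pseudo filesystems with one global
 * tree, while mount_nodev() creates a fresh instance per mount:
 *
 *	static struct dentry *myfs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_single(fs_type, flags, data, myfs_fill_super);
 *	}
 */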
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_frozen) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (sb->s_flags & MS_RDONLY) {
		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();
		up_write(&sb->s_umount);
		return 0;
	}

	sb->s_frozen = SB_FREEZE_WRITE;
	smp_wmb();

	sync_filesystem(sb);

	sb->s_frozen = SB_FREEZE_TRANS;
	smp_wmb();

	sync_blockdev(sb->s_bdev);
	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS: Filesystem freeze failed\n");
			sb->s_frozen = SB_UNFROZEN;
			deactivate_locked_super(sb);
			return ret;
		}
	}
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	int error;

	down_write(&sb->s_umount);
	if (sb->s_frozen == SB_UNFROZEN) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb->s_flags & MS_RDONLY)
		goto out;

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS: Filesystem thaw failed\n");
			sb->s_frozen = SB_FREEZE_TRANS;
			up_write(&sb->s_umount);
			return error;
		}
	}

out:
	sb->s_frozen = SB_UNFROZEN;
	smp_wmb();
	wake_up(&sb->s_wait_unfrozen);
	deactivate_locked_super(sb);

	return 0;
}
EXPORT_SYMBOL(thaw_super);
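/*
 * Pairing sketch for the two helpers above (illustrative; this mirrors
 * the kind of management path the FIFREEZE/FITHAW ioctls use, with
 * error handling trimmed):
 *
 *	ret = freeze_super(sb);
 *	if (ret)
 *		return ret;
 *	... take a block-level snapshot of the quiesced device ...
 *	return thaw_super(sb);
 *
 * freeze_super() returns with the filesystem quiesced and an extra
 * active reference held; thaw_super() drops that reference again via
 * deactivate_locked_super().
 */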