/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/parser.h>
#include <linux/ctype.h>
#include <linux/namei.h>
#include <linux/miscdevice.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/cleancache.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include "delayed-inode.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "hash.h"
#include "props.h"
#include "xattr.h"
#include "volumes.h"
#include "export.h"
#include "compression.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "free-space-cache.h"
#include "backref.h"
#include "tests/btrfs-tests.h"

#include "qgroup.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>

static const struct super_operations btrfs_super_ops;
static struct file_system_type btrfs_fs_type;

static int btrfs_remount(struct super_block *sb, int *flags, char *data);

const char *btrfs_decode_error(int errno)
{
	char *errstr = "unknown";

	switch (errno) {
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		errstr = "Readonly filesystem";
		break;
	case -EEXIST:
		errstr = "Object already exists";
		break;
	case -ENOSPC:
		errstr = "No space left";
		break;
	case -ENOENT:
		errstr = "No such entry";
		break;
	}

	return errstr;
}

static void save_error_info(struct btrfs_fs_info *fs_info)
{
	/*
	 * Today we only save the error info into RAM.  Long term we'll
	 * also send it down to the disk.
	 */
	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
}

/* btrfs handles errors by forcing the filesystem read-only */
static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
{
	struct super_block *sb = fs_info->sb;

	if (sb->s_flags & MS_RDONLY)
		return;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		sb->s_flags |= MS_RDONLY;
		btrfs_info(fs_info, "forced readonly");
		/*
		 * Note that a running device replace operation is not
		 * canceled here although there is no way to update
		 * the progress. It would add the risk of a deadlock,
		 * therefore the canceling is omitted.
		 * The only penalty is that some I/O remains active until
		 * the procedure completes. The next time the filesystem is
		 * mounted writeable again, the device replace operation
		 * continues.
		 */
	}
}

#ifdef CONFIG_PRINTK
/*
 * __btrfs_std_error decodes expected errors from the caller and
 * invokes the appropriate error response.
 */
__cold
void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
		       unsigned int line, int errno, const char *fmt, ...)
{
	struct super_block *sb = fs_info->sb;
	const char *errstr;

	/*
	 * Special case: if the error is EROFS, and we're already
	 * under MS_RDONLY, then it is safe here.
	 */
	if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
		return;

	errstr = btrfs_decode_error(errno);
	if (fmt) {
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_CRIT
			"BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
			sb->s_id, function, line, errno, errstr, &vaf);
		va_end(args);
	} else {
		printk(KERN_CRIT "BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
			sb->s_id, function, line, errno, errstr);
	}

	/* Don't go through full error handling during mount */
	save_error_info(fs_info);
	if (sb->s_flags & MS_BORN)
		btrfs_handle_error(fs_info);
}

static const char * const logtypes[] = {
	"emergency",
	"alert",
	"critical",
	"error",
	"warning",
	"notice",
	"info",
	"debug",
};

void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
	struct super_block *sb = fs_info->sb;
	char lvl[4];
	struct va_format vaf;
	va_list args;
	const char *type = logtypes[4];
	int kern_level;

	va_start(args, fmt);

	kern_level = printk_get_level(fmt);
	if (kern_level) {
		size_t size = printk_skip_level(fmt) - fmt;

		memcpy(lvl, fmt, size);
		lvl[size] = '\0';
		fmt += size;
		type = logtypes[kern_level - '0'];
	} else
		*lvl = '\0';

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf);

	va_end(args);
}

#else

void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
		       unsigned int line, int errno, const char *fmt, ...)
{
	struct super_block *sb = fs_info->sb;

	/*
	 * Special case: if the error is EROFS, and we're already
	 * under MS_RDONLY, then it is safe here.
	 */
	if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
		return;

	/* Don't go through full error handling during mount */
	if (sb->s_flags & MS_BORN) {
		save_error_info(fs_info);
		btrfs_handle_error(fs_info);
	}
}
#endif

/*
 * We only mark the transaction aborted and then set the file system read-only.
 * This will prevent new transactions from starting or trying to join this
 * one.
 *
 * This means that error recovery at the call site is limited to freeing
 * any local memory allocations and passing the error code up without
 * further cleanup. The transaction should complete as it normally would
 * in the call path but will return -EIO.
 *
 * We'll complete the cleanup in btrfs_end_transaction and
 * btrfs_commit_transaction.
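 *
 * Illustrative call-site sketch (an assumption for documentation purposes,
 * not code from this file): callers typically abort before unwinding, e.g.
 *
 *	ret = btrfs_update_inode(trans, root, inode);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, root, ret);
 *		return ret;
 *	}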
250 */ 251 __cold 252 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, 253 struct btrfs_root *root, const char *function, 254 unsigned int line, int errno) 255 { 256 trans->aborted = errno; 257 /* Nothing used. The other threads that have joined this 258 * transaction may be able to continue. */ 259 if (!trans->blocks_used && list_empty(&trans->new_bgs)) { 260 const char *errstr; 261 262 errstr = btrfs_decode_error(errno); 263 btrfs_warn(root->fs_info, 264 "%s:%d: Aborting unused transaction(%s).", 265 function, line, errstr); 266 return; 267 } 268 ACCESS_ONCE(trans->transaction->aborted) = errno; 269 /* Wake up anybody who may be waiting on this transaction */ 270 wake_up(&root->fs_info->transaction_wait); 271 wake_up(&root->fs_info->transaction_blocked_wait); 272 __btrfs_std_error(root->fs_info, function, line, errno, NULL); 273 } 274 /* 275 * __btrfs_panic decodes unexpected, fatal errors from the caller, 276 * issues an alert, and either panics or BUGs, depending on mount options. 277 */ 278 __cold 279 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, 280 unsigned int line, int errno, const char *fmt, ...) 281 { 282 char *s_id = "<unknown>"; 283 const char *errstr; 284 struct va_format vaf = { .fmt = fmt }; 285 va_list args; 286 287 if (fs_info) 288 s_id = fs_info->sb->s_id; 289 290 va_start(args, fmt); 291 vaf.va = &args; 292 293 errstr = btrfs_decode_error(errno); 294 if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)) 295 panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n", 296 s_id, function, line, &vaf, errno, errstr); 297 298 btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)", 299 function, line, &vaf, errno, errstr); 300 va_end(args); 301 /* Caller calls BUG() */ 302 } 303 304 static void btrfs_put_super(struct super_block *sb) 305 { 306 close_ctree(btrfs_sb(sb)->tree_root); 307 } 308 309 enum { 310 Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum, 311 Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, 312 Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, 313 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 314 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 315 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, 316 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache, 317 Opt_no_space_cache, Opt_recovery, Opt_skip_balance, 318 Opt_check_integrity, Opt_check_integrity_including_extent_data, 319 Opt_check_integrity_print_mask, Opt_fatal_errors, Opt_rescan_uuid_tree, 320 Opt_commit_interval, Opt_barrier, Opt_nodefrag, Opt_nodiscard, 321 Opt_noenospc_debug, Opt_noflushoncommit, Opt_acl, Opt_datacow, 322 Opt_datasum, Opt_treelog, Opt_noinode_cache, 323 Opt_err, 324 }; 325 326 static match_table_t tokens = { 327 {Opt_degraded, "degraded"}, 328 {Opt_subvol, "subvol=%s"}, 329 {Opt_subvolid, "subvolid=%s"}, 330 {Opt_device, "device=%s"}, 331 {Opt_nodatasum, "nodatasum"}, 332 {Opt_datasum, "datasum"}, 333 {Opt_nodatacow, "nodatacow"}, 334 {Opt_datacow, "datacow"}, 335 {Opt_nobarrier, "nobarrier"}, 336 {Opt_barrier, "barrier"}, 337 {Opt_max_inline, "max_inline=%s"}, 338 {Opt_alloc_start, "alloc_start=%s"}, 339 {Opt_thread_pool, "thread_pool=%d"}, 340 {Opt_compress, "compress"}, 341 {Opt_compress_type, "compress=%s"}, 342 {Opt_compress_force, "compress-force"}, 343 {Opt_compress_force_type, "compress-force=%s"}, 344 {Opt_ssd, "ssd"}, 345 {Opt_ssd_spread, "ssd_spread"}, 346 {Opt_nossd, "nossd"}, 347 
{Opt_acl, "acl"}, 348 {Opt_noacl, "noacl"}, 349 {Opt_notreelog, "notreelog"}, 350 {Opt_treelog, "treelog"}, 351 {Opt_flushoncommit, "flushoncommit"}, 352 {Opt_noflushoncommit, "noflushoncommit"}, 353 {Opt_ratio, "metadata_ratio=%d"}, 354 {Opt_discard, "discard"}, 355 {Opt_nodiscard, "nodiscard"}, 356 {Opt_space_cache, "space_cache"}, 357 {Opt_clear_cache, "clear_cache"}, 358 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, 359 {Opt_enospc_debug, "enospc_debug"}, 360 {Opt_noenospc_debug, "noenospc_debug"}, 361 {Opt_subvolrootid, "subvolrootid=%d"}, 362 {Opt_defrag, "autodefrag"}, 363 {Opt_nodefrag, "noautodefrag"}, 364 {Opt_inode_cache, "inode_cache"}, 365 {Opt_noinode_cache, "noinode_cache"}, 366 {Opt_no_space_cache, "nospace_cache"}, 367 {Opt_recovery, "recovery"}, 368 {Opt_skip_balance, "skip_balance"}, 369 {Opt_check_integrity, "check_int"}, 370 {Opt_check_integrity_including_extent_data, "check_int_data"}, 371 {Opt_check_integrity_print_mask, "check_int_print_mask=%d"}, 372 {Opt_rescan_uuid_tree, "rescan_uuid_tree"}, 373 {Opt_fatal_errors, "fatal_errors=%s"}, 374 {Opt_commit_interval, "commit=%d"}, 375 {Opt_err, NULL}, 376 }; 377 378 /* 379 * Regular mount options parser. Everything that is needed only when 380 * reading in a new superblock is parsed here. 381 * XXX JDM: This needs to be cleaned up for remount. 382 */ 383 int btrfs_parse_options(struct btrfs_root *root, char *options) 384 { 385 struct btrfs_fs_info *info = root->fs_info; 386 substring_t args[MAX_OPT_ARGS]; 387 char *p, *num, *orig = NULL; 388 u64 cache_gen; 389 int intarg; 390 int ret = 0; 391 char *compress_type; 392 bool compress_force = false; 393 394 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy); 395 if (cache_gen) 396 btrfs_set_opt(info->mount_opt, SPACE_CACHE); 397 398 if (!options) 399 goto out; 400 401 /* 402 * strsep changes the string, duplicate it because parse_options 403 * gets called twice 404 */ 405 options = kstrdup(options, GFP_NOFS); 406 if (!options) 407 return -ENOMEM; 408 409 orig = options; 410 411 while ((p = strsep(&options, ",")) != NULL) { 412 int token; 413 if (!*p) 414 continue; 415 416 token = match_token(p, tokens, args); 417 switch (token) { 418 case Opt_degraded: 419 btrfs_info(root->fs_info, "allowing degraded mounts"); 420 btrfs_set_opt(info->mount_opt, DEGRADED); 421 break; 422 case Opt_subvol: 423 case Opt_subvolid: 424 case Opt_subvolrootid: 425 case Opt_device: 426 /* 427 * These are parsed by btrfs_parse_early_options 428 * and can be happily ignored here. 
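			 *
			 * Hypothetical example for illustration: for
			 *
			 *	mount -o subvol=home,compress=lzo /dev/sdb /mnt
			 *
			 * the subvol= part was already consumed by the early
			 * parser, so only compress=lzo is acted on in this
			 * loop.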
429 */ 430 break; 431 case Opt_nodatasum: 432 btrfs_set_and_info(root, NODATASUM, 433 "setting nodatasum"); 434 break; 435 case Opt_datasum: 436 if (btrfs_test_opt(root, NODATASUM)) { 437 if (btrfs_test_opt(root, NODATACOW)) 438 btrfs_info(root->fs_info, "setting datasum, datacow enabled"); 439 else 440 btrfs_info(root->fs_info, "setting datasum"); 441 } 442 btrfs_clear_opt(info->mount_opt, NODATACOW); 443 btrfs_clear_opt(info->mount_opt, NODATASUM); 444 break; 445 case Opt_nodatacow: 446 if (!btrfs_test_opt(root, NODATACOW)) { 447 if (!btrfs_test_opt(root, COMPRESS) || 448 !btrfs_test_opt(root, FORCE_COMPRESS)) { 449 btrfs_info(root->fs_info, 450 "setting nodatacow, compression disabled"); 451 } else { 452 btrfs_info(root->fs_info, "setting nodatacow"); 453 } 454 } 455 btrfs_clear_opt(info->mount_opt, COMPRESS); 456 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 457 btrfs_set_opt(info->mount_opt, NODATACOW); 458 btrfs_set_opt(info->mount_opt, NODATASUM); 459 break; 460 case Opt_datacow: 461 btrfs_clear_and_info(root, NODATACOW, 462 "setting datacow"); 463 break; 464 case Opt_compress_force: 465 case Opt_compress_force_type: 466 compress_force = true; 467 /* Fallthrough */ 468 case Opt_compress: 469 case Opt_compress_type: 470 if (token == Opt_compress || 471 token == Opt_compress_force || 472 strcmp(args[0].from, "zlib") == 0) { 473 compress_type = "zlib"; 474 info->compress_type = BTRFS_COMPRESS_ZLIB; 475 btrfs_set_opt(info->mount_opt, COMPRESS); 476 btrfs_clear_opt(info->mount_opt, NODATACOW); 477 btrfs_clear_opt(info->mount_opt, NODATASUM); 478 } else if (strcmp(args[0].from, "lzo") == 0) { 479 compress_type = "lzo"; 480 info->compress_type = BTRFS_COMPRESS_LZO; 481 btrfs_set_opt(info->mount_opt, COMPRESS); 482 btrfs_clear_opt(info->mount_opt, NODATACOW); 483 btrfs_clear_opt(info->mount_opt, NODATASUM); 484 btrfs_set_fs_incompat(info, COMPRESS_LZO); 485 } else if (strncmp(args[0].from, "no", 2) == 0) { 486 compress_type = "no"; 487 btrfs_clear_opt(info->mount_opt, COMPRESS); 488 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 489 compress_force = false; 490 } else { 491 ret = -EINVAL; 492 goto out; 493 } 494 495 if (compress_force) { 496 btrfs_set_and_info(root, FORCE_COMPRESS, 497 "force %s compression", 498 compress_type); 499 } else { 500 if (!btrfs_test_opt(root, COMPRESS)) 501 btrfs_info(root->fs_info, 502 "btrfs: use %s compression", 503 compress_type); 504 /* 505 * If we remount from compress-force=xxx to 506 * compress=xxx, we need clear FORCE_COMPRESS 507 * flag, otherwise, there is no way for users 508 * to disable forcible compression separately. 
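				 *
				 * Hypothetical example: after mounting with
				 * compress-force=zlib, a later
				 *
				 *	mount -o remount,compress=zlib <mnt>
				 *
				 * is expected to drop the "force" behaviour,
				 * hence the clear below.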
509 */ 510 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 511 } 512 break; 513 case Opt_ssd: 514 btrfs_set_and_info(root, SSD, 515 "use ssd allocation scheme"); 516 break; 517 case Opt_ssd_spread: 518 btrfs_set_and_info(root, SSD_SPREAD, 519 "use spread ssd allocation scheme"); 520 btrfs_set_opt(info->mount_opt, SSD); 521 break; 522 case Opt_nossd: 523 btrfs_set_and_info(root, NOSSD, 524 "not using ssd allocation scheme"); 525 btrfs_clear_opt(info->mount_opt, SSD); 526 break; 527 case Opt_barrier: 528 btrfs_clear_and_info(root, NOBARRIER, 529 "turning on barriers"); 530 break; 531 case Opt_nobarrier: 532 btrfs_set_and_info(root, NOBARRIER, 533 "turning off barriers"); 534 break; 535 case Opt_thread_pool: 536 ret = match_int(&args[0], &intarg); 537 if (ret) { 538 goto out; 539 } else if (intarg > 0) { 540 info->thread_pool_size = intarg; 541 } else { 542 ret = -EINVAL; 543 goto out; 544 } 545 break; 546 case Opt_max_inline: 547 num = match_strdup(&args[0]); 548 if (num) { 549 info->max_inline = memparse(num, NULL); 550 kfree(num); 551 552 if (info->max_inline) { 553 info->max_inline = min_t(u64, 554 info->max_inline, 555 root->sectorsize); 556 } 557 btrfs_info(root->fs_info, "max_inline at %llu", 558 info->max_inline); 559 } else { 560 ret = -ENOMEM; 561 goto out; 562 } 563 break; 564 case Opt_alloc_start: 565 num = match_strdup(&args[0]); 566 if (num) { 567 mutex_lock(&info->chunk_mutex); 568 info->alloc_start = memparse(num, NULL); 569 mutex_unlock(&info->chunk_mutex); 570 kfree(num); 571 btrfs_info(root->fs_info, "allocations start at %llu", 572 info->alloc_start); 573 } else { 574 ret = -ENOMEM; 575 goto out; 576 } 577 break; 578 case Opt_acl: 579 #ifdef CONFIG_BTRFS_FS_POSIX_ACL 580 root->fs_info->sb->s_flags |= MS_POSIXACL; 581 break; 582 #else 583 btrfs_err(root->fs_info, 584 "support for ACL not compiled in!"); 585 ret = -EINVAL; 586 goto out; 587 #endif 588 case Opt_noacl: 589 root->fs_info->sb->s_flags &= ~MS_POSIXACL; 590 break; 591 case Opt_notreelog: 592 btrfs_set_and_info(root, NOTREELOG, 593 "disabling tree log"); 594 break; 595 case Opt_treelog: 596 btrfs_clear_and_info(root, NOTREELOG, 597 "enabling tree log"); 598 break; 599 case Opt_flushoncommit: 600 btrfs_set_and_info(root, FLUSHONCOMMIT, 601 "turning on flush-on-commit"); 602 break; 603 case Opt_noflushoncommit: 604 btrfs_clear_and_info(root, FLUSHONCOMMIT, 605 "turning off flush-on-commit"); 606 break; 607 case Opt_ratio: 608 ret = match_int(&args[0], &intarg); 609 if (ret) { 610 goto out; 611 } else if (intarg >= 0) { 612 info->metadata_ratio = intarg; 613 btrfs_info(root->fs_info, "metadata ratio %d", 614 info->metadata_ratio); 615 } else { 616 ret = -EINVAL; 617 goto out; 618 } 619 break; 620 case Opt_discard: 621 btrfs_set_and_info(root, DISCARD, 622 "turning on discard"); 623 break; 624 case Opt_nodiscard: 625 btrfs_clear_and_info(root, DISCARD, 626 "turning off discard"); 627 break; 628 case Opt_space_cache: 629 btrfs_set_and_info(root, SPACE_CACHE, 630 "enabling disk space caching"); 631 break; 632 case Opt_rescan_uuid_tree: 633 btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE); 634 break; 635 case Opt_no_space_cache: 636 btrfs_clear_and_info(root, SPACE_CACHE, 637 "disabling disk space caching"); 638 break; 639 case Opt_inode_cache: 640 btrfs_set_pending_and_info(info, INODE_MAP_CACHE, 641 "enabling inode map caching"); 642 break; 643 case Opt_noinode_cache: 644 btrfs_clear_pending_and_info(info, INODE_MAP_CACHE, 645 "disabling inode map caching"); 646 break; 647 case Opt_clear_cache: 648 
btrfs_set_and_info(root, CLEAR_CACHE, 649 "force clearing of disk cache"); 650 break; 651 case Opt_user_subvol_rm_allowed: 652 btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); 653 break; 654 case Opt_enospc_debug: 655 btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); 656 break; 657 case Opt_noenospc_debug: 658 btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG); 659 break; 660 case Opt_defrag: 661 btrfs_set_and_info(root, AUTO_DEFRAG, 662 "enabling auto defrag"); 663 break; 664 case Opt_nodefrag: 665 btrfs_clear_and_info(root, AUTO_DEFRAG, 666 "disabling auto defrag"); 667 break; 668 case Opt_recovery: 669 btrfs_info(root->fs_info, "enabling auto recovery"); 670 btrfs_set_opt(info->mount_opt, RECOVERY); 671 break; 672 case Opt_skip_balance: 673 btrfs_set_opt(info->mount_opt, SKIP_BALANCE); 674 break; 675 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 676 case Opt_check_integrity_including_extent_data: 677 btrfs_info(root->fs_info, 678 "enabling check integrity including extent data"); 679 btrfs_set_opt(info->mount_opt, 680 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA); 681 btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY); 682 break; 683 case Opt_check_integrity: 684 btrfs_info(root->fs_info, "enabling check integrity"); 685 btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY); 686 break; 687 case Opt_check_integrity_print_mask: 688 ret = match_int(&args[0], &intarg); 689 if (ret) { 690 goto out; 691 } else if (intarg >= 0) { 692 info->check_integrity_print_mask = intarg; 693 btrfs_info(root->fs_info, "check_integrity_print_mask 0x%x", 694 info->check_integrity_print_mask); 695 } else { 696 ret = -EINVAL; 697 goto out; 698 } 699 break; 700 #else 701 case Opt_check_integrity_including_extent_data: 702 case Opt_check_integrity: 703 case Opt_check_integrity_print_mask: 704 btrfs_err(root->fs_info, 705 "support for check_integrity* not compiled in!"); 706 ret = -EINVAL; 707 goto out; 708 #endif 709 case Opt_fatal_errors: 710 if (strcmp(args[0].from, "panic") == 0) 711 btrfs_set_opt(info->mount_opt, 712 PANIC_ON_FATAL_ERROR); 713 else if (strcmp(args[0].from, "bug") == 0) 714 btrfs_clear_opt(info->mount_opt, 715 PANIC_ON_FATAL_ERROR); 716 else { 717 ret = -EINVAL; 718 goto out; 719 } 720 break; 721 case Opt_commit_interval: 722 intarg = 0; 723 ret = match_int(&args[0], &intarg); 724 if (ret < 0) { 725 btrfs_err(root->fs_info, "invalid commit interval"); 726 ret = -EINVAL; 727 goto out; 728 } 729 if (intarg > 0) { 730 if (intarg > 300) { 731 btrfs_warn(root->fs_info, "excessive commit interval %d", 732 intarg); 733 } 734 info->commit_interval = intarg; 735 } else { 736 btrfs_info(root->fs_info, "using default commit interval %ds", 737 BTRFS_DEFAULT_COMMIT_INTERVAL); 738 info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; 739 } 740 break; 741 case Opt_err: 742 btrfs_info(root->fs_info, "unrecognized mount option '%s'", p); 743 ret = -EINVAL; 744 goto out; 745 default: 746 break; 747 } 748 } 749 out: 750 if (!ret && btrfs_test_opt(root, SPACE_CACHE)) 751 btrfs_info(root->fs_info, "disk space caching is enabled"); 752 kfree(orig); 753 return ret; 754 } 755 756 /* 757 * Parse mount options that are required early in the mount process. 758 * 759 * All other options will be parsed on much later in the mount process and 760 * only when we need to allocate a new super block. 
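 *
 * For illustration (hypothetical invocation), a mount like
 *
 *	mount -o device=/dev/sdc,subvolid=257,compress=lzo /dev/sdb /mnt
 *
 * has device= and subvolid= handled by this early pass (extra devices are
 * scanned, the subvolume id is remembered), while compress=lzo is left for
 * btrfs_parse_options() once the superblock exists.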
761 */ 762 static int btrfs_parse_early_options(const char *options, fmode_t flags, 763 void *holder, char **subvol_name, u64 *subvol_objectid, 764 struct btrfs_fs_devices **fs_devices) 765 { 766 substring_t args[MAX_OPT_ARGS]; 767 char *device_name, *opts, *orig, *p; 768 char *num = NULL; 769 int error = 0; 770 771 if (!options) 772 return 0; 773 774 /* 775 * strsep changes the string, duplicate it because parse_options 776 * gets called twice 777 */ 778 opts = kstrdup(options, GFP_KERNEL); 779 if (!opts) 780 return -ENOMEM; 781 orig = opts; 782 783 while ((p = strsep(&opts, ",")) != NULL) { 784 int token; 785 if (!*p) 786 continue; 787 788 token = match_token(p, tokens, args); 789 switch (token) { 790 case Opt_subvol: 791 kfree(*subvol_name); 792 *subvol_name = match_strdup(&args[0]); 793 if (!*subvol_name) { 794 error = -ENOMEM; 795 goto out; 796 } 797 break; 798 case Opt_subvolid: 799 num = match_strdup(&args[0]); 800 if (num) { 801 *subvol_objectid = memparse(num, NULL); 802 kfree(num); 803 /* we want the original fs_tree */ 804 if (!*subvol_objectid) 805 *subvol_objectid = 806 BTRFS_FS_TREE_OBJECTID; 807 } else { 808 error = -EINVAL; 809 goto out; 810 } 811 break; 812 case Opt_subvolrootid: 813 printk(KERN_WARNING 814 "BTRFS: 'subvolrootid' mount option is deprecated and has " 815 "no effect\n"); 816 break; 817 case Opt_device: 818 device_name = match_strdup(&args[0]); 819 if (!device_name) { 820 error = -ENOMEM; 821 goto out; 822 } 823 error = btrfs_scan_one_device(device_name, 824 flags, holder, fs_devices); 825 kfree(device_name); 826 if (error) 827 goto out; 828 break; 829 default: 830 break; 831 } 832 } 833 834 out: 835 kfree(orig); 836 return error; 837 } 838 839 static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, 840 u64 subvol_objectid) 841 { 842 struct btrfs_root *root = fs_info->tree_root; 843 struct btrfs_root *fs_root; 844 struct btrfs_root_ref *root_ref; 845 struct btrfs_inode_ref *inode_ref; 846 struct btrfs_key key; 847 struct btrfs_path *path = NULL; 848 char *name = NULL, *ptr; 849 u64 dirid; 850 int len; 851 int ret; 852 853 path = btrfs_alloc_path(); 854 if (!path) { 855 ret = -ENOMEM; 856 goto err; 857 } 858 path->leave_spinning = 1; 859 860 name = kmalloc(PATH_MAX, GFP_NOFS); 861 if (!name) { 862 ret = -ENOMEM; 863 goto err; 864 } 865 ptr = name + PATH_MAX - 1; 866 ptr[0] = '\0'; 867 868 /* 869 * Walk up the subvolume trees in the tree of tree roots by root 870 * backrefs until we hit the top-level subvolume. 
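	 *
	 * Worked example (hypothetical layout): for a subvolume reachable at
	 * /data/snapshots/day1, the loops below first write "/day1" at the
	 * end of the PATH_MAX buffer, then prepend "/snapshots" and "/data"
	 * on later iterations, so the finished name is read forward from
	 * 'ptr' without any extra copying.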
871 */ 872 while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) { 873 key.objectid = subvol_objectid; 874 key.type = BTRFS_ROOT_BACKREF_KEY; 875 key.offset = (u64)-1; 876 877 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 878 if (ret < 0) { 879 goto err; 880 } else if (ret > 0) { 881 ret = btrfs_previous_item(root, path, subvol_objectid, 882 BTRFS_ROOT_BACKREF_KEY); 883 if (ret < 0) { 884 goto err; 885 } else if (ret > 0) { 886 ret = -ENOENT; 887 goto err; 888 } 889 } 890 891 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 892 subvol_objectid = key.offset; 893 894 root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0], 895 struct btrfs_root_ref); 896 len = btrfs_root_ref_name_len(path->nodes[0], root_ref); 897 ptr -= len + 1; 898 if (ptr < name) { 899 ret = -ENAMETOOLONG; 900 goto err; 901 } 902 read_extent_buffer(path->nodes[0], ptr + 1, 903 (unsigned long)(root_ref + 1), len); 904 ptr[0] = '/'; 905 dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref); 906 btrfs_release_path(path); 907 908 key.objectid = subvol_objectid; 909 key.type = BTRFS_ROOT_ITEM_KEY; 910 key.offset = (u64)-1; 911 fs_root = btrfs_read_fs_root_no_name(fs_info, &key); 912 if (IS_ERR(fs_root)) { 913 ret = PTR_ERR(fs_root); 914 goto err; 915 } 916 917 /* 918 * Walk up the filesystem tree by inode refs until we hit the 919 * root directory. 920 */ 921 while (dirid != BTRFS_FIRST_FREE_OBJECTID) { 922 key.objectid = dirid; 923 key.type = BTRFS_INODE_REF_KEY; 924 key.offset = (u64)-1; 925 926 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 927 if (ret < 0) { 928 goto err; 929 } else if (ret > 0) { 930 ret = btrfs_previous_item(fs_root, path, dirid, 931 BTRFS_INODE_REF_KEY); 932 if (ret < 0) { 933 goto err; 934 } else if (ret > 0) { 935 ret = -ENOENT; 936 goto err; 937 } 938 } 939 940 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 941 dirid = key.offset; 942 943 inode_ref = btrfs_item_ptr(path->nodes[0], 944 path->slots[0], 945 struct btrfs_inode_ref); 946 len = btrfs_inode_ref_name_len(path->nodes[0], 947 inode_ref); 948 ptr -= len + 1; 949 if (ptr < name) { 950 ret = -ENAMETOOLONG; 951 goto err; 952 } 953 read_extent_buffer(path->nodes[0], ptr + 1, 954 (unsigned long)(inode_ref + 1), len); 955 ptr[0] = '/'; 956 btrfs_release_path(path); 957 } 958 } 959 960 btrfs_free_path(path); 961 if (ptr == name + PATH_MAX - 1) { 962 name[0] = '/'; 963 name[1] = '\0'; 964 } else { 965 memmove(name, ptr, name + PATH_MAX - ptr); 966 } 967 return name; 968 969 err: 970 btrfs_free_path(path); 971 kfree(name); 972 return ERR_PTR(ret); 973 } 974 975 static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid) 976 { 977 struct btrfs_root *root = fs_info->tree_root; 978 struct btrfs_dir_item *di; 979 struct btrfs_path *path; 980 struct btrfs_key location; 981 u64 dir_id; 982 983 path = btrfs_alloc_path(); 984 if (!path) 985 return -ENOMEM; 986 path->leave_spinning = 1; 987 988 /* 989 * Find the "default" dir item which points to the root item that we 990 * will mount by default if we haven't been given a specific subvolume 991 * to mount. 992 */ 993 dir_id = btrfs_super_root_dir(fs_info->super_copy); 994 di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); 995 if (IS_ERR(di)) { 996 btrfs_free_path(path); 997 return PTR_ERR(di); 998 } 999 if (!di) { 1000 /* 1001 * Ok the default dir item isn't there. This is weird since 1002 * it's always been there, but don't freak out, just try and 1003 * mount the top-level subvolume. 
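		 *
		 * (The "default" dir item is normally (re)written via the
		 * BTRFS_IOC_DEFAULT_SUBVOL ioctl, e.g.
		 *
		 *	btrfs subvolume set-default 257 /mnt
		 *
		 * so this fallback mainly matters for filesystems where it
		 * was never created or has been lost.)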
1004 */ 1005 btrfs_free_path(path); 1006 *objectid = BTRFS_FS_TREE_OBJECTID; 1007 return 0; 1008 } 1009 1010 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 1011 btrfs_free_path(path); 1012 *objectid = location.objectid; 1013 return 0; 1014 } 1015 1016 static int btrfs_fill_super(struct super_block *sb, 1017 struct btrfs_fs_devices *fs_devices, 1018 void *data, int silent) 1019 { 1020 struct inode *inode; 1021 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1022 struct btrfs_key key; 1023 int err; 1024 1025 sb->s_maxbytes = MAX_LFS_FILESIZE; 1026 sb->s_magic = BTRFS_SUPER_MAGIC; 1027 sb->s_op = &btrfs_super_ops; 1028 sb->s_d_op = &btrfs_dentry_operations; 1029 sb->s_export_op = &btrfs_export_ops; 1030 sb->s_xattr = btrfs_xattr_handlers; 1031 sb->s_time_gran = 1; 1032 #ifdef CONFIG_BTRFS_FS_POSIX_ACL 1033 sb->s_flags |= MS_POSIXACL; 1034 #endif 1035 sb->s_flags |= MS_I_VERSION; 1036 sb->s_iflags |= SB_I_CGROUPWB; 1037 err = open_ctree(sb, fs_devices, (char *)data); 1038 if (err) { 1039 printk(KERN_ERR "BTRFS: open_ctree failed\n"); 1040 return err; 1041 } 1042 1043 key.objectid = BTRFS_FIRST_FREE_OBJECTID; 1044 key.type = BTRFS_INODE_ITEM_KEY; 1045 key.offset = 0; 1046 inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL); 1047 if (IS_ERR(inode)) { 1048 err = PTR_ERR(inode); 1049 goto fail_close; 1050 } 1051 1052 sb->s_root = d_make_root(inode); 1053 if (!sb->s_root) { 1054 err = -ENOMEM; 1055 goto fail_close; 1056 } 1057 1058 save_mount_options(sb, data); 1059 cleancache_init_fs(sb); 1060 sb->s_flags |= MS_ACTIVE; 1061 return 0; 1062 1063 fail_close: 1064 close_ctree(fs_info->tree_root); 1065 return err; 1066 } 1067 1068 int btrfs_sync_fs(struct super_block *sb, int wait) 1069 { 1070 struct btrfs_trans_handle *trans; 1071 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1072 struct btrfs_root *root = fs_info->tree_root; 1073 1074 trace_btrfs_sync_fs(wait); 1075 1076 if (!wait) { 1077 filemap_flush(fs_info->btree_inode->i_mapping); 1078 return 0; 1079 } 1080 1081 btrfs_wait_ordered_roots(fs_info, -1); 1082 1083 trans = btrfs_attach_transaction_barrier(root); 1084 if (IS_ERR(trans)) { 1085 /* no transaction, don't bother */ 1086 if (PTR_ERR(trans) == -ENOENT) { 1087 /* 1088 * Exit unless we have some pending changes 1089 * that need to go through commit 1090 */ 1091 if (fs_info->pending_changes == 0) 1092 return 0; 1093 /* 1094 * A non-blocking test if the fs is frozen. We must not 1095 * start a new transaction here otherwise a deadlock 1096 * happens. The pending operations are delayed to the 1097 * next commit after thawing. 
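			 *
			 * Illustrative scenario (assumption): after
			 *
			 *	fsfreeze -f /mnt
			 *
			 * a sync(2) that reaches this point with pending
			 * changes must not block on SB_FREEZE_WRITE; the
			 * trylock below just defers the work to the first
			 * commit after "fsfreeze -u /mnt".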
1098 */ 1099 if (__sb_start_write(sb, SB_FREEZE_WRITE, false)) 1100 __sb_end_write(sb, SB_FREEZE_WRITE); 1101 else 1102 return 0; 1103 trans = btrfs_start_transaction(root, 0); 1104 } 1105 if (IS_ERR(trans)) 1106 return PTR_ERR(trans); 1107 } 1108 return btrfs_commit_transaction(trans, root); 1109 } 1110 1111 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) 1112 { 1113 struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); 1114 struct btrfs_root *root = info->tree_root; 1115 char *compress_type; 1116 1117 if (btrfs_test_opt(root, DEGRADED)) 1118 seq_puts(seq, ",degraded"); 1119 if (btrfs_test_opt(root, NODATASUM)) 1120 seq_puts(seq, ",nodatasum"); 1121 if (btrfs_test_opt(root, NODATACOW)) 1122 seq_puts(seq, ",nodatacow"); 1123 if (btrfs_test_opt(root, NOBARRIER)) 1124 seq_puts(seq, ",nobarrier"); 1125 if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE) 1126 seq_printf(seq, ",max_inline=%llu", info->max_inline); 1127 if (info->alloc_start != 0) 1128 seq_printf(seq, ",alloc_start=%llu", info->alloc_start); 1129 if (info->thread_pool_size != min_t(unsigned long, 1130 num_online_cpus() + 2, 8)) 1131 seq_printf(seq, ",thread_pool=%d", info->thread_pool_size); 1132 if (btrfs_test_opt(root, COMPRESS)) { 1133 if (info->compress_type == BTRFS_COMPRESS_ZLIB) 1134 compress_type = "zlib"; 1135 else 1136 compress_type = "lzo"; 1137 if (btrfs_test_opt(root, FORCE_COMPRESS)) 1138 seq_printf(seq, ",compress-force=%s", compress_type); 1139 else 1140 seq_printf(seq, ",compress=%s", compress_type); 1141 } 1142 if (btrfs_test_opt(root, NOSSD)) 1143 seq_puts(seq, ",nossd"); 1144 if (btrfs_test_opt(root, SSD_SPREAD)) 1145 seq_puts(seq, ",ssd_spread"); 1146 else if (btrfs_test_opt(root, SSD)) 1147 seq_puts(seq, ",ssd"); 1148 if (btrfs_test_opt(root, NOTREELOG)) 1149 seq_puts(seq, ",notreelog"); 1150 if (btrfs_test_opt(root, FLUSHONCOMMIT)) 1151 seq_puts(seq, ",flushoncommit"); 1152 if (btrfs_test_opt(root, DISCARD)) 1153 seq_puts(seq, ",discard"); 1154 if (!(root->fs_info->sb->s_flags & MS_POSIXACL)) 1155 seq_puts(seq, ",noacl"); 1156 if (btrfs_test_opt(root, SPACE_CACHE)) 1157 seq_puts(seq, ",space_cache"); 1158 else 1159 seq_puts(seq, ",nospace_cache"); 1160 if (btrfs_test_opt(root, RESCAN_UUID_TREE)) 1161 seq_puts(seq, ",rescan_uuid_tree"); 1162 if (btrfs_test_opt(root, CLEAR_CACHE)) 1163 seq_puts(seq, ",clear_cache"); 1164 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) 1165 seq_puts(seq, ",user_subvol_rm_allowed"); 1166 if (btrfs_test_opt(root, ENOSPC_DEBUG)) 1167 seq_puts(seq, ",enospc_debug"); 1168 if (btrfs_test_opt(root, AUTO_DEFRAG)) 1169 seq_puts(seq, ",autodefrag"); 1170 if (btrfs_test_opt(root, INODE_MAP_CACHE)) 1171 seq_puts(seq, ",inode_cache"); 1172 if (btrfs_test_opt(root, SKIP_BALANCE)) 1173 seq_puts(seq, ",skip_balance"); 1174 if (btrfs_test_opt(root, RECOVERY)) 1175 seq_puts(seq, ",recovery"); 1176 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 1177 if (btrfs_test_opt(root, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA)) 1178 seq_puts(seq, ",check_int_data"); 1179 else if (btrfs_test_opt(root, CHECK_INTEGRITY)) 1180 seq_puts(seq, ",check_int"); 1181 if (info->check_integrity_print_mask) 1182 seq_printf(seq, ",check_int_print_mask=%d", 1183 info->check_integrity_print_mask); 1184 #endif 1185 if (info->metadata_ratio) 1186 seq_printf(seq, ",metadata_ratio=%d", 1187 info->metadata_ratio); 1188 if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR)) 1189 seq_puts(seq, ",fatal_errors=panic"); 1190 if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL) 1191 seq_printf(seq, ",commit=%d", 
info->commit_interval); 1192 seq_printf(seq, ",subvolid=%llu", 1193 BTRFS_I(d_inode(dentry))->root->root_key.objectid); 1194 seq_puts(seq, ",subvol="); 1195 seq_dentry(seq, dentry, " \t\n\\"); 1196 return 0; 1197 } 1198 1199 static int btrfs_test_super(struct super_block *s, void *data) 1200 { 1201 struct btrfs_fs_info *p = data; 1202 struct btrfs_fs_info *fs_info = btrfs_sb(s); 1203 1204 return fs_info->fs_devices == p->fs_devices; 1205 } 1206 1207 static int btrfs_set_super(struct super_block *s, void *data) 1208 { 1209 int err = set_anon_super(s, data); 1210 if (!err) 1211 s->s_fs_info = data; 1212 return err; 1213 } 1214 1215 /* 1216 * subvolumes are identified by ino 256 1217 */ 1218 static inline int is_subvolume_inode(struct inode *inode) 1219 { 1220 if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 1221 return 1; 1222 return 0; 1223 } 1224 1225 /* 1226 * This will add subvolid=0 to the argument string while removing any subvol= 1227 * and subvolid= arguments to make sure we get the top-level root for path 1228 * walking to the subvol we want. 1229 */ 1230 static char *setup_root_args(char *args) 1231 { 1232 char *buf, *dst, *sep; 1233 1234 if (!args) 1235 return kstrdup("subvolid=0", GFP_NOFS); 1236 1237 /* The worst case is that we add ",subvolid=0" to the end. */ 1238 buf = dst = kmalloc(strlen(args) + strlen(",subvolid=0") + 1, GFP_NOFS); 1239 if (!buf) 1240 return NULL; 1241 1242 while (1) { 1243 sep = strchrnul(args, ','); 1244 if (!strstarts(args, "subvol=") && 1245 !strstarts(args, "subvolid=")) { 1246 memcpy(dst, args, sep - args); 1247 dst += sep - args; 1248 *dst++ = ','; 1249 } 1250 if (*sep) 1251 args = sep + 1; 1252 else 1253 break; 1254 } 1255 strcpy(dst, "subvolid=0"); 1256 1257 return buf; 1258 } 1259 1260 static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, 1261 int flags, const char *device_name, 1262 char *data) 1263 { 1264 struct dentry *root; 1265 struct vfsmount *mnt = NULL; 1266 char *newargs; 1267 int ret; 1268 1269 newargs = setup_root_args(data); 1270 if (!newargs) { 1271 root = ERR_PTR(-ENOMEM); 1272 goto out; 1273 } 1274 1275 mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs); 1276 if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) { 1277 if (flags & MS_RDONLY) { 1278 mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY, 1279 device_name, newargs); 1280 } else { 1281 mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY, 1282 device_name, newargs); 1283 if (IS_ERR(mnt)) { 1284 root = ERR_CAST(mnt); 1285 mnt = NULL; 1286 goto out; 1287 } 1288 1289 down_write(&mnt->mnt_sb->s_umount); 1290 ret = btrfs_remount(mnt->mnt_sb, &flags, NULL); 1291 up_write(&mnt->mnt_sb->s_umount); 1292 if (ret < 0) { 1293 root = ERR_PTR(ret); 1294 goto out; 1295 } 1296 } 1297 } 1298 if (IS_ERR(mnt)) { 1299 root = ERR_CAST(mnt); 1300 mnt = NULL; 1301 goto out; 1302 } 1303 1304 if (!subvol_name) { 1305 if (!subvol_objectid) { 1306 ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb), 1307 &subvol_objectid); 1308 if (ret) { 1309 root = ERR_PTR(ret); 1310 goto out; 1311 } 1312 } 1313 subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), 1314 subvol_objectid); 1315 if (IS_ERR(subvol_name)) { 1316 root = ERR_CAST(subvol_name); 1317 subvol_name = NULL; 1318 goto out; 1319 } 1320 1321 } 1322 1323 root = mount_subtree(mnt, subvol_name); 1324 /* mount_subtree() drops our reference on the vfsmount. 
*/ 1325 mnt = NULL; 1326 1327 if (!IS_ERR(root)) { 1328 struct super_block *s = root->d_sb; 1329 struct inode *root_inode = d_inode(root); 1330 u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid; 1331 1332 ret = 0; 1333 if (!is_subvolume_inode(root_inode)) { 1334 pr_err("BTRFS: '%s' is not a valid subvolume\n", 1335 subvol_name); 1336 ret = -EINVAL; 1337 } 1338 if (subvol_objectid && root_objectid != subvol_objectid) { 1339 /* 1340 * This will also catch a race condition where a 1341 * subvolume which was passed by ID is renamed and 1342 * another subvolume is renamed over the old location. 1343 */ 1344 pr_err("BTRFS: subvol '%s' does not match subvolid %llu\n", 1345 subvol_name, subvol_objectid); 1346 ret = -EINVAL; 1347 } 1348 if (ret) { 1349 dput(root); 1350 root = ERR_PTR(ret); 1351 deactivate_locked_super(s); 1352 } 1353 } 1354 1355 out: 1356 mntput(mnt); 1357 kfree(newargs); 1358 kfree(subvol_name); 1359 return root; 1360 } 1361 1362 static int parse_security_options(char *orig_opts, 1363 struct security_mnt_opts *sec_opts) 1364 { 1365 char *secdata = NULL; 1366 int ret = 0; 1367 1368 secdata = alloc_secdata(); 1369 if (!secdata) 1370 return -ENOMEM; 1371 ret = security_sb_copy_data(orig_opts, secdata); 1372 if (ret) { 1373 free_secdata(secdata); 1374 return ret; 1375 } 1376 ret = security_sb_parse_opts_str(secdata, sec_opts); 1377 free_secdata(secdata); 1378 return ret; 1379 } 1380 1381 static int setup_security_options(struct btrfs_fs_info *fs_info, 1382 struct super_block *sb, 1383 struct security_mnt_opts *sec_opts) 1384 { 1385 int ret = 0; 1386 1387 /* 1388 * Call security_sb_set_mnt_opts() to check whether new sec_opts 1389 * is valid. 1390 */ 1391 ret = security_sb_set_mnt_opts(sb, sec_opts, 0, NULL); 1392 if (ret) 1393 return ret; 1394 1395 #ifdef CONFIG_SECURITY 1396 if (!fs_info->security_opts.num_mnt_opts) { 1397 /* first time security setup, copy sec_opts to fs_info */ 1398 memcpy(&fs_info->security_opts, sec_opts, sizeof(*sec_opts)); 1399 } else { 1400 /* 1401 * Since SELinux(the only one supports security_mnt_opts) does 1402 * NOT support changing context during remount/mount same sb, 1403 * This must be the same or part of the same security options, 1404 * just free it. 1405 */ 1406 security_free_mnt_opts(sec_opts); 1407 } 1408 #endif 1409 return ret; 1410 } 1411 1412 /* 1413 * Find a superblock for the given device / mount point. 1414 * 1415 * Note: This is based on get_sb_bdev from fs/super.c with a few additions 1416 * for multiple device setup. Make sure to keep it in sync. 1417 */ 1418 static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, 1419 const char *device_name, void *data) 1420 { 1421 struct block_device *bdev = NULL; 1422 struct super_block *s; 1423 struct btrfs_fs_devices *fs_devices = NULL; 1424 struct btrfs_fs_info *fs_info = NULL; 1425 struct security_mnt_opts new_sec_opts; 1426 fmode_t mode = FMODE_READ; 1427 char *subvol_name = NULL; 1428 u64 subvol_objectid = 0; 1429 int error = 0; 1430 1431 if (!(flags & MS_RDONLY)) 1432 mode |= FMODE_WRITE; 1433 1434 error = btrfs_parse_early_options(data, mode, fs_type, 1435 &subvol_name, &subvol_objectid, 1436 &fs_devices); 1437 if (error) { 1438 kfree(subvol_name); 1439 return ERR_PTR(error); 1440 } 1441 1442 if (subvol_name || subvol_objectid != BTRFS_FS_TREE_OBJECTID) { 1443 /* mount_subvol() will free subvol_name. 
*/ 1444 return mount_subvol(subvol_name, subvol_objectid, flags, 1445 device_name, data); 1446 } 1447 1448 security_init_mnt_opts(&new_sec_opts); 1449 if (data) { 1450 error = parse_security_options(data, &new_sec_opts); 1451 if (error) 1452 return ERR_PTR(error); 1453 } 1454 1455 error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices); 1456 if (error) 1457 goto error_sec_opts; 1458 1459 /* 1460 * Setup a dummy root and fs_info for test/set super. This is because 1461 * we don't actually fill this stuff out until open_ctree, but we need 1462 * it for searching for existing supers, so this lets us do that and 1463 * then open_ctree will properly initialize everything later. 1464 */ 1465 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS); 1466 if (!fs_info) { 1467 error = -ENOMEM; 1468 goto error_sec_opts; 1469 } 1470 1471 fs_info->fs_devices = fs_devices; 1472 1473 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 1474 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 1475 security_init_mnt_opts(&fs_info->security_opts); 1476 if (!fs_info->super_copy || !fs_info->super_for_commit) { 1477 error = -ENOMEM; 1478 goto error_fs_info; 1479 } 1480 1481 error = btrfs_open_devices(fs_devices, mode, fs_type); 1482 if (error) 1483 goto error_fs_info; 1484 1485 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) { 1486 error = -EACCES; 1487 goto error_close_devices; 1488 } 1489 1490 bdev = fs_devices->latest_bdev; 1491 s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | MS_NOSEC, 1492 fs_info); 1493 if (IS_ERR(s)) { 1494 error = PTR_ERR(s); 1495 goto error_close_devices; 1496 } 1497 1498 if (s->s_root) { 1499 btrfs_close_devices(fs_devices); 1500 free_fs_info(fs_info); 1501 if ((flags ^ s->s_flags) & MS_RDONLY) 1502 error = -EBUSY; 1503 } else { 1504 char b[BDEVNAME_SIZE]; 1505 1506 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); 1507 btrfs_sb(s)->bdev_holder = fs_type; 1508 error = btrfs_fill_super(s, fs_devices, data, 1509 flags & MS_SILENT ? 
					 1 : 0);
	}
	if (error) {
		deactivate_locked_super(s);
		goto error_sec_opts;
	}

	fs_info = btrfs_sb(s);
	error = setup_security_options(fs_info, s, &new_sec_opts);
	if (error) {
		deactivate_locked_super(s);
		goto error_sec_opts;
	}

	return dget(s->s_root);

error_close_devices:
	btrfs_close_devices(fs_devices);
error_fs_info:
	free_fs_info(fs_info);
error_sec_opts:
	security_free_mnt_opts(&new_sec_opts);
	return ERR_PTR(error);
}

static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
				     int new_pool_size, int old_pool_size)
{
	if (new_pool_size == old_pool_size)
		return;

	fs_info->thread_pool_size = new_pool_size;

	btrfs_info(fs_info, "resize thread pool %d -> %d",
		   old_pool_size, new_pool_size);

	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
				new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
				new_pool_size);
}

static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info)
{
	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}

static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
					unsigned long old_opts, int flags)
{
	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
	     (flags & MS_RDONLY))) {
		/* wait for any defraggers to finish */
		wait_event(fs_info->transaction_wait,
			   (atomic_read(&fs_info->defrag_running) == 0));
		if (flags & MS_RDONLY)
			sync_filesystem(fs_info->sb);
	}
}

static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
					 unsigned long old_opts)
{
	/*
	 * We need to clean up all defraggable inodes if autodefrag has been
	 * turned off or the filesystem is now read-only.
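	 *
	 * For instance (hypothetical remount), going from "-o autodefrag" to
	 *
	 *	mount -o remount,noautodefrag /mnt
	 *
	 * or to a read-only remount must not leave inodes queued for
	 * defragmentation behind.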
	 */
	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
	     (fs_info->sb->s_flags & MS_RDONLY))) {
		btrfs_cleanup_defrag_inodes(fs_info);
	}

	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}

static int btrfs_remount(struct super_block *sb, int *flags, char *data)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_root *root = fs_info->tree_root;
	unsigned old_flags = sb->s_flags;
	unsigned long old_opts = fs_info->mount_opt;
	unsigned long old_compress_type = fs_info->compress_type;
	u64 old_max_inline = fs_info->max_inline;
	u64 old_alloc_start = fs_info->alloc_start;
	int old_thread_pool_size = fs_info->thread_pool_size;
	unsigned int old_metadata_ratio = fs_info->metadata_ratio;
	int ret;

	sync_filesystem(sb);
	btrfs_remount_prepare(fs_info);

	if (data) {
		struct security_mnt_opts new_sec_opts;

		security_init_mnt_opts(&new_sec_opts);
		ret = parse_security_options(data, &new_sec_opts);
		if (ret)
			goto restore;
		ret = setup_security_options(fs_info, sb,
					     &new_sec_opts);
		if (ret) {
			security_free_mnt_opts(&new_sec_opts);
			goto restore;
		}
	}

	ret = btrfs_parse_options(root, data);
	if (ret) {
		ret = -EINVAL;
		goto restore;
	}

	btrfs_remount_begin(fs_info, old_opts, *flags);
	btrfs_resize_thread_pool(fs_info,
		fs_info->thread_pool_size, old_thread_pool_size);

	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		goto out;

	if (*flags & MS_RDONLY) {
		/*
		 * This also happens on 'umount -rf' or on shutdown, when
		 * the filesystem is busy.
		 */
		cancel_work_sync(&fs_info->async_reclaim_work);

		/* wait for the uuid_scan task to finish */
		down(&fs_info->uuid_tree_rescan_sem);
		/* avoid complaints from lockdep et al. */
		up(&fs_info->uuid_tree_rescan_sem);

		sb->s_flags |= MS_RDONLY;

		/*
		 * Setting MS_RDONLY will put the cleaner thread to
		 * sleep at the next loop if it's already active.
		 * If it's already asleep, we'll leave unused block
		 * groups on disk until we're mounted read-write again
		 * unless we clean them up here.
1660 */ 1661 btrfs_delete_unused_bgs(fs_info); 1662 1663 btrfs_dev_replace_suspend_for_unmount(fs_info); 1664 btrfs_scrub_cancel(fs_info); 1665 btrfs_pause_balance(fs_info); 1666 1667 ret = btrfs_commit_super(root); 1668 if (ret) 1669 goto restore; 1670 } else { 1671 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) { 1672 btrfs_err(fs_info, 1673 "Remounting read-write after error is not allowed"); 1674 ret = -EINVAL; 1675 goto restore; 1676 } 1677 if (fs_info->fs_devices->rw_devices == 0) { 1678 ret = -EACCES; 1679 goto restore; 1680 } 1681 1682 if (fs_info->fs_devices->missing_devices > 1683 fs_info->num_tolerated_disk_barrier_failures && 1684 !(*flags & MS_RDONLY)) { 1685 btrfs_warn(fs_info, 1686 "too many missing devices, writeable remount is not allowed"); 1687 ret = -EACCES; 1688 goto restore; 1689 } 1690 1691 if (btrfs_super_log_root(fs_info->super_copy) != 0) { 1692 ret = -EINVAL; 1693 goto restore; 1694 } 1695 1696 ret = btrfs_cleanup_fs_roots(fs_info); 1697 if (ret) 1698 goto restore; 1699 1700 /* recover relocation */ 1701 mutex_lock(&fs_info->cleaner_mutex); 1702 ret = btrfs_recover_relocation(root); 1703 mutex_unlock(&fs_info->cleaner_mutex); 1704 if (ret) 1705 goto restore; 1706 1707 ret = btrfs_resume_balance_async(fs_info); 1708 if (ret) 1709 goto restore; 1710 1711 ret = btrfs_resume_dev_replace_async(fs_info); 1712 if (ret) { 1713 btrfs_warn(fs_info, "failed to resume dev_replace"); 1714 goto restore; 1715 } 1716 1717 if (!fs_info->uuid_root) { 1718 btrfs_info(fs_info, "creating UUID tree"); 1719 ret = btrfs_create_uuid_tree(fs_info); 1720 if (ret) { 1721 btrfs_warn(fs_info, "failed to create the UUID tree %d", ret); 1722 goto restore; 1723 } 1724 } 1725 sb->s_flags &= ~MS_RDONLY; 1726 } 1727 out: 1728 wake_up_process(fs_info->transaction_kthread); 1729 btrfs_remount_cleanup(fs_info, old_opts); 1730 return 0; 1731 1732 restore: 1733 /* We've hit an error - don't reset MS_RDONLY */ 1734 if (sb->s_flags & MS_RDONLY) 1735 old_flags |= MS_RDONLY; 1736 sb->s_flags = old_flags; 1737 fs_info->mount_opt = old_opts; 1738 fs_info->compress_type = old_compress_type; 1739 fs_info->max_inline = old_max_inline; 1740 mutex_lock(&fs_info->chunk_mutex); 1741 fs_info->alloc_start = old_alloc_start; 1742 mutex_unlock(&fs_info->chunk_mutex); 1743 btrfs_resize_thread_pool(fs_info, 1744 old_thread_pool_size, fs_info->thread_pool_size); 1745 fs_info->metadata_ratio = old_metadata_ratio; 1746 btrfs_remount_cleanup(fs_info, old_opts); 1747 return ret; 1748 } 1749 1750 /* Used to sort the devices by max_avail(descending sort) */ 1751 static int btrfs_cmp_device_free_bytes(const void *dev_info1, 1752 const void *dev_info2) 1753 { 1754 if (((struct btrfs_device_info *)dev_info1)->max_avail > 1755 ((struct btrfs_device_info *)dev_info2)->max_avail) 1756 return -1; 1757 else if (((struct btrfs_device_info *)dev_info1)->max_avail < 1758 ((struct btrfs_device_info *)dev_info2)->max_avail) 1759 return 1; 1760 else 1761 return 0; 1762 } 1763 1764 /* 1765 * sort the devices by max_avail, in which max free extent size of each device 1766 * is stored.(Descending Sort) 1767 */ 1768 static inline void btrfs_descending_sort_devices( 1769 struct btrfs_device_info *devices, 1770 size_t nr_devices) 1771 { 1772 sort(devices, nr_devices, sizeof(struct btrfs_device_info), 1773 btrfs_cmp_device_free_bytes, NULL); 1774 } 1775 1776 /* 1777 * The helper to calc the free space on the devices that can be used to store 1778 * file data. 
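 *
 * Rough worked example (hypothetical numbers): with RAID1 data over two
 * devices that have 30GiB and 10GiB of unallocated space, the simulation
 * below pairs stripes until fewer than min_stripes devices remain, which
 * yields 20GiB of raw space; btrfs_statfs() then divides by the RAID1
 * factor and reports roughly 10GiB of available data space.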
 */
static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_device_info *devices_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 skip_space;
	u64 type;
	u64 avail_space;
	u64 used_space;
	u64 min_stripe_size;
	int min_stripes = 1, num_stripes = 1;
	int i = 0, nr_devices;
	int ret;

	/*
	 * We aren't under the device list lock, so this is racy-ish, but good
	 * enough for our purposes.
	 */
	nr_devices = fs_info->fs_devices->open_devices;
	if (!nr_devices) {
		smp_mb();
		nr_devices = fs_info->fs_devices->open_devices;
		ASSERT(nr_devices);
		if (!nr_devices) {
			*free_bytes = 0;
			return 0;
		}
	}

	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
				     GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	/* calc min stripe number for data space allocation */
	type = btrfs_get_alloc_profile(root, 1);
	if (type & BTRFS_BLOCK_GROUP_RAID0) {
		min_stripes = 2;
		num_stripes = nr_devices;
	} else if (type & BTRFS_BLOCK_GROUP_RAID1) {
		min_stripes = 2;
		num_stripes = 2;
	} else if (type & BTRFS_BLOCK_GROUP_RAID10) {
		min_stripes = 4;
		num_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_stripe_size = 2 * BTRFS_STRIPE_LEN;
	else
		min_stripe_size = BTRFS_STRIPE_LEN;

	if (fs_info->alloc_start)
		mutex_lock(&fs_devices->device_list_mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
		if (!device->in_fs_metadata || !device->bdev ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		if (i >= nr_devices)
			break;

		avail_space = device->total_bytes - device->bytes_used;

		/* align with stripe_len */
		avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN);
		avail_space *= BTRFS_STRIPE_LEN;

		/*
		 * In order to avoid overwriting the superblock on the drive,
		 * btrfs starts at an offset of at least 1MB when doing chunk
		 * allocation.
		 */
		skip_space = 1024 * 1024;

		/* user can set the offset in fs_info->alloc_start. */
		if (fs_info->alloc_start &&
		    fs_info->alloc_start + BTRFS_STRIPE_LEN <=
		    device->total_bytes) {
			rcu_read_unlock();
			skip_space = max(fs_info->alloc_start, skip_space);

			/*
			 * btrfs can not use the free space in
			 * [0, skip_space - 1], we must subtract it from the
			 * total. In order to implement it, we account the used
			 * space in this range first.
			 */
			ret = btrfs_account_dev_extents_size(device, 0,
							     skip_space - 1,
							     &used_space);
			if (ret) {
				kfree(devices_info);
				mutex_unlock(&fs_devices->device_list_mutex);
				return ret;
			}

			rcu_read_lock();

			/* calc the free space in [0, skip_space - 1] */
			skip_space -= used_space;
		}

		/*
		 * We can not use the free space in [0, skip_space - 1], so
		 * subtract it from the total.
		 */
		if (avail_space && avail_space >= skip_space)
			avail_space -= skip_space;
		else
			avail_space = 0;

		if (avail_space < min_stripe_size)
			continue;

		devices_info[i].dev = device;
		devices_info[i].max_avail = avail_space;

		i++;
	}
	rcu_read_unlock();
	if (fs_info->alloc_start)
		mutex_unlock(&fs_devices->device_list_mutex);

	nr_devices = i;

	btrfs_descending_sort_devices(devices_info, nr_devices);

	i = nr_devices - 1;
	avail_space = 0;
	while (nr_devices >= min_stripes) {
		if (num_stripes > nr_devices)
			num_stripes = nr_devices;

		if (devices_info[i].max_avail >= min_stripe_size) {
			int j;
			u64 alloc_size;

			avail_space += devices_info[i].max_avail * num_stripes;
			alloc_size = devices_info[i].max_avail;
			for (j = i + 1 - num_stripes; j <= i; j++)
				devices_info[j].max_avail -= alloc_size;
		}
		i--;
		nr_devices--;
	}

	kfree(devices_info);
	*free_bytes = avail_space;
	return 0;
}

/*
 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
 *
 * If there's a redundant raid level at DATA block groups, use the respective
 * multiplier to scale the sizes.
 *
 * Unused device space usage is based on simulating the chunk allocator
 * algorithm that respects the device sizes, the order of allocations and the
 * 'alloc_start' value. This is a close approximation of the actual use, but
 * there are other factors that may change the result (like a new metadata
 * chunk).
 *
 * FIXME: not accurate for mixed block groups, total and free/used are ok,
 * available appears slightly larger.
 */
static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct list_head *head = &fs_info->space_info;
	struct btrfs_space_info *found;
	u64 total_used = 0;
	u64 total_free_data = 0;
	int bits = dentry->d_sb->s_blocksize_bits;
	__be32 *fsid = (__be32 *)fs_info->fsid;
	unsigned factor = 1;
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	int ret;

	/*
	 * Holding chunk_mutex avoids allocating new chunks, holding
	 * device_list_mutex avoids the device being removed.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
			int i;

			total_free_data += found->disk_total - found->disk_used;
			total_free_data -=
				btrfs_account_ro_block_groups_free_space(found);

			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
				if (!list_empty(&found->block_groups[i])) {
					switch (i) {
					case BTRFS_RAID_DUP:
					case BTRFS_RAID_RAID1:
					case BTRFS_RAID_RAID10:
						factor = 2;
					}
				}
			}
		}

		total_used += found->disk_used;
	}

	rcu_read_unlock();

	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
	buf->f_blocks >>= bits;
	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);

	/* Account global block reserve as used, it's in logical size already */
	spin_lock(&block_rsv->lock);
	buf->f_bfree -= block_rsv->size >> bits;
	spin_unlock(&block_rsv->lock);

	buf->f_bavail = div_u64(total_free_data, factor);
	ret =
static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
        struct btrfs_super_block *disk_super = fs_info->super_copy;
        struct list_head *head = &fs_info->space_info;
        struct btrfs_space_info *found;
        u64 total_used = 0;
        u64 total_free_data = 0;
        int bits = dentry->d_sb->s_blocksize_bits;
        __be32 *fsid = (__be32 *)fs_info->fsid;
        unsigned factor = 1;
        struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
        int ret;

        /*
         * holding chunk_mutex to avoid allocating new chunks, holding
         * device_list_mutex to avoid the device being removed
         */
        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
                        int i;

                        total_free_data += found->disk_total - found->disk_used;
                        total_free_data -=
                                btrfs_account_ro_block_groups_free_space(found);

                        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
                                if (!list_empty(&found->block_groups[i])) {
                                        switch (i) {
                                        case BTRFS_RAID_DUP:
                                        case BTRFS_RAID_RAID1:
                                        case BTRFS_RAID_RAID10:
                                                factor = 2;
                                        }
                                }
                        }
                }

                total_used += found->disk_used;
        }

        rcu_read_unlock();

        buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
        buf->f_blocks >>= bits;
        buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);

        /* Account global block reserve as used, it's in logical size already */
        spin_lock(&block_rsv->lock);
        buf->f_bfree -= block_rsv->size >> bits;
        spin_unlock(&block_rsv->lock);

        buf->f_bavail = div_u64(total_free_data, factor);
        ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
        if (ret)
                return ret;
        buf->f_bavail += div_u64(total_free_data, factor);
        buf->f_bavail = buf->f_bavail >> bits;

        buf->f_type = BTRFS_SUPER_MAGIC;
        buf->f_bsize = dentry->d_sb->s_blocksize;
        buf->f_namelen = BTRFS_NAME_LEN;

        /*
         * We treat it as constant endianness (it doesn't matter _which_)
         * because we want the fsid to come out the same whether mounted
         * on a big-endian or little-endian host.
         */
        buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
        buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
        /* Mask in the root object ID too, to disambiguate subvols */
        buf->f_fsid.val[0] ^= BTRFS_I(d_inode(dentry))->root->objectid >> 32;
        buf->f_fsid.val[1] ^= BTRFS_I(d_inode(dentry))->root->objectid;

        return 0;
}

static void btrfs_kill_super(struct super_block *sb)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        kill_anon_super(sb);
        free_fs_info(fs_info);
}

static struct file_system_type btrfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "btrfs",
        .mount          = btrfs_mount,
        .kill_sb        = btrfs_kill_super,
        .fs_flags       = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
};
MODULE_ALIAS_FS("btrfs");

static int btrfs_control_open(struct inode *inode, struct file *file)
{
        /*
         * The control file's private_data is used to hold the
         * transaction when it is started and is used to keep
         * track of whether a transaction is already in progress.
         */
        file->private_data = NULL;
        return 0;
}

/*
 * used by btrfsctl to scan devices when no FS is mounted
 */
static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
{
        struct btrfs_ioctl_vol_args *vol;
        struct btrfs_fs_devices *fs_devices;
        int ret = -ENOTTY;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        vol = memdup_user((void __user *)arg, sizeof(*vol));
        if (IS_ERR(vol))
                return PTR_ERR(vol);

        switch (cmd) {
        case BTRFS_IOC_SCAN_DEV:
                ret = btrfs_scan_one_device(vol->name, FMODE_READ,
                                            &btrfs_fs_type, &fs_devices);
                break;
        case BTRFS_IOC_DEVICES_READY:
                ret = btrfs_scan_one_device(vol->name, FMODE_READ,
                                            &btrfs_fs_type, &fs_devices);
                if (ret)
                        break;
                ret = !(fs_devices->num_devices == fs_devices->total_devices);
                break;
        }

        kfree(vol);
        return ret;
}

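/*
 * Userspace sketch (illustrative, not part of the kernel build): tools scan
 * devices through /dev/btrfs-control roughly like this, assuming the uapi
 * definitions from <linux/btrfs.h>:
 *
 *      struct btrfs_ioctl_vol_args args = { 0 };
 *      int fd = open("/dev/btrfs-control", O_RDWR);
 *
 *      strncpy(args.name, "/dev/sdb", BTRFS_PATH_NAME_MAX);
 *      ioctl(fd, BTRFS_IOC_SCAN_DEV, &args);       (register the device)
 *      ioctl(fd, BTRFS_IOC_DEVICES_READY, &args);  (0 once all devices seen)
 *
 * BTRFS_IOC_DEVICES_READY returns 1 while devices of the filesystem are
 * still missing and 0 once num_devices == total_devices.
 */
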
static int btrfs_freeze(struct super_block *sb)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = btrfs_sb(sb)->tree_root;

        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                /* no transaction, don't bother */
                if (PTR_ERR(trans) == -ENOENT)
                        return 0;
                return PTR_ERR(trans);
        }
        return btrfs_commit_transaction(trans, root);
}

static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
        struct btrfs_fs_devices *cur_devices;
        struct btrfs_device *dev, *first_dev = NULL;
        struct list_head *head;
        struct rcu_string *name;

        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        cur_devices = fs_info->fs_devices;
        while (cur_devices) {
                head = &cur_devices->devices;
                list_for_each_entry(dev, head, dev_list) {
                        if (dev->missing)
                                continue;
                        if (!dev->name)
                                continue;
                        if (!first_dev || dev->devid < first_dev->devid)
                                first_dev = dev;
                }
                cur_devices = cur_devices->seed;
        }

        if (first_dev) {
                rcu_read_lock();
                name = rcu_dereference(first_dev->name);
                seq_escape(m, name->str, " \t\n\\");
                rcu_read_unlock();
        } else {
                WARN_ON(1);
        }
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);
        return 0;
}

static const struct super_operations btrfs_super_ops = {
        .drop_inode     = btrfs_drop_inode,
        .evict_inode    = btrfs_evict_inode,
        .put_super      = btrfs_put_super,
        .sync_fs        = btrfs_sync_fs,
        .show_options   = btrfs_show_options,
        .show_devname   = btrfs_show_devname,
        .write_inode    = btrfs_write_inode,
        .alloc_inode    = btrfs_alloc_inode,
        .destroy_inode  = btrfs_destroy_inode,
        .statfs         = btrfs_statfs,
        .remount_fs     = btrfs_remount,
        .freeze_fs      = btrfs_freeze,
};

static const struct file_operations btrfs_ctl_fops = {
        .open = btrfs_control_open,
        .unlocked_ioctl = btrfs_control_ioctl,
        .compat_ioctl = btrfs_control_ioctl,
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
};

static struct miscdevice btrfs_misc = {
        .minor          = BTRFS_MINOR,
        .name           = "btrfs-control",
        .fops           = &btrfs_ctl_fops
};

MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
MODULE_ALIAS("devname:btrfs-control");

static int btrfs_interface_init(void)
{
        return misc_register(&btrfs_misc);
}

static void btrfs_interface_exit(void)
{
        misc_deregister(&btrfs_misc);
}

static void btrfs_print_info(void)
{
        printk(KERN_INFO "Btrfs loaded"
#ifdef CONFIG_BTRFS_DEBUG
                        ", debug=on"
#endif
#ifdef CONFIG_BTRFS_ASSERT
                        ", assert=on"
#endif
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
                        ", integrity-checker=on"
#endif
                        "\n");
}

static int btrfs_run_sanity_tests(void)
{
        int ret;

        ret = btrfs_init_test_fs();
        if (ret)
                return ret;

        ret = btrfs_test_free_space_cache();
        if (ret)
                goto out;
        ret = btrfs_test_extent_buffer_operations();
        if (ret)
                goto out;
        ret = btrfs_test_extent_io();
        if (ret)
                goto out;
        ret = btrfs_test_inodes();
        if (ret)
                goto out;
        ret = btrfs_test_qgroups();
out:
        btrfs_destroy_test_fs();
        return ret;
}
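
/*
 * Module init/exit.  The subsystems below are brought up in a fixed order and
 * the goto labels at the end of init_btrfs_fs() tear them down in reverse, so
 * a failure at any step only unwinds what was already initialised.
 */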
static int __init init_btrfs_fs(void)
{
        int err;

        err = btrfs_hash_init();
        if (err)
                return err;

        btrfs_props_init();

        err = btrfs_init_sysfs();
        if (err)
                goto free_hash;

        btrfs_init_compress();

        err = btrfs_init_cachep();
        if (err)
                goto free_compress;

        err = extent_io_init();
        if (err)
                goto free_cachep;

        err = extent_map_init();
        if (err)
                goto free_extent_io;

        err = ordered_data_init();
        if (err)
                goto free_extent_map;

        err = btrfs_delayed_inode_init();
        if (err)
                goto free_ordered_data;

        err = btrfs_auto_defrag_init();
        if (err)
                goto free_delayed_inode;

        err = btrfs_delayed_ref_init();
        if (err)
                goto free_auto_defrag;

        err = btrfs_prelim_ref_init();
        if (err)
                goto free_delayed_ref;

        err = btrfs_end_io_wq_init();
        if (err)
                goto free_prelim_ref;

        err = btrfs_interface_init();
        if (err)
                goto free_end_io_wq;

        btrfs_init_lockdep();

        btrfs_print_info();

        err = btrfs_run_sanity_tests();
        if (err)
                goto unregister_ioctl;

        err = register_filesystem(&btrfs_fs_type);
        if (err)
                goto unregister_ioctl;

        return 0;

unregister_ioctl:
        btrfs_interface_exit();
free_end_io_wq:
        btrfs_end_io_wq_exit();
free_prelim_ref:
        btrfs_prelim_ref_exit();
free_delayed_ref:
        btrfs_delayed_ref_exit();
free_auto_defrag:
        btrfs_auto_defrag_exit();
free_delayed_inode:
        btrfs_delayed_inode_exit();
free_ordered_data:
        ordered_data_exit();
free_extent_map:
        extent_map_exit();
free_extent_io:
        extent_io_exit();
free_cachep:
        btrfs_destroy_cachep();
free_compress:
        btrfs_exit_compress();
        btrfs_exit_sysfs();
free_hash:
        btrfs_hash_exit();
        return err;
}

static void __exit exit_btrfs_fs(void)
{
        btrfs_destroy_cachep();
        btrfs_delayed_ref_exit();
        btrfs_auto_defrag_exit();
        btrfs_delayed_inode_exit();
        btrfs_prelim_ref_exit();
        ordered_data_exit();
        extent_map_exit();
        extent_io_exit();
        btrfs_interface_exit();
        btrfs_end_io_wq_exit();
        unregister_filesystem(&btrfs_fs_type);
        btrfs_exit_sysfs();
        btrfs_cleanup_fs_uuids();
        btrfs_exit_compress();
        btrfs_hash_exit();
}

late_initcall(init_btrfs_fs);
module_exit(exit_btrfs_fs)

MODULE_LICENSE("GPL");