/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"


static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

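/*
 * Free the global table used to track the UUIDs of mounted filesystems.
 */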
void
xfs_uuid_table_free(void)
{
	if (xfs_uuid_table_size == 0)
		return;
	kmem_free(xfs_uuid_table);
	xfs_uuid_table = NULL;
	xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_nil(uuid)) {
		xfs_warn(mp, "Filesystem has nil UUID - can't mount");
		return -EINVAL;
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

 out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return -EINVAL;
}

STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}

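/*
 * RCU callback to free a per-ag structure once the last reference to it
 * has been dropped and a grace period has passed.
 */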
STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	__uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return -EFBIG;
	return 0;
}

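/*
 * Allocate and initialise per-AG structures for every AG in the filesystem,
 * skipping any that have already been set up (the growfs case), and report
 * the maximum AG index usable for inode allocation via @maxagi.
 */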
int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = 0;
	xfs_perag_t	*pag;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}
		if (!first_initialised)
			first_initialised = index;

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind;
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		mutex_init(&pag->pag_ici_reclaim_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		spin_lock_init(&pag->pag_buf_lock);
		pag->pag_buf_tree = RB_ROOT;

		if (radix_tree_preload(GFP_NOFS))
			goto out_unwind;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_unwind;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_unwind:
	kmem_free(pag);
	for (; index > first_initialised; index--) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		kmem_free(pag);
	}
	return error;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, we must guess at the sector
	 * size based on the block device.  It's enough to
	 * get the sb_sectsize out of the superblock and
	 * then reread with the proper length.
	 * We don't verify it yet, because it may not be complete.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Allocate a (locked) buffer to hold the superblock. This will be kept
	 * around at all times to optimize access to the superblock. Therefore,
	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
	 * elevated.
	 */
reread:
	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
				      buf_ops);
	if (error) {
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));

	/*
	 * If we haven't validated the superblock, do so now before we try
	 * to check the sector size and reread the superblock appropriately.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		if (loud)
			xfs_warn(mp, "Invalid superblock magic number");
		error = -EINVAL;
		goto release_buf;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = -ENOSYS;
		goto release_buf;
	}

	if (buf_ops == NULL) {
		/*
		 * Re-read the superblock so the buffer is correctly sized,
		 * and properly verified.
		 */
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	xfs_reinit_percpu_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}

/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			xfs_warn(mp,
		"alignment check failed: sunit/swidth vs. blocksize(%d)",
				sbp->sb_blocksize);
			return -EINVAL;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				xfs_warn(mp,
			"alignment check failed: sunit/swidth vs. agsize(%d)",
					sbp->sb_agblocks);
				return -EINVAL;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				xfs_warn(mp,
		"alignment check failed: sunit(%d) less than bsize(%d)",
					mp->m_dalign, sbp->sb_blocksize);
				return -EINVAL;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (xfs_sb_version_hasdalign(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				mp->m_update_sb = true;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				mp->m_update_sb = true;
			}
		} else {
			xfs_warn(mp,
	"cannot change alignment: superblock does not support data alignment");
			return -EINVAL;
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
			mp->m_dalign = sbp->sb_unit;
			mp->m_swidth = sbp->sb_width;
	}

	return 0;
}

/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	__uint64_t	icount;

	if (sbp->sb_imax_pct) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
				   sbp->sb_inopblog;
	} else {
		mp->m_maxicount = 0;
	}
}

/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	int		readio_log, writeio_log;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}

/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	int i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		__uint64_t space = mp->m_sb.sb_dblocks;

		do_div(space, 100);
		mp->m_low_space[i] = space * (i + 1);
	}
}


/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
	struct xfs_mount *mp)
{
	struct xfs_buf	*bp;
	xfs_daddr_t	d;
	int		error;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "last sector read failed");
		return error;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp == mp->m_ddev_targp)
		return 0;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
		xfs_warn(mp, "log size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "log device read failed");
		return error;
	}
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	mp->m_qflags = 0;

	/* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	return xfs_sync_sb(mp, false);
}

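/*
 * Default size of the reserved block pool: 5% of the data device or
 * 8192 filesystem blocks, whichever is smaller.
 */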
__uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	__uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(__uint64_t, resblks, 8192);
	return resblks;
}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &(mp->m_sb);
	struct xfs_inode	*rip;
	__uint64_t		resblks;
	uint			quotamount = 0;
	uint			quotaflags = 0;
	int			error = 0;

	xfs_sb_mount_common(mp, sbp);

	/*
	 * Check for a mismatched features2 values. Older kernels read & wrote
	 * into the wrong sb offset for sb_features2 on some platforms due to
	 * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
	 * which made older superblock reading/writing routines swap it as a
	 * 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the existing
	 * features2 field in case it has already been modified; we don't want
	 * to lose any features.  We then update the bad location with the ORed
	 * value so that older kernels will see any features2 flags. The
	 * superblock writeback code ensures the new sb_features2 is copied to
	 * sb_bad_features2 before it is logged or written to disk.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		mp->m_update_sb = true;

		/*
		 * Re-check for ATTR2 in case it was found in bad_features2
		 * slot.
		 */
		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
		    !(mp->m_flags & XFS_MOUNT_NOATTR2))
			mp->m_flags |= XFS_MOUNT_ATTR2;
	}

	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_sb_version_removeattr2(&mp->m_sb);
		mp->m_update_sb = true;

		/* update sb_versionnum for the clearing of the morebits */
		if (!sbp->sb_features2)
			mp->m_update_sb = true;
	}

	/* always use v2 inodes by default now */
	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
		mp->m_update_sb = true;
	}

	/*
	 * Check if sb_agblocks is aligned at stripe boundary.
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);
	xfs_rmapbt_compute_maxlevels(mp);
	xfs_refcountbt_compute_maxlevels(mp);

	xfs_set_maxicount(mp);

	/* enable fail_at_unmount as default */
	mp->m_fail_unmount = 1;

	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
	if (error)
		goto out;

	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
			       &mp->m_kobj, "stats");
	if (error)
		goto out_remove_sysfs;

	error = xfs_error_sysfs_init(mp);
	if (error)
		goto out_del_stats;

	error = xfs_uuid_mount(mp);
	if (error)
		goto out_remove_error_sysfs;

	/*
	 * Set the minimum read and write sizes
	 */
	xfs_set_rw_sizes(mp);

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * Set the inode cluster size.
	 * This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 *
	 * For v5 filesystems, scale the cluster size with the inode size to
	 * keep a constant ratio of inode per cluster buffer, but only if mkfs
	 * has set the inode alignment value appropriately for larger cluster
	 * sizes.
	 */
	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		int	new_size = mp->m_inode_cluster_size;

		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
			mp->m_inode_cluster_size = new_size;
	}

	/*
	 * If enabled, sparse inode chunk alignment is expected to match the
	 * cluster size. Full inode chunk alignment must match the chunk size,
	 * but that is checked on sb read verification...
	 */
	if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
	    mp->m_sb.sb_spino_align !=
			XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) {
		xfs_warn(mp,
	"Sparse inode block alignment (%u) must match cluster size (%llu).",
			 mp->m_sb.sb_spino_align,
			 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size));
		error = -EINVAL;
		goto out_remove_uuid;
	}

	/*
	 * Set inode alignment fields
	 */
	xfs_set_inoalignment(mp);

	/*
	 * Check that the data (and log if separate) is an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);

	mp->m_dmevmask = 0;	/* not persistent; set after each mount */

	error = xfs_da_mount(mp);
	if (error) {
		xfs_warn(mp, "Failed dir/attr init: %d", error);
		goto out_remove_uuid;
	}

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	spin_lock_init(&mp->m_perag_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_free_dir;
	}

	if (!sbp->sb_logblocks) {
		xfs_warn(mp, "no log defined");
		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_free_perag;
	}

	/*
	 * Log's mount-time initialization. The first part of recovery can place
	 * some items on the AIL, to be handled when recovery is finished or
	 * cancelled.
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_fail_wait;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, then the first phase of recovery has completed and
	 * we have consistent AG blocks on disk. We have not recovered EFIs
	 * yet, but they are recovered transactionally in the second recovery
	 * phase later.
	 *
	 * Hence we can safely re-initialise incore superblock counters from
	 * the per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we need to wait for recovery to finish before
	 * doing this.
	 *
	 * If the filesystem was cleanly unmounted, then we can trust the
	 * values in the superblock to be correct and we don't need to do
	 * anything here.
	 *
	 * If we are currently making the filesystem, the initialisation will
	 * fail as the perag data is in an undefined state.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
	    !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	     !mp->m_sb.sb_inprogress) {
		error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
		if (error)
			goto out_log_dealloc;
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
	if (error) {
		xfs_warn(mp, "failed to read root inode");
		goto out_log_dealloc;
	}

	ASSERT(rip != NULL);

	if (unlikely(!S_ISDIR(VFS_I(rip)->i_mode))) {
		xfs_warn(mp, "corrupted root inode %llu: not a directory",
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = -EFSCORRUPTED;
		goto out_rele_rip;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		xfs_warn(mp, "failed to read RT inodes");
		goto out_rele_rip;
	}

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode.  Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		ASSERT(!XFS_IS_QUOTA_ON(mp));

		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				goto out_rtunmount;
		}
	}

	/*
	 * During the second phase of log recovery, we need iget and
	 * iput to behave like they do for an active filesystem.
	 * xfs_fs_drop_inode needs to be able to prevent the deletion
	 * of inodes before we're done replaying log items on those
	 * inodes.
	 */
	mp->m_super->s_flags |= MS_ACTIVE;

	/*
	 * Finish recovering the file system.  This part needed to be delayed
	 * until after the root and real-time bitmap inodes were consistently
	 * read in.
	 */
	error = xfs_log_mount_finish(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Now the log is fully replayed, we can transition to full read-only
	 * mode for read-only mounts. This will sync all the metadata and clean
	 * the log so that the recovery we just performed does not have to be
	 * replayed again on the next mount.
	 *
	 * We use the same quiesce mechanism as the rw->ro remount, as they are
	 * semantically identical operations.
	 */
	if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
							XFS_MOUNT_RDONLY) {
		xfs_quiesce_attr(mp);
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			xfs_warn(mp,
	"Unable to allocate reserve blocks. Continuing without reserve pool.");

		/* Recover any CoW blocks that never got remapped. */
		error = xfs_reflink_recover_cow(mp);
		if (error) {
			xfs_err(mp,
	"Error %d recovering leftover CoW allocations.", error);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			goto out_quota;
		}

		/* Reserve AG blocks for future btree expansion. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			goto out_agresv;
	}

	return 0;

 out_agresv:
	xfs_fs_unreserve_ag_blocks(mp);
 out_quota:
	xfs_qm_unmount_quotas(mp);
 out_rtunmount:
	xfs_rtunmount_inodes(mp);
 out_rele_rip:
	IRELE(rip);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);
 out_log_dealloc:
	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
	xfs_log_mount_cancel(mp);
 out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
 out_free_perag:
	xfs_free_perag(mp);
 out_free_dir:
	xfs_da_unmount(mp);
 out_remove_uuid:
	xfs_uuid_unmount(mp);
 out_remove_error_sysfs:
	xfs_error_sysfs_del(mp);
 out_del_stats:
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
 out_remove_sysfs:
	xfs_sysfs_del(&mp->m_kobj);
 out:
	return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	__uint64_t		resblks;
	int			error;

	cancel_delayed_work_sync(&mp->m_eofblocks_work);
	cancel_delayed_work_sync(&mp->m_cowblocks_work);

	xfs_fs_unreserve_ag_blocks(mp);
	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	IRELE(mp->m_rootip);

	/*
	 * We can potentially deadlock here if we have an inode cluster that
	 * has been freed but whose buffer is still pinned in memory because
	 * the transaction is still sitting in an iclog. The stale inodes on
	 * that buffer will have their flush locks held until the transaction
	 * hits the disk and the callbacks run. The inode flush takes the
	 * flush lock unconditionally, and with nothing to push out the iclog
	 * we will never get that unlocked. Hence we need to force the log
	 * first.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * We now need to tell the world we are unmounting. This will allow
	 * us to detect that the filesystem is going away and we should error
	 * out anything that we have been retrying in the background. This will
	 * prevent neverending retries in AIL pushing from hanging the unmount.
	 */
	mp->m_flags |= XFS_MOUNT_UNMOUNTING;

	/*
	 * Flush all pending changes from the AIL.
	 */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * And reclaim all inodes.  At this point there should be no dirty
	 * inodes and none should be pinned or locked, but use synchronous
	 * reclaim just to be sure. We can stop background inode reclaim
	 * here as well if it is still running.
	 */
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	xfs_qm_unmount(mp);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't account
	 * the reserved free space as used. This is really only necessary for
	 * lazy superblock counting because it trusts the incore superblock
	 * counters to be absolutely correct on clean unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct the
	 * correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all because
	 * we only ever apply deltas to the superblock and hence the incore
	 * value does not matter....
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "Unable to update superblock counters. "
				"Freespace may not be correct on next mount.");

	xfs_log_unmount(mp);
	xfs_da_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp, 0);
#endif
	xfs_free_perag(mp);

	xfs_error_sysfs_del(mp);
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
	xfs_sysfs_del(&mp->m_kobj);
}

/*
 * Determine whether modifications can proceed. The caller specifies the minimum
 * freeze level for which modifications should not be allowed. This allows
 * certain operations to proceed while the freeze sequence is in progress, if
 * necessary.
 */
bool
xfs_fs_writable(
	struct xfs_mount	*mp,
	int			level)
{
	ASSERT(level > SB_UNFROZEN);
	if ((mp->m_super->s_writers.frozen >= level) ||
	    XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
		return false;

	return true;
}

/*
 * xfs_log_sbcount
 *
 * Sync the superblock counters to disk.
 *
 * Note this code can be called during the process of freezing, so we use the
 * transaction allocator that does not block when the transaction subsystem is
 * in its frozen state.
 */
int
xfs_log_sbcount(xfs_mount_t *mp)
{
	/* allow this to proceed during the freeze sequence... */
	if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
		return 0;

	/*
	 * we don't need to do this if we are updating the superblock
	 * counters on every modification.
	 */
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	return xfs_sync_sb(mp, true);
}

/*
 * Deltas for the inode count are +/-64, hence we use a large batch size
 * of 128 so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128
int
xfs_mod_icount(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	__percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
	if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
		ASSERT(0);
		percpu_counter_add(&mp->m_icount, -delta);
		return -EINVAL;
	}
	return 0;
}

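/*
 * Modify the incore free inode counter, failing the update and restoring
 * the old value if the counter would go negative.
 */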
int
xfs_mod_ifree(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	percpu_counter_add(&mp->m_ifree, delta);
	if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
		ASSERT(0);
		percpu_counter_add(&mp->m_ifree, -delta);
		return -EINVAL;
	}
	return 0;
}

/*
 * Deltas for the block count can vary from 1 to very large, but lock contention
 * only occurs on frequent small block count updates such as in the delayed
 * allocation path for buffered writes (page-at-a-time updates). Hence we set
 * a large batch count (1024) to minimise global counter updates except when
 * we get near to ENOSPC and we have to be very accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH	1024
int
xfs_mod_fdblocks(
	struct xfs_mount	*mp,
	int64_t			delta,
	bool			rsvd)
{
	int64_t			lcounter;
	long long		res_used;
	s32			batch;

	if (delta > 0) {
		/*
		 * If the reserve pool is depleted, put blocks back into it
		 * first. Most of the time the pool is full.
		 */
		if (likely(mp->m_resblks == mp->m_resblks_avail)) {
			percpu_counter_add(&mp->m_fdblocks, delta);
			return 0;
		}

		spin_lock(&mp->m_sb_lock);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (res_used > delta) {
			mp->m_resblks_avail += delta;
		} else {
			delta -= res_used;
			mp->m_resblks_avail = mp->m_resblks;
			percpu_counter_add(&mp->m_fdblocks, delta);
		}
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}

	/*
	 * Taking blocks away, need to be more accurate the closer we
	 * are to zero.
	 *
	 * If the counter has a value of less than 2 * max batch size,
	 * then make everything serialise as we are real close to
	 * ENOSPC.
	 */
	if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
				     XFS_FDBLOCKS_BATCH) < 0)
		batch = 1;
	else
		batch = XFS_FDBLOCKS_BATCH;

	__percpu_counter_add(&mp->m_fdblocks, delta, batch);
	if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
				     XFS_FDBLOCKS_BATCH) >= 0) {
		/* we had space! */
		return 0;
	}

	/*
	 * lock up the sb for dipping into reserves before releasing the space
	 * that took us to ENOSPC.
	 */
	spin_lock(&mp->m_sb_lock);
	percpu_counter_add(&mp->m_fdblocks, -delta);
	if (!rsvd)
		goto fdblocks_enospc;

	lcounter = (long long)mp->m_resblks_avail + delta;
	if (lcounter >= 0) {
		mp->m_resblks_avail = lcounter;
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}
	printk_once(KERN_WARNING
		"Filesystem \"%s\": reserve blocks depleted! "
		"Consider increasing reserve pool size.",
		mp->m_fsname);
fdblocks_enospc:
	spin_unlock(&mp->m_sb_lock);
	return -ENOSPC;
}

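/*
 * Modify the free realtime extent counter. This lives in the superblock
 * rather than in a per-cpu counter, so updates are serialised under
 * m_sb_lock.
 */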
int
xfs_mod_frextents(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	int64_t			lcounter;
	int			ret = 0;

	spin_lock(&mp->m_sb_lock);
	lcounter = mp->m_sb.sb_frextents + delta;
	if (lcounter < 0)
		ret = -ENOSPC;
	else
		mp->m_sb.sb_frextents = lcounter;
	spin_unlock(&mp->m_sb_lock);
	return ret;
}

/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_buf_relse().
 *
 * If the flags parameter is XBF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
struct xfs_buf *
xfs_getsb(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK)
			return NULL;
		xfs_buf_lock(bp);
	}

	xfs_buf_hold(bp);
	ASSERT(bp->b_flags & XBF_DONE);
	return bp;
}

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	mp->m_sb_bp = NULL;
	xfs_buf_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		xfs_notice(mp, "%s required on read-only device.", message);
		xfs_notice(mp, "write access unavailable, cannot proceed.");
		return -EROFS;
	}
	return 0;
}