/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_dinode.h"


#ifdef HAVE_PERCPU_SB
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else

#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
#define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
#endif

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_nil(uuid)) {
		xfs_warn(mp, "Filesystem has nil UUID - can't mount");
		return XFS_ERROR(EINVAL);
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			xfs_uuid_table_size * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return XFS_ERROR(EINVAL);
}

STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}


STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	__uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return EFBIG;
#else			/* Limited by UINT_MAX of sectors */
	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
		return EFBIG;
#endif
	return 0;
}

int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = 0;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	xfs_sb_t	*sbp = &mp->m_sb;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}
		if (!first_initialised)
			first_initialised = index;

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind;
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		mutex_init(&pag->pag_ici_reclaim_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		spin_lock_init(&pag->pag_buf_lock);
		pag->pag_buf_tree = RB_ROOT;

		if (radix_tree_preload(GFP_NOFS))
			goto out_unwind;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_unwind;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
	}

	/*
	 * If we mount with the inode64 option, or no inode overflows
	 * the legacy 32-bit address space clear the inode32 option.
	 */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	if (mp->m_flags & XFS_MOUNT_32BITINODES)
		index = xfs_set_inode32(mp);
	else
		index = xfs_set_inode64(mp);

	if (maxagi)
		*maxagi = index;
	return 0;

out_unwind:
	kmem_free(pag);
	for (; index > first_initialised; index--) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		kmem_free(pag);
	}
	return error;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, we must guess at the sector
	 * size based on the block device.  It's enough to
	 * get the sb_sectsize out of the superblock and
	 * then reread with the proper length.
	 * We don't verify it yet, because it may not be complete.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
reread:
	bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				   BTOBB(sector_size), 0, buf_ops);
	if (!bp) {
		if (loud)
			xfs_warn(mp, "SB buffer read failed");
		return EIO;
	}
	if (bp->b_error) {
		error = bp->b_error;
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == EFSBADCRC)
			error = EFSCORRUPTED;
		goto release_buf;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
	xfs_sb_quota_from_disk(&mp->m_sb);

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = ENOSYS;
		goto release_buf;
	}

	/*
	 * Re-read the superblock so the buffer is correctly sized,
	 * and properly verified.
	 */
	if (buf_ops == NULL) {
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	/* Initialize per-cpu counters */
	xfs_icsb_reinit_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}

/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			xfs_warn(mp,
		"alignment check failed: sunit/swidth vs. blocksize(%d)",
				sbp->sb_blocksize);
			return XFS_ERROR(EINVAL);
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				xfs_warn(mp,
			"alignment check failed: sunit/swidth vs. agsize(%d)",
					sbp->sb_agblocks);
				return XFS_ERROR(EINVAL);
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				xfs_warn(mp,
		"alignment check failed: sunit(%d) less than bsize(%d)",
					mp->m_dalign, sbp->sb_blocksize);
				return XFS_ERROR(EINVAL);
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (xfs_sb_version_hasdalign(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				mp->m_update_flags |= XFS_SB_UNIT;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				mp->m_update_flags |= XFS_SB_WIDTH;
			}
		} else {
			xfs_warn(mp,
	"cannot change alignment: superblock does not support data alignment");
			return XFS_ERROR(EINVAL);
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
			mp->m_dalign = sbp->sb_unit;
			mp->m_swidth = sbp->sb_width;
	}

	return 0;
}

/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	__uint64_t	icount;

	if (sbp->sb_imax_pct) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
				   sbp->sb_inopblog;
	} else {
		mp->m_maxicount = 0;
	}
}

/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	int		readio_log, writeio_log;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}

/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	int i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		__uint64_t space = mp->m_sb.sb_dblocks;

		do_div(space, 100);
		mp->m_low_space[i] = space * (i + 1);
	}
}


/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(xfs_mount_t *mp)
{
	xfs_buf_t	*bp;
	xfs_daddr_t	d;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return XFS_ERROR(EFBIG);
	}
	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, NULL);
	if (!bp) {
		xfs_warn(mp, "last sector read failed");
		return EIO;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
			xfs_warn(mp, "log size mismatch detected");
			return XFS_ERROR(EFBIG);
		}
		bp = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, NULL);
		if (!bp) {
			xfs_warn(mp, "log device read failed");
			return EIO;
		}
		xfs_buf_relse(bp);
	}
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	int			error;
	struct xfs_trans	*tp;

	mp->m_qflags = 0;

	/*
	 * It is OK to look at sb_qflags here in mount path,
	 * without m_sb_lock.
	 */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * If the fs is readonly, let the incore superblock run
	 * with quotas off but don't flush the update out to disk
	 */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_alert(mp, "%s: Superblock update failed!", __func__);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_QFLAGS);
	return xfs_trans_commit(tp, 0);
}

__uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	__uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(__uint64_t, resblks, 8192);
	return resblks;
}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	xfs_mount_t	*mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	xfs_inode_t	*rip;
	__uint64_t	resblks;
	uint		quotamount = 0;
	uint		quotaflags = 0;
	int		error = 0;

	xfs_sb_mount_common(mp, sbp);

	/*
	 * Check for a mismatched features2 values.  Older kernels
	 * read & wrote into the wrong sb offset for sb_features2
	 * on some platforms due to xfs_sb_t not being 64bit size aligned
	 * when sb_features2 was added, which made older superblock
	 * reading/writing routines swap it as a 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the
	 * existing features2 field in case it has already been modified; we
	 * don't want to lose any features.  We then update the bad location
	 * with the ORed value so that older kernels will see any features2
	 * flags, and mark the two fields as needing updates once the
	 * transaction subsystem is online.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		sbp->sb_bad_features2 = sbp->sb_features2;
		mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;

		/*
		 * Re-check for ATTR2 in case it was found in bad_features2
		 * slot.
		 */
		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
		   !(mp->m_flags & XFS_MOUNT_NOATTR2))
			mp->m_flags |= XFS_MOUNT_ATTR2;
	}

	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	   (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_sb_version_removeattr2(&mp->m_sb);
		mp->m_update_flags |= XFS_SB_FEATURES2;

		/* update sb_versionnum for the clearing of the morebits */
		if (!sbp->sb_features2)
			mp->m_update_flags |= XFS_SB_VERSIONNUM;
	}

	/*
	 * Check if sb_agblocks is aligned at stripe boundary
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	xfs_set_maxicount(mp);

	error = xfs_uuid_mount(mp);
	if (error)
		goto out;

	/*
	 * Set the minimum read and write sizes
	 */
	xfs_set_rw_sizes(mp);

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * Set the inode cluster size.
	 * This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 *
	 * For v5 filesystems, scale the cluster size with the inode size to
	 * keep a constant ratio of inode per cluster buffer, but only if mkfs
	 * has set the inode alignment value appropriately for larger cluster
	 * sizes.
	 */
	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		int	new_size = mp->m_inode_cluster_size;

		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
			mp->m_inode_cluster_size = new_size;
		xfs_info(mp, "Using inode cluster size of %d bytes",
			 mp->m_inode_cluster_size);
	}

	/*
	 * Set inode alignment fields
	 */
	xfs_set_inoalignment(mp);

	/*
	 * Check that the data (and log if separate) is an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);

	mp->m_dmevmask = 0;	/* not persistent; set after each mount */

	xfs_dir_mount(mp);

	/*
	 * Initialize the attribute manager's entries.
	 */
	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	spin_lock_init(&mp->m_perag_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_remove_uuid;
	}

	if (!sbp->sb_logblocks) {
		xfs_warn(mp, "no log defined");
		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_free_perag;
	}

	/*
	 * log's mount-time initialization. Perform 1st part recovery if needed
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_fail_wait;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, then with the first phase of recovery complete, we
	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
	 * but they are recovered transactionally in the second recovery phase
	 * later.
	 *
	 * Hence we can safely re-initialise incore superblock counters from
	 * the per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we need to wait for recovery to finish before
	 * doing this.
	 *
	 * If the filesystem was cleanly unmounted, then we can trust the
	 * values in the superblock to be correct and we don't need to do
	 * anything here.
	 *
	 * If we are currently making the filesystem, the initialisation will
	 * fail as the perag data is in an undefined state.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
	    !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	     !mp->m_sb.sb_inprogress) {
		error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
		if (error)
			goto out_fail_wait;
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
	if (error) {
		xfs_warn(mp, "failed to read root inode");
		goto out_log_dealloc;
	}

	ASSERT(rip != NULL);

	if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
		xfs_warn(mp, "corrupted root inode %llu: not a directory",
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_rele_rip;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		xfs_warn(mp, "failed to read RT inodes");
		goto out_rele_rip;
	}

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode.  Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		error = xfs_mount_log_sb(mp, mp->m_update_flags);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		ASSERT(!XFS_IS_QUOTA_ON(mp));

		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				return error;
		}
	}

	/*
	 * Finish recovering the file system.  This part needed to be
	 * delayed until after the root and real-time bitmap inodes
	 * were consistently read in.
	 */
	error = xfs_log_mount_finish(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			xfs_warn(mp,
	"Unable to allocate reserve blocks. Continuing without reserve pool.");
	}

	return 0;

out_rtunmount:
	xfs_rtunmount_inodes(mp);
out_rele_rip:
	IRELE(rip);
out_log_dealloc:
	xfs_log_unmount(mp);
out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
out_free_perag:
	xfs_free_perag(mp);
out_remove_uuid:
	xfs_uuid_unmount(mp);
out:
	return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	__uint64_t		resblks;
	int			error;

	cancel_delayed_work_sync(&mp->m_eofblocks_work);

	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	IRELE(mp->m_rootip);

	/*
	 * We can potentially deadlock here if we have an inode cluster
	 * that has been freed but has its buffer still pinned in memory
	 * because the transaction is still sitting in an iclog. The stale
	 * inodes on that buffer will have their flush locks held until the
	 * transaction hits the disk and the callbacks run. The inode
	 * flush takes the flush lock unconditionally and with nothing to
	 * push out the iclog we will never get that unlocked. Hence we
	 * need to force the log first.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Flush all pending changes from the AIL.
	 */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * And reclaim all inodes.  At this point there should be no dirty
	 * inodes and none should be pinned or locked, but use synchronous
	 * reclaim just to be sure. We can stop background inode reclaim
	 * here as well if it is still running.
	 */
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	xfs_qm_unmount(mp);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't account
	 * the reserved free space as used. This is really only necessary for
	 * lazy superblock counting because it trusts the incore superblock
	 * counters to be absolutely correct on clean unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct the
	 * correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all because
	 * we only ever apply deltas to the superblock and hence the incore
	 * value does not matter....
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "Unable to update superblock counters. "
				"Freespace may not be correct on next mount.");

	xfs_log_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp, 0);
#endif
	xfs_free_perag(mp);
}

int
xfs_fs_writable(xfs_mount_t *mp)
{
	return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
		(mp->m_flags & XFS_MOUNT_RDONLY));
}

/*
 * xfs_log_sbcount
 *
 * Sync the superblock counters to disk.
 *
 * Note this code can be called during the process of freezing, so
 * we may need to use the transaction allocator which does not
 * block when the transaction subsystem is in its frozen state.
 */
int
xfs_log_sbcount(xfs_mount_t *mp)
{
	xfs_trans_t	*tp;
	int		error;

	if (!xfs_fs_writable(mp))
		return 0;

	xfs_icsb_sync_counters(mp, 0);

	/*
	 * we don't need to do this if we are updating the superblock
	 * counters on every modification.
	 */
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	return error;
}

/*
 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a specified field in the in-core superblock.  Simply
 * switch on the field indicated and apply the delta to that field.
 * Fields are not allowed to dip below zero, so if the delta would
 * do this do not apply it and return EINVAL.
 *
 * The m_sb_lock must be held when this routine is called.
 */
STATIC int
xfs_mod_incore_sb_unlocked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	int		scounter;	/* short counter for 32 bit fields */
	long long	lcounter;	/* long counter for 64 bit fields */
	long long	res_used, rem;

	/*
	 * With the in-core superblock spin lock held, switch
	 * on the indicated field.  Apply the delta to the
	 * proper field.  If the fields value would dip below
	 * 0, then do not apply the delta and return EINVAL.
	 */
	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = (long long)mp->m_sb.sb_icount;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_icount = lcounter;
		return 0;
	case XFS_SBS_IFREE:
		lcounter = (long long)mp->m_sb.sb_ifree;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_ifree = lcounter;
		return 0;
	case XFS_SBS_FDBLOCKS:
		lcounter = (long long)
			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (delta > 0) {		/* Putting blocks back */
			if (res_used > delta) {
				mp->m_resblks_avail += delta;
			} else {
				rem = delta - res_used;
				mp->m_resblks_avail = mp->m_resblks;
				lcounter += rem;
			}
		} else {			/* Taking blocks away */
			lcounter += delta;
			if (lcounter >= 0) {
				mp->m_sb.sb_fdblocks = lcounter +
							XFS_ALLOC_SET_ASIDE(mp);
				return 0;
			}

			/*
			 * We are out of blocks, use any available reserved
			 * blocks if we're allowed to.
			 */
			if (!rsvd)
				return XFS_ERROR(ENOSPC);

			lcounter = (long long)mp->m_resblks_avail + delta;
			if (lcounter >= 0) {
				mp->m_resblks_avail = lcounter;
				return 0;
			}
			printk_once(KERN_WARNING
				"Filesystem \"%s\": reserve blocks depleted! "
				"Consider increasing reserve pool size.",
				mp->m_fsname);
			return XFS_ERROR(ENOSPC);
		}

		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		return 0;
	case XFS_SBS_FREXTENTS:
		lcounter = (long long)mp->m_sb.sb_frextents;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(ENOSPC);
		}
		mp->m_sb.sb_frextents = lcounter;
		return 0;
	case XFS_SBS_DBLOCKS:
		lcounter = (long long)mp->m_sb.sb_dblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_dblocks = lcounter;
		return 0;
	case XFS_SBS_AGCOUNT:
		scounter = mp->m_sb.sb_agcount;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_agcount = scounter;
		return 0;
	case XFS_SBS_IMAX_PCT:
		scounter = mp->m_sb.sb_imax_pct;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_imax_pct = scounter;
		return 0;
	case XFS_SBS_REXTSIZE:
		scounter = mp->m_sb.sb_rextsize;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextsize = scounter;
		return 0;
	case XFS_SBS_RBMBLOCKS:
		scounter = mp->m_sb.sb_rbmblocks;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rbmblocks = scounter;
		return 0;
	case XFS_SBS_RBLOCKS:
		lcounter = (long long)mp->m_sb.sb_rblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rblocks = lcounter;
		return 0;
	case XFS_SBS_REXTENTS:
		lcounter = (long long)mp->m_sb.sb_rextents;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextents = lcounter;
		return 0;
	case XFS_SBS_REXTSLOG:
		scounter = mp->m_sb.sb_rextslog;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextslog = scounter;
		return 0;
	default:
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
}

/*
 * xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta.  This modification
 * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
int
xfs_mod_incore_sb(
	struct xfs_mount	*mp,
	xfs_sb_field_t		field,
	int64_t			delta,
	int			rsvd)
{
	int			status;

#ifdef HAVE_PERCPU_SB
	ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
#endif
	spin_lock(&mp->m_sb_lock);
	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	spin_unlock(&mp->m_sb_lock);

	return status;
}

/*
 * Change more than one field in the in-core superblock structure at a time.
 *
 * The fields and changes to those fields are specified in the array of
 * xfs_mod_sb structures passed in.  Either all of the specified deltas
 * will be applied or none of them will.  If any modified field dips below 0,
 * then all modifications will be backed out and EINVAL will be returned.
1311 * 1312 * Note that this function may not be used for the superblock values that 1313 * are tracked with the in-memory per-cpu counters - a direct call to 1314 * xfs_icsb_modify_counters is required for these. 1315 */ 1316 int 1317 xfs_mod_incore_sb_batch( 1318 struct xfs_mount *mp, 1319 xfs_mod_sb_t *msb, 1320 uint nmsb, 1321 int rsvd) 1322 { 1323 xfs_mod_sb_t *msbp; 1324 int error = 0; 1325 1326 /* 1327 * Loop through the array of mod structures and apply each individually. 1328 * If any fail, then back out all those which have already been applied. 1329 * Do all of this within the scope of the m_sb_lock so that all of the 1330 * changes will be atomic. 1331 */ 1332 spin_lock(&mp->m_sb_lock); 1333 for (msbp = msb; msbp < (msb + nmsb); msbp++) { 1334 ASSERT(msbp->msb_field < XFS_SBS_ICOUNT || 1335 msbp->msb_field > XFS_SBS_FDBLOCKS); 1336 1337 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, 1338 msbp->msb_delta, rsvd); 1339 if (error) 1340 goto unwind; 1341 } 1342 spin_unlock(&mp->m_sb_lock); 1343 return 0; 1344 1345 unwind: 1346 while (--msbp >= msb) { 1347 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, 1348 -msbp->msb_delta, rsvd); 1349 ASSERT(error == 0); 1350 } 1351 spin_unlock(&mp->m_sb_lock); 1352 return error; 1353 } 1354 1355 /* 1356 * xfs_getsb() is called to obtain the buffer for the superblock. 1357 * The buffer is returned locked and read in from disk. 1358 * The buffer should be released with a call to xfs_brelse(). 1359 * 1360 * If the flags parameter is BUF_TRYLOCK, then we'll only return 1361 * the superblock buffer if it can be locked without sleeping. 1362 * If it can't then we'll return NULL. 1363 */ 1364 struct xfs_buf * 1365 xfs_getsb( 1366 struct xfs_mount *mp, 1367 int flags) 1368 { 1369 struct xfs_buf *bp = mp->m_sb_bp; 1370 1371 if (!xfs_buf_trylock(bp)) { 1372 if (flags & XBF_TRYLOCK) 1373 return NULL; 1374 xfs_buf_lock(bp); 1375 } 1376 1377 xfs_buf_hold(bp); 1378 ASSERT(XFS_BUF_ISDONE(bp)); 1379 return bp; 1380 } 1381 1382 /* 1383 * Used to free the superblock along various error paths. 1384 */ 1385 void 1386 xfs_freesb( 1387 struct xfs_mount *mp) 1388 { 1389 struct xfs_buf *bp = mp->m_sb_bp; 1390 1391 xfs_buf_lock(bp); 1392 mp->m_sb_bp = NULL; 1393 xfs_buf_relse(bp); 1394 } 1395 1396 /* 1397 * Used to log changes to the superblock unit and width fields which could 1398 * be altered by the mount options, as well as any potential sb_features2 1399 * fixup. Only the first superblock is updated. 1400 */ 1401 int 1402 xfs_mount_log_sb( 1403 xfs_mount_t *mp, 1404 __int64_t fields) 1405 { 1406 xfs_trans_t *tp; 1407 int error; 1408 1409 ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID | 1410 XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 | 1411 XFS_SB_VERSIONNUM)); 1412 1413 tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); 1414 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 1415 if (error) { 1416 xfs_trans_cancel(tp, 0); 1417 return error; 1418 } 1419 xfs_mod_sb(tp, fields); 1420 error = xfs_trans_commit(tp, 0); 1421 return error; 1422 } 1423 1424 /* 1425 * If the underlying (data/log/rt) device is readonly, there are some 1426 * operations that cannot proceed. 
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		xfs_notice(mp, "%s required on read-only device.", message);
		xfs_notice(mp, "write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}

#ifdef HAVE_PERCPU_SB
/*
 * Per-cpu incore superblock counters
 *
 * Simple concept, difficult implementation
 *
 * Basically, replace the incore superblock counters with a distributed per cpu
 * counter for contended fields (e.g. free block count).
 *
 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
 * hence needs to be accurately read when we are running low on space. Hence
 * there is a method to enable and disable the per-cpu counters based on how
 * much "stuff" is available in them.
 *
 * Basically, a counter is enabled if there is enough free resource to justify
 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
 * ENOSPC), then we disable the counters to synchronise all callers and
 * re-distribute the available resources.
 *
 * If, once we redistributed the available resources, we still get a failure,
 * we disable the per-cpu counter and go through the slow path.
 *
 * The slow path is the current xfs_mod_incore_sb() function.  This means that
 * when we disable a per-cpu counter, we need to drain its resources back to
 * the global superblock. We do this after disabling the counter to prevent
 * more threads from queueing up on the counter.
 *
 * Essentially, this means that we still need a lock in the fast path to enable
 * synchronisation between the global counters and the per-cpu counters. This
 * is not a problem because the lock will be local to a CPU almost all the time
 * and have little contention except when we get to ENOSPC conditions.
 *
 * Basically, this lock becomes a barrier that enables us to lock out the fast
 * path while we do things like enabling and disabling counters and
 * synchronising the counters.
 *
 * Locking rules:
 *
 *	1. m_sb_lock before picking up per-cpu locks
 *	2. per-cpu locks always picked up via for_each_online_cpu() order
 *	3. accurate counter sync requires m_sb_lock + per cpu locks
 *	4. modifying per-cpu counters requires holding per-cpu lock
 *	5. modifying global counters requires holding m_sb_lock
 *	6. enabling or disabling a counter requires holding the m_sb_lock
 *	   and _none_ of the per-cpu locks.
 *
 * Disabled counters are only ever re-enabled by a balance operation
 * that results in more free resources per CPU than a given threshold.
 * To ensure counters don't remain disabled, they are rebalanced when
 * the global resource goes above a higher threshold (i.e. some hysteresis
 * is present to prevent thrashing).
 */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * hot-plug CPU notifier support.
 *
 * We need a notifier per filesystem as we need to be able to identify
 * the filesystem to balance the counters out. This is achieved by
 * having a notifier block embedded in the xfs_mount_t and doing pointer
 * magic to get the mount pointer from the notifier block address.
 */
STATIC int
xfs_icsb_cpu_notify(
	struct notifier_block *nfb,
	unsigned long action,
	void *hcpu)
{
	xfs_icsb_cnts_t *cntp;
	xfs_mount_t	*mp;

	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
	cntp = (xfs_icsb_cnts_t *)
			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Easy Case - initialize the area and locks, and
		 * then rebalance when online does everything else for us. */
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		xfs_icsb_lock(mp);
		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
		xfs_icsb_unlock(mp);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* Disable all the counters, then fold the dead cpu's
		 * count into the total on the global superblock and
		 * re-enable the counters. */
		xfs_icsb_lock(mp);
		spin_lock(&mp->m_sb_lock);
		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);

		mp->m_sb.sb_icount += cntp->icsb_icount;
		mp->m_sb.sb_ifree += cntp->icsb_ifree;
		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;

		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));

		xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
		xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
		xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
		spin_unlock(&mp->m_sb_lock);
		xfs_icsb_unlock(mp);
		break;
	}

	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

int
xfs_icsb_init_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
	if (mp->m_sb_cnts == NULL)
		return -ENOMEM;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
	}

	mutex_init(&mp->m_icsb_mutex);

	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;

#ifdef CONFIG_HOTPLUG_CPU
	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
	mp->m_icsb_notifier.priority = 0;
	register_hotcpu_notifier(&mp->m_icsb_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}

void
xfs_icsb_reinit_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_lock(mp);
	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
	xfs_icsb_unlock(mp);
}

void
xfs_icsb_destroy_counters(
	xfs_mount_t	*mp)
{
	if (mp->m_sb_cnts) {
		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
		free_percpu(mp->m_sb_cnts);
	}
	mutex_destroy(&mp->m_icsb_mutex);
}

STATIC void
xfs_icsb_lock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
		ndelay(1000);
	}
}

STATIC void
xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
}


STATIC void
xfs_icsb_lock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_lock_cntr(cntp);
	}
}

STATIC void
xfs_icsb_unlock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_unlock_cntr(cntp);
	}
}

STATIC void
xfs_icsb_count(
	xfs_mount_t	*mp,
	xfs_icsb_cnts_t	*cnt,
	int		flags)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_lock_all_counters(mp);

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		cnt->icsb_icount += cntp->icsb_icount;
		cnt->icsb_ifree += cntp->icsb_ifree;
		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
	}

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_unlock_all_counters(mp);
}

STATIC int
xfs_icsb_counter_disabled(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
	return test_bit(field, &mp->m_icsb_counters);
}

STATIC void
xfs_icsb_disable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	xfs_icsb_cnts_t	cnt;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	/*
	 * If we are already disabled, then there is nothing to do
	 * here. We check before locking all the counters to avoid
	 * the expensive lock operation when being called in the
	 * slow path and the counter is already disabled. This is
	 * safe because the only time we set or clear this state is under
	 * the m_icsb_mutex.
	 */
	if (xfs_icsb_counter_disabled(mp, field))
		return;

	xfs_icsb_lock_all_counters(mp);
	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
		/* drain back to superblock */

		xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
		switch(field) {
		case XFS_SBS_ICOUNT:
			mp->m_sb.sb_icount = cnt.icsb_icount;
			break;
		case XFS_SBS_IFREE:
			mp->m_sb.sb_ifree = cnt.icsb_ifree;
			break;
		case XFS_SBS_FDBLOCKS:
			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
			break;
		default:
			BUG();
		}
	}

	xfs_icsb_unlock_all_counters(mp);
}

STATIC void
xfs_icsb_enable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	uint64_t	count,
	uint64_t	resid)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	xfs_icsb_lock_all_counters(mp);
	for_each_online_cpu(i) {
		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
		switch (field) {
		case XFS_SBS_ICOUNT:
			cntp->icsb_icount = count + resid;
			break;
		case XFS_SBS_IFREE:
			cntp->icsb_ifree = count + resid;
			break;
		case XFS_SBS_FDBLOCKS:
			cntp->icsb_fdblocks = count + resid;
			break;
		default:
			BUG();
			break;
		}
		resid = 0;
	}
	clear_bit(field, &mp->m_icsb_counters);
	xfs_icsb_unlock_all_counters(mp);
}

void
xfs_icsb_sync_counters_locked(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_icsb_cnts_t	cnt;

	xfs_icsb_count(mp, &cnt, flags);

	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
		mp->m_sb.sb_icount = cnt.icsb_icount;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
		mp->m_sb.sb_ifree = cnt.icsb_ifree;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
}

/*
 * Accurate update of per-cpu counters to incore superblock
 */
void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp,
	int		flags)
{
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, flags);
	spin_unlock(&mp->m_sb_lock);
}

/*
 * Balance and enable/disable counters as necessary.
 *
 * Thresholds for re-enabling counters are somewhat magic.  inode counts are
 * chosen to be the same number as a single on-disk allocation chunk per CPU,
 * and free blocks is something far enough from zero that we aren't going to
 * thrash when we get near ENOSPC. We also need to supply a minimum we require
 * per cpu to prevent looping endlessly when xfs_alloc_space asks for more than
 * will be distributed to a single CPU but each CPU has enough blocks to be
 * reenabled.
 *
 * Note that we can be called when counters are already disabled.
 * xfs_icsb_disable_counter() optimises the counter locking in this case to
 * prevent locking every per-cpu counter needlessly.
 */

#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
STATIC void
xfs_icsb_balance_counter_locked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		min_per_cpu)
{
	uint64_t	count, resid;
	int		weight = num_online_cpus();
	uint64_t	min = (uint64_t)min_per_cpu;

	/* disable counter and sync counter */
	xfs_icsb_disable_counter(mp, field);

	/* update counters - first CPU gets residual*/
	switch (field) {
	case XFS_SBS_ICOUNT:
		count = mp->m_sb.sb_icount;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			return;
		break;
	case XFS_SBS_IFREE:
		count = mp->m_sb.sb_ifree;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			return;
		break;
	case XFS_SBS_FDBLOCKS:
		count = mp->m_sb.sb_fdblocks;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
			return;
		break;
	default:
		BUG();
		count = resid = 0;	/* quiet, gcc */
		break;
	}

	xfs_icsb_enable_counter(mp, field, count, resid);
}

STATIC void
xfs_icsb_balance_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t  fields,
	int		min_per_cpu)
{
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
	spin_unlock(&mp->m_sb_lock);
}

int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	xfs_icsb_cnts_t	*icsbp;
	long long	lcounter;	/* long counter for 64 bit fields */
	int		ret = 0;

	might_sleep();
again:
	preempt_disable();
	icsbp = this_cpu_ptr(mp->m_sb_cnts);

	/*
	 * if the counter is disabled, go to slow path
	 */
	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
		goto slow_path;
	xfs_icsb_lock_cntr(icsbp);
	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock_cntr(icsbp);
		goto slow_path;
	}

	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = icsbp->icsb_icount;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_icount = lcounter;
		break;

	case XFS_SBS_IFREE:
		lcounter = icsbp->icsb_ifree;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_ifree = lcounter;
		break;

	case XFS_SBS_FDBLOCKS:
		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);

		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		break;
	default:
		BUG();
		break;
	}
	xfs_icsb_unlock_cntr(icsbp);
	preempt_enable();
	return 0;

slow_path:
	preempt_enable();

	/*
	 * serialise with a mutex so we don't burn lots of cpu on
	 * the superblock lock. We still need to hold the superblock
	 * lock, however, when we modify the global structures.
	 */
	xfs_icsb_lock(mp);

	/*
	 * Now running atomically.
	 *
	 * If the counter is enabled, someone has beaten us to rebalancing.
	 * Drop the lock and try again in the fast path....
	 */
	if (!(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock(mp);
		goto again;
	}

	/*
	 * The counter is currently disabled. Because we are
	 * running atomically here, we know a rebalance cannot
	 * be in progress. Hence we can go straight to operating
	 * on the global superblock. We do not call xfs_mod_incore_sb()
	 * here even though we need to get the m_sb_lock. Doing so
	 * will cause us to re-enter this function and deadlock.
	 * Hence we get the m_sb_lock ourselves and then call
	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
	 * directly on the global counters.
	 */
	spin_lock(&mp->m_sb_lock);
	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now that we've modified the global superblock, we
	 * may be able to re-enable the distributed counters
	 * (e.g. lots of space just got freed). After that
	 * we are done.
	 */
	if (ret != ENOSPC)
		xfs_icsb_balance_counter(mp, field, 0);
	xfs_icsb_unlock(mp);
	return ret;

balance_counter:
	xfs_icsb_unlock_cntr(icsbp);
	preempt_enable();

	/*
	 * We may have multiple threads here if multiple per-cpu
	 * counters run dry at the same time. This will mean we can
	 * do more balances than strictly necessary but it is not
	 * the common slowpath case.
	 */
	xfs_icsb_lock(mp);

	/*
	 * running atomically.
	 *
	 * This will leave the counter in the correct state for future
	 * accesses. After the rebalance, we simply try again and our retry
	 * will either succeed through the fast path or slow path without
	 * another balance operation being required.
	 */
	xfs_icsb_balance_counter(mp, field, delta);
	xfs_icsb_unlock(mp);
	goto again;
}

#endif