/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"

/*
 * File system operations
 */

int
xfs_fs_geometry(
	xfs_mount_t		*mp,
	xfs_fsop_geom_t		*geo,
	int			new_version)
{

	memset(geo, 0, sizeof(*geo));

	geo->blocksize = mp->m_sb.sb_blocksize;
	geo->rtextsize = mp->m_sb.sb_rextsize;
	geo->agblocks = mp->m_sb.sb_agblocks;
	geo->agcount = mp->m_sb.sb_agcount;
	geo->logblocks = mp->m_sb.sb_logblocks;
	geo->sectsize = mp->m_sb.sb_sectsize;
	geo->inodesize = mp->m_sb.sb_inodesize;
	geo->imaxpct = mp->m_sb.sb_imax_pct;
	geo->datablocks = mp->m_sb.sb_dblocks;
	geo->rtblocks = mp->m_sb.sb_rblocks;
	geo->rtextents = mp->m_sb.sb_rextents;
	geo->logstart = mp->m_sb.sb_logstart;
	ASSERT(sizeof(geo->uuid) == sizeof(mp->m_sb.sb_uuid));
	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
	if (new_version >= 2) {
		geo->sunit = mp->m_sb.sb_unit;
		geo->swidth = mp->m_sb.sb_width;
	}
	if (new_version >= 3) {
		geo->version = XFS_FSOP_GEOM_VERSION;
		geo->flags =
			(xfs_sb_version_hasattr(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
			(xfs_sb_version_hasnlink(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
			(xfs_sb_version_hasquota(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
			(xfs_sb_version_hasalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
			(xfs_sb_version_hasdalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
			(xfs_sb_version_hasshared(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
			(xfs_sb_version_hasextflgbit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
			(xfs_sb_version_hasdirv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
			(xfs_sb_version_hassector(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
			(xfs_sb_version_hasasciici(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
			(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
			(xfs_sb_version_hasattr2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
			(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_PROJID32 : 0);
		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
				mp->m_sb.sb_logsectsize : BBSIZE;
		geo->rtsectsize = mp->m_sb.sb_blocksize;
		geo->dirblocksize = mp->m_dirblksize;
	}
	if (new_version >= 4) {
		geo->flags |=
			(xfs_sb_version_haslogv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
		geo->logsunit = mp->m_sb.sb_logsunit;
	}
	return 0;
}
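
/*
 * Usage sketch: xfs_fs_geometry() backs the XFS_IOC_FSGEOMETRY ioctl, so a
 * userspace program can read the geometry of a mounted filesystem roughly
 * as follows.  This is illustrative only and assumes the xfsprogs uapi
 * headers; the descriptor may refer to any file or directory on the
 * filesystem of interest.
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		struct xfs_fsop_geom geo;
 *		int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
 *			perror("XFS_IOC_FSGEOMETRY");
 *			return 1;
 *		}
 *		printf("agcount=%u agblocks=%u blocksize=%u\n",
 *			geo.agcount, geo.agblocks, geo.blocksize);
 *		return 0;
 *	}
 */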

static struct xfs_buf *
xfs_growfs_get_hdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
	if (!bp)
		return NULL;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	return bp;
}

static int
xfs_growfs_data_private(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_growfs_data_t	*in)		/* growfs data input struct */
{
	xfs_agf_t		*agf;
	struct xfs_agfl		*agfl;
	xfs_agi_t		*agi;
	xfs_agnumber_t		agno;
	xfs_extlen_t		agsize;
	xfs_extlen_t		tmpsize;
	xfs_alloc_rec_t		*arec;
	xfs_buf_t		*bp;
	int			bucket;
	int			dpct;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_mod;
	xfs_rfsblock_t		new;
	xfs_rfsblock_t		nfree;
	xfs_agnumber_t		oagcount;
	int			pct;
	xfs_trans_t		*tp;

	nb = in->newblocks;
	pct = in->imaxpct;
	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
		return XFS_ERROR(EINVAL);
	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
		return error;
	dpct = pct - mp->m_sb.sb_imax_pct;
	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, NULL);
	if (!bp)
		return EIO;
	if (bp->b_error) {
		int	error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}
	xfs_buf_relse(bp);

	new = nb;	/* use new as a temporary here */
	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
	nagcount = new + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
		if (nb < mp->m_sb.sb_dblocks)
			return XFS_ERROR(EINVAL);
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;

	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
	tp->t_flags |= XFS_TRANS_RESERVE;
	if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
			XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/*
	 * Write new AG headers to disk. Non-transactional, but written
	 * synchronously so they are completed prior to the growfs transaction
	 * being logged.
	 */
	nfree = 0;
	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
		/*
		 * AG freespace header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agf_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		agf = XFS_BUF_TO_AGF(bp);
		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
		agf->agf_seqno = cpu_to_be32(agno);
		if (agno == nagcount - 1)
			agsize =
				nb -
				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			agsize = mp->m_sb.sb_agblocks;
		agf->agf_length = cpu_to_be32(agsize);
		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
		agf->agf_flcount = 0;
		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
		agf->agf_freeblks = cpu_to_be32(tmpsize);
		agf->agf_longest = cpu_to_be32(tmpsize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_uuid);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * AG freelist header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agfl_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		agfl = XFS_BUF_TO_AGFL(bp);
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
			agfl->agfl_seqno = cpu_to_be32(agno);
			uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
		}
		for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
			agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * AG inode header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agi_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		agi = XFS_BUF_TO_AGI(bp);
		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
		agi->agi_seqno = cpu_to_be32(agno);
		agi->agi_length = cpu_to_be32(agsize);
		agi->agi_count = 0;
		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
		agi->agi_level = cpu_to_be32(1);
		agi->agi_freecount = 0;
		agi->agi_newino = cpu_to_be32(NULLAGINO);
		agi->agi_dirino = cpu_to_be32(NULLAGINO);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_uuid);
		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * BNO btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_allocbt_buf_ops);

		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, bp, XFS_ABTB_CRC_MAGIC, 0, 1,
						agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1,
						agno, 0);

		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * CNT btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_allocbt_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, bp, XFS_ABTC_CRC_MAGIC, 0, 1,
						agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1,
						agno, 0);

		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		nfree += be32_to_cpu(arec->ar_blockcount);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * INO btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_inobt_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, bp, XFS_IBT_CRC_MAGIC, 0, 0,
						agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0,
						agno, 0);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;
	}
	xfs_trans_agblocks_delta(tp, nfree);
	/*
	 * There are new blocks in the old last a.g.
	 */
	if (new) {
		/*
		 * Change the agi length.
		 */
		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agi = XFS_BUF_TO_AGI(bp);
		be32_add_cpu(&agi->agi_length, new);
		ASSERT(nagcount == oagcount ||
		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
		/*
		 * Change agf length.
		 */
		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agf = XFS_BUF_TO_AGF(bp);
		be32_add_cpu(&agf->agf_length, new);
		ASSERT(be32_to_cpu(agf->agf_length) ==
		       be32_to_cpu(agi->agi_length));

		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
		/*
		 * Free the new space.
		 */
		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
			be32_to_cpu(agf->agf_length) - new), new);
		if (error) {
			goto error0;
		}
	}

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
				 nb - mp->m_sb.sb_dblocks);
	if (nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
	if (dpct)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	error = xfs_trans_commit(tp, 0);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	if (mp->m_sb.sb_imax_pct) {
		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
	} else
		mp->m_maxicount = 0;
	xfs_set_low_space_thresholds(mp);

	/* update secondary superblocks. */
	for (agno = 1; agno < nagcount; agno++) {
		error = 0;
		/*
		 * new secondary superblocks need to be zeroed, not read from
		 * disk as the contents of the new area we are growing into is
		 * completely unknown.
		 */
		if (agno < oagcount) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0, &bp,
				  &xfs_sb_buf_ops);
		} else {
			bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0);
			if (bp) {
				bp->b_ops = &xfs_sb_buf_ops;
				xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
			} else
				error = ENOMEM;
		}

		if (error) {
			xfs_warn(mp,
		"error %d reading secondary superblock for ag %d",
				error, agno);
			break;
		}
		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);

		/*
		 * If we get an error writing out the alternate superblocks,
		 * just issue a warning and continue.  The real work is
		 * already done and committed.
		 */
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error) {
			xfs_warn(mp,
		"write error %d updating secondary superblock for ag %d",
				error, agno);
			break; /* no point in continuing */
		}
	}
	return error;

 error0:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
	return error;
}

static int
xfs_growfs_log_private(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_growfs_log_t	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return XFS_ERROR(EINVAL);
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return XFS_ERROR(EINVAL);
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return XFS_ERROR(ENOSYS);
}

/*
 * protected versions of growfs function acquire and release locks on the mount
 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
 * XFS_IOC_FSGROWFSRT
 */


int
xfs_growfs_data(
	xfs_mount_t		*mp,
	xfs_growfs_data_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_data_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	xfs_growfs_log_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}
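
/*
 * Usage sketch for the growfs entry points above: they are reached through
 * the XFS_IOC_FSGROWFSDATA and XFS_IOC_FSGROWFSLOG ioctls, with xfs_growfs(8)
 * as the usual frontend.  An illustrative caller that grows the data section
 * of the filesystem mounted at argv[1] to argv[2] filesystem blocks, assuming
 * the xfsprogs uapi headers and CAP_SYS_ADMIN, could look like this; the
 * imaxpct value below is just an example, not a recommendation.
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		struct xfs_growfs_data in;
 *		int fd;
 *
 *		if (argc < 3)
 *			return 1;
 *		fd = open(argv[1], O_RDONLY);
 *		in.newblocks = strtoull(argv[2], NULL, 0);
 *		in.imaxpct = 25;
 *		if (fd < 0 || ioctl(fd, XFS_IOC_FSGROWFSDATA, &in) < 0) {
 *			perror("XFS_IOC_FSGROWFSDATA");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */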

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

int
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	spin_lock(&mp->m_sb_lock);
	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	cnt->freertx = mp->m_sb.sb_frextents;
	cnt->freeino = mp->m_sb.sb_ifree;
	cnt->allocino = mp->m_sb.sb_icount;
	spin_unlock(&mp->m_sb_lock);
	return 0;
}

/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */

int
xfs_reserve_blocks(
	xfs_mount_t		*mp,
	__uint64_t		*inval,
	xfs_fsop_resblks_t	*outval)
{
	__int64_t		lcounter, delta, fdblks_delta;
	__uint64_t		request;

	/* If inval is null, report current values and return */
	if (inval == (__uint64_t *)NULL) {
		if (!outval)
			return EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting
	 * problem. We need to work out if we are freeing or allocating
	 * blocks first, then we can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near
	 * ENOSPC, we will hold out any changes while we work out
	 * what to do. This means that the amount of free space can
	 * change while we do this, so we need to retry if we end up
	 * trying to reserve more space than is available.
	 *
	 * We also use the xfs_mod_incore_sb() interface so that we
	 * don't have to care about whether per-cpu counters are
	 * enabled, disabled or even compiled in....
	 */
retry:
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, 0);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool.
	 */
	fdblks_delta = 0;
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
	} else {
		__int64_t	free;

		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		if (!free)
			goto out; /* ENOSPC and fdblks_delta = 0 */

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0) {
			/* We can't satisfy the request, just get what we can */
			mp->m_resblks += free;
			mp->m_resblks_avail += free;
			fdblks_delta = -free;
		} else {
			fdblks_delta = -delta;
			mp->m_resblks = request;
			mp->m_resblks_avail += delta;
		}
	}
out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}
	spin_unlock(&mp->m_sb_lock);

	if (fdblks_delta) {
		/*
		 * If we are putting blocks back here, m_resblks_avail is
		 * already at its max so this will put it in the free pool.
		 *
		 * If we need space, we'll either succeed in getting it
		 * from the free block count or we'll get an ENOSPC. If
		 * we get an ENOSPC, it means things changed while we were
		 * calculating fdblks_delta and so we should try again to
		 * see if there is anything left to reserve.
		 *
		 * Don't set the reserved flag here - we don't want to reserve
		 * the extra reserve blocks from the reserve.....
		 */
		int error;
		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
						 fdblks_delta, 0);
		if (error == ENOSPC)
			goto retry;
	}
	return 0;
}
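
/*
 * Usage sketch for the reserve pool interface above: XFS_IOC_GET_RESBLKS
 * reports the current reservation without changing it, and
 * XFS_IOC_SET_RESBLKS passes the requested count in resblks and returns
 * what was actually reserved.  Illustrative fragment only, assuming the
 * xfsprogs uapi headers and an open fd on the target filesystem; changing
 * the reservation requires CAP_SYS_ADMIN.
 *
 *	struct xfs_fsop_resblks res;
 *
 *	if (ioctl(fd, XFS_IOC_GET_RESBLKS, &res) == 0)
 *		printf("reserved=%llu available=%llu\n",
 *			(unsigned long long)res.resblks,
 *			(unsigned long long)res.resblks_avail);
 *
 *	res.resblks = 8192;
 *	if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) == 0)
 *		printf("now reserved=%llu available=%llu\n",
 *			(unsigned long long)res.resblks,
 *			(unsigned long long)res.resblks_avail);
 */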

/*
 * Dump a transaction into the log that contains no real change. This is needed
 * to be able to make the log dirty or stamp the current tail LSN into the log
 * during the covering operation.
 *
 * We cannot use an inode here for this - that will push dirty state back up
 * into the VFS and then periodic inode flushing will prevent log covering from
 * making progress. Hence we log a field in the superblock instead and use a
 * synchronous transaction to ensure the superblock is immediately unpinned
 * and can be written back.
 */
int
xfs_fs_log_dummy(
	xfs_mount_t	*mp)
{
	xfs_trans_t	*tp;
	int		error;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* log the UUID because it is an unchanging field */
	xfs_mod_sb(tp, XFS_SB_UUID);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}

int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	__uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

		if (sb && !IS_ERR(sb)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(sb->s_bdev, sb);
		}

		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return XFS_ERROR(EINVAL);
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 */
void
xfs_do_force_shutdown(
	xfs_mount_t	*mp,
	int		flags,
	char		*fname,
	int		lnnum)
{
	int		logerror;

	logerror = flags & SHUTDOWN_LOG_IO_ERROR;

	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		xfs_notice(mp,
	"%s(0x%x) called from line %d of file %s. Return address = 0x%p",
			__func__, flags, lnnum, fname, __return_address);
	}
	/*
	 * No need to duplicate efforts.
	 */
	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
		return;

	/*
	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
	 * queue up anybody new on the log reservations, and wakes up
	 * everybody who's sleeping on log reservations to tell them
	 * the bad news.
	 */
	if (xfs_log_force_umount(mp, logerror))
		return;

	if (flags & SHUTDOWN_CORRUPT_INCORE) {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
	"Corruption of in-memory data detected. Shutting down filesystem");
		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
			xfs_stack_trace();
	} else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		if (logerror) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
		"Log I/O Error Detected. Shutting down filesystem");
		} else if (flags & SHUTDOWN_DEVICE_REQ) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
		"All device paths lost. Shutting down filesystem");
		} else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
		"I/O Error Detected. Shutting down filesystem");
		}
	}
	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		xfs_alert(mp,
	"Please umount the filesystem and rectify the problem(s)");
	}
}
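
/*
 * Usage sketch for xfs_fs_goingdown() above, which is reached through the
 * XFS_IOC_GOINGDOWN ioctl (the xfs_io "shutdown" command drives the same
 * interface).  Illustrative fragment only, assuming the xfsprogs uapi
 * headers plus <stdint.h>, CAP_SYS_ADMIN, and an open fd on the filesystem
 * to shut down:
 *
 *	uint32_t flags = XFS_FSOP_GOING_FLAGS_LOGFLUSH;
 *
 *	if (ioctl(fd, XFS_IOC_GOINGDOWN, &flags) < 0)
 *		perror("XFS_IOC_GOINGDOWN");
 *
 * As the switch above shows, XFS_FSOP_GOING_FLAGS_DEFAULT freezes and thaws
 * the underlying block device around the shutdown,
 * XFS_FSOP_GOING_FLAGS_LOGFLUSH shuts down after forcing the log, and
 * XFS_FSOP_GOING_FLAGS_NOLOGFLUSH shuts down treating the log as failed.
 */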