/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"

/*
 * File system operations
 */

int
xfs_fs_geometry(
        xfs_mount_t             *mp,
        xfs_fsop_geom_t         *geo,
        int                     new_version)
{

        memset(geo, 0, sizeof(*geo));

        geo->blocksize = mp->m_sb.sb_blocksize;
        geo->rtextsize = mp->m_sb.sb_rextsize;
        geo->agblocks = mp->m_sb.sb_agblocks;
        geo->agcount = mp->m_sb.sb_agcount;
        geo->logblocks = mp->m_sb.sb_logblocks;
        geo->sectsize = mp->m_sb.sb_sectsize;
        geo->inodesize = mp->m_sb.sb_inodesize;
        geo->imaxpct = mp->m_sb.sb_imax_pct;
        geo->datablocks = mp->m_sb.sb_dblocks;
        geo->rtblocks = mp->m_sb.sb_rblocks;
        geo->rtextents = mp->m_sb.sb_rextents;
        geo->logstart = mp->m_sb.sb_logstart;
        ASSERT(sizeof(geo->uuid) == sizeof(mp->m_sb.sb_uuid));
        memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
        if (new_version >= 2) {
                geo->sunit = mp->m_sb.sb_unit;
                geo->swidth = mp->m_sb.sb_width;
        }
        if (new_version >= 3) {
                geo->version = XFS_FSOP_GEOM_VERSION;
                geo->flags =
                        (xfs_sb_version_hasattr(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
                        (xfs_sb_version_hasnlink(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
                        (xfs_sb_version_hasquota(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
                        (xfs_sb_version_hasalign(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
                        (xfs_sb_version_hasdalign(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
                        (xfs_sb_version_hasshared(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
                        (xfs_sb_version_hasextflgbit(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
                        (xfs_sb_version_hasdirv2(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
                        (xfs_sb_version_hassector(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
                        (xfs_sb_version_hasasciici(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
                        (xfs_sb_version_haslazysbcount(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
                        (xfs_sb_version_hasattr2(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
                        (xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
                        (xfs_sb_version_hascrc(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_V5SB : 0);
                geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
                                mp->m_sb.sb_logsectsize : BBSIZE;
                geo->rtsectsize = mp->m_sb.sb_blocksize;
                geo->dirblocksize = mp->m_dirblksize;
        }
        if (new_version >= 4) {
                geo->flags |=
                        (xfs_sb_version_haslogv2(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
                geo->logsunit = mp->m_sb.sb_logsunit;
        }
        return 0;
}

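/*
 * Typical userspace usage (a sketch, assuming the standard
 * XFS_IOC_FSGEOMETRY ioctl path; the exact new_version passed in depends
 * on which ioctl variant the caller used):
 *
 *      struct xfs_fsop_geom geo;
 *
 *      if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) == 0)
 *              printf("agcount=%u agblocks=%u\n", geo.agcount, geo.agblocks);
 */
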
/*
 * Grab an uncached buffer for a new filesystem header block, zero it and
 * point it at the right disk address and verifier ops.  The caller fills in
 * the contents and writes it out synchronously.
 */
static struct xfs_buf *
xfs_growfs_get_hdr_buf(
        struct xfs_mount        *mp,
        xfs_daddr_t             blkno,
        size_t                  numblks,
        int                     flags,
        const struct xfs_buf_ops *ops)
{
        struct xfs_buf          *bp;

        bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
        if (!bp)
                return NULL;

        xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
        bp->b_bn = blkno;
        bp->b_maps[0].bm_bn = blkno;
        bp->b_ops = ops;

        return bp;
}

static int
xfs_growfs_data_private(
        xfs_mount_t             *mp,            /* mount point for filesystem */
        xfs_growfs_data_t       *in)            /* growfs data input struct */
{
        xfs_agf_t               *agf;
        struct xfs_agfl         *agfl;
        xfs_agi_t               *agi;
        xfs_agnumber_t          agno;
        xfs_extlen_t            agsize;
        xfs_extlen_t            tmpsize;
        xfs_alloc_rec_t         *arec;
        xfs_buf_t               *bp;
        int                     bucket;
        int                     dpct;
        int                     error;
        xfs_agnumber_t          nagcount;
        xfs_agnumber_t          nagimax = 0;
        xfs_rfsblock_t          nb, nb_mod;
        xfs_rfsblock_t          new;
        xfs_rfsblock_t          nfree;
        xfs_agnumber_t          oagcount;
        int                     pct;
        xfs_trans_t             *tp;

        nb = in->newblocks;
        pct = in->imaxpct;
        if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
                return XFS_ERROR(EINVAL);
        if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
                return error;
        dpct = pct - mp->m_sb.sb_imax_pct;
        bp = xfs_buf_read_uncached(mp->m_ddev_targp,
                                XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
                                XFS_FSS_TO_BB(mp, 1), 0, NULL);
        if (!bp)
                return EIO;
        if (bp->b_error) {
                error = bp->b_error;
                xfs_buf_relse(bp);
                return error;
        }
        xfs_buf_relse(bp);

        new = nb;       /* use new as a temporary here */
        nb_mod = do_div(new, mp->m_sb.sb_agblocks);
        nagcount = new + (nb_mod != 0);
        if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
                /* drop a runt last AG that would be too small to use */
                nagcount--;
                nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
                if (nb < mp->m_sb.sb_dblocks)
                        return XFS_ERROR(EINVAL);
        }
        new = nb - mp->m_sb.sb_dblocks;
        oagcount = mp->m_sb.sb_agcount;

        /* allocate the new per-ag structures */
        if (nagcount > oagcount) {
                error = xfs_initialize_perag(mp, nagcount, &nagimax);
                if (error)
                        return error;
        }

        tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
        tp->t_flags |= XFS_TRANS_RESERVE;
        if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
                        XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

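        /*
         * Each new AG gets a freshly initialised set of header blocks: the
         * AGF, AGFL and AGI sector-sized headers plus single-block roots for
         * the BNO, CNT and INO btrees.  Everything past the preallocated
         * header area is published as one large free extent, which is what
         * agf_freeblks and agf_longest are primed with below.
         */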
        /*
         * Write new AG headers to disk. Non-transactional, but written
         * synchronously so they are completed prior to the growfs transaction
         * being logged.
         */
        nfree = 0;
        for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
                /*
                 * AG freespace header block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
                                XFS_FSS_TO_BB(mp, 1), 0,
                                &xfs_agf_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                agf = XFS_BUF_TO_AGF(bp);
                agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
                agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
                agf->agf_seqno = cpu_to_be32(agno);
                if (agno == nagcount - 1)
                        agsize =
                                nb -
                                (agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
                else
                        agsize = mp->m_sb.sb_agblocks;
                agf->agf_length = cpu_to_be32(agsize);
                agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
                agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
                agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
                agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
                agf->agf_flfirst = 0;
                agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
                agf->agf_flcount = 0;
                tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
                agf->agf_freeblks = cpu_to_be32(tmpsize);
                agf->agf_longest = cpu_to_be32(tmpsize);
                if (xfs_sb_version_hascrc(&mp->m_sb))
                        uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_uuid);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * AG freelist header block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
                                XFS_FSS_TO_BB(mp, 1), 0,
                                &xfs_agfl_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                agfl = XFS_BUF_TO_AGFL(bp);
                if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
                        agfl->agfl_seqno = cpu_to_be32(agno);
                        uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
                }
                for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
                        agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * AG inode header block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
                                XFS_FSS_TO_BB(mp, 1), 0,
                                &xfs_agi_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                agi = XFS_BUF_TO_AGI(bp);
                agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
                agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
                agi->agi_seqno = cpu_to_be32(agno);
                agi->agi_length = cpu_to_be32(agsize);
                agi->agi_count = 0;
                agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
                agi->agi_level = cpu_to_be32(1);
                agi->agi_freecount = 0;
                agi->agi_newino = cpu_to_be32(NULLAGINO);
                agi->agi_dirino = cpu_to_be32(NULLAGINO);
                if (xfs_sb_version_hascrc(&mp->m_sb))
                        uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_uuid);
                for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
                        agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * BNO btree root block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
                                BTOBB(mp->m_sb.sb_blocksize), 0,
                                &xfs_allocbt_buf_ops);

                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                if (xfs_sb_version_hascrc(&mp->m_sb))
                        xfs_btree_init_block(mp, bp, XFS_ABTB_CRC_MAGIC, 0, 1,
                                                agno, XFS_BTREE_CRC_BLOCKS);
                else
                        xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1,
                                                agno, 0);

                arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
                arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
                arec->ar_blockcount = cpu_to_be32(
                        agsize - be32_to_cpu(arec->ar_startblock));

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * CNT btree root block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
                                BTOBB(mp->m_sb.sb_blocksize), 0,
                                &xfs_allocbt_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                if (xfs_sb_version_hascrc(&mp->m_sb))
                        xfs_btree_init_block(mp, bp, XFS_ABTC_CRC_MAGIC, 0, 1,
                                                agno, XFS_BTREE_CRC_BLOCKS);
                else
                        xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1,
                                                agno, 0);

                arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
                arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
                arec->ar_blockcount = cpu_to_be32(
                        agsize - be32_to_cpu(arec->ar_startblock));
                nfree += be32_to_cpu(arec->ar_blockcount);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * INO btree root block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
                                BTOBB(mp->m_sb.sb_blocksize), 0,
                                &xfs_inobt_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                if (xfs_sb_version_hascrc(&mp->m_sb))
                        xfs_btree_init_block(mp, bp, XFS_IBT_CRC_MAGIC, 0, 0,
                                                agno, XFS_BTREE_CRC_BLOCKS);
                else
                        xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0,
                                                agno, 0);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;
        }
        xfs_trans_agblocks_delta(tp, nfree);
        /*
         * There are new blocks in the old last AG.
         */
        if (new) {
                /*
                 * Change the agi length.
                 */
                error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
                if (error) {
                        goto error0;
                }
                ASSERT(bp);
                agi = XFS_BUF_TO_AGI(bp);
                be32_add_cpu(&agi->agi_length, new);
                ASSERT(nagcount == oagcount ||
                       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
                xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
                /*
                 * Change agf length.
                 */
                error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
                if (error) {
                        goto error0;
                }
                ASSERT(bp);
                agf = XFS_BUF_TO_AGF(bp);
                be32_add_cpu(&agf->agf_length, new);
                ASSERT(be32_to_cpu(agf->agf_length) ==
                       be32_to_cpu(agi->agi_length));

                xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
                /*
                 * Free the new space.
                 */
                error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
                        be32_to_cpu(agf->agf_length) - new), new);
                if (error) {
                        goto error0;
                }
        }

        /*
         * Update changed superblock fields transactionally. These are not
         * seen by the rest of the world until the transaction commit applies
         * them atomically to the superblock.
         */
        if (nagcount > oagcount)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
        if (nb > mp->m_sb.sb_dblocks)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
                                 nb - mp->m_sb.sb_dblocks);
        if (nfree)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
        if (dpct)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
        error = xfs_trans_commit(tp, 0);
        if (error)
                return error;

        /* New allocation groups fully initialized, so update mount struct */
        if (nagimax)
                mp->m_maxagi = nagimax;
        if (mp->m_sb.sb_imax_pct) {
                __uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
                do_div(icount, 100);
                mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
        } else
                mp->m_maxicount = 0;
        xfs_set_low_space_thresholds(mp);

        /* update secondary superblocks. */
        for (agno = 1; agno < nagcount; agno++) {
                error = 0;
                /*
                 * new secondary superblocks need to be zeroed, not read from
                 * disk as the contents of the new area we are growing into is
                 * completely unknown.
                 */
                if (agno < oagcount) {
                        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                                  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
                                  XFS_FSS_TO_BB(mp, 1), 0, &bp,
                                  &xfs_sb_buf_ops);
                } else {
                        bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
                                  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
                                  XFS_FSS_TO_BB(mp, 1), 0);
                        if (bp) {
                                bp->b_ops = &xfs_sb_buf_ops;
                                xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
                        } else
                                error = ENOMEM;
                }

                if (error) {
                        xfs_warn(mp,
                "error %d reading secondary superblock for ag %d",
                                error, agno);
                        break;
                }
                xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);

                /*
                 * If we get an error writing out the alternate superblocks,
                 * just issue a warning and continue.  The real work is
                 * already done and committed.
                 */
                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error) {
                        xfs_warn(mp,
                "write error %d updating secondary superblock for ag %d",
                                error, agno);
                        break; /* no point in continuing */
                }
        }
        return error;

 error0:
        xfs_trans_cancel(tp, XFS_TRANS_ABORT);
        return error;
}

static int
xfs_growfs_log_private(
        xfs_mount_t             *mp,    /* mount point for filesystem */
        xfs_growfs_log_t        *in)    /* growfs log input struct */
{
        xfs_extlen_t            nb;

        nb = in->newblocks;
        if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
                return XFS_ERROR(EINVAL);
        if (nb == mp->m_sb.sb_logblocks &&
            in->isint == (mp->m_sb.sb_logstart != 0))
                return XFS_ERROR(EINVAL);
        /*
         * Moving the log is hard, need new interfaces to sync
         * the log first, hold off all activity while moving it.
         * Can have shorter or longer log in the same space,
         * or transform internal to external log or vice versa.
         */
        return XFS_ERROR(ENOSYS);
}

/*
 * Protected versions of the growfs functions acquire and release locks on
 * the mount point - exported through ioctls: XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG, XFS_IOC_FSGROWFSRT.
 */


int
xfs_growfs_data(
        xfs_mount_t             *mp,
        xfs_growfs_data_t       *in)
{
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return XFS_ERROR(EPERM);
        if (!mutex_trylock(&mp->m_growlock))
                return XFS_ERROR(EWOULDBLOCK);
        error = xfs_growfs_data_private(mp, in);
        mutex_unlock(&mp->m_growlock);
        return error;
}

int
xfs_growfs_log(
        xfs_mount_t             *mp,
        xfs_growfs_log_t        *in)
{
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return XFS_ERROR(EPERM);
        if (!mutex_trylock(&mp->m_growlock))
                return XFS_ERROR(EWOULDBLOCK);
        error = xfs_growfs_log_private(mp, in);
        mutex_unlock(&mp->m_growlock);
        return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

int
xfs_fs_counts(
        xfs_mount_t             *mp,
        xfs_fsop_counts_t       *cnt)
{
        xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
        spin_lock(&mp->m_sb_lock);
        cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
        cnt->freertx = mp->m_sb.sb_frextents;
        cnt->freeino = mp->m_sb.sb_ifree;
        cnt->allocino = mp->m_sb.sb_icount;
        spin_unlock(&mp->m_sb_lock);
        return 0;
}

/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */

int
xfs_reserve_blocks(
        xfs_mount_t             *mp,
        __uint64_t              *inval,
        xfs_fsop_resblks_t      *outval)
{
        __int64_t               lcounter, delta, fdblks_delta;
        __uint64_t              request;

        /* If inval is null, report current values and return */
        if (inval == (__uint64_t *)NULL) {
                if (!outval)
                        return EINVAL;
                outval->resblks = mp->m_resblks;
                outval->resblks_avail = mp->m_resblks_avail;
                return 0;
        }

        request = *inval;

        /*
         * With per-cpu counters, this becomes an interesting
         * problem. We need to work out if we are freeing or allocating
         * blocks first, then we can do the modification as necessary.
         *
         * We do this under the m_sb_lock so that if we are near
         * ENOSPC, we will hold out any changes while we work out
         * what to do. This means that the amount of free space can
         * change while we do this, so we need to retry if we end up
         * trying to reserve more space than is available.
         *
         * We also use the xfs_mod_incore_sb() interface so that we
         * don't have to care about whether per-cpu counters are
         * enabled, disabled or even compiled in....
         */
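        /*
         * Worked example of the arithmetic below (numbers are illustrative
         * only): shrinking a reservation from m_resblks = 8192 to
         * request = 1024 with m_resblks_avail = 8000 returns
         * lcounter = 8000 - 1024 = 6976 blocks to the free pool
         * (fdblks_delta = 6976).  Growing it instead pulls
         * delta = request - m_resblks blocks out of the free pool if they
         * are available, otherwise we take whatever free space is left.
         */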
 retry:
        spin_lock(&mp->m_sb_lock);
        xfs_icsb_sync_counters_locked(mp, 0);

        /*
         * If our previous reservation was larger than the current value,
         * then move any unused blocks back to the free pool.
         */
        fdblks_delta = 0;
        if (mp->m_resblks > request) {
                lcounter = mp->m_resblks_avail - request;
                if (lcounter > 0) {             /* release unused blocks */
                        fdblks_delta = lcounter;
                        mp->m_resblks_avail -= lcounter;
                }
                mp->m_resblks = request;
        } else {
                __int64_t       free;

                free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
                if (!free)
                        goto out; /* ENOSPC and fdblks_delta = 0 */

                delta = request - mp->m_resblks;
                lcounter = free - delta;
                if (lcounter < 0) {
                        /* We can't satisfy the request, just get what we can */
                        mp->m_resblks += free;
                        mp->m_resblks_avail += free;
                        fdblks_delta = -free;
                } else {
                        fdblks_delta = -delta;
                        mp->m_resblks = request;
                        mp->m_resblks_avail += delta;
                }
        }
 out:
        if (outval) {
                outval->resblks = mp->m_resblks;
                outval->resblks_avail = mp->m_resblks_avail;
        }
        spin_unlock(&mp->m_sb_lock);

        if (fdblks_delta) {
                /*
                 * If we are putting blocks back here, m_resblks_avail is
                 * already at its max so this will put it in the free pool.
                 *
                 * If we need space, we'll either succeed in getting it
                 * from the free block count or we'll get ENOSPC. If
                 * we get ENOSPC, it means things changed while we were
                 * calculating fdblks_delta and so we should try again to
                 * see if there is anything left to reserve.
                 *
                 * Don't set the reserved flag here - we don't want to reserve
                 * the extra reserve blocks from the reserve.....
                 */
                int error;
                error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
                                                 fdblks_delta, 0);
                if (error == ENOSPC)
                        goto retry;
        }
        return 0;
}

/*
 * Dump a transaction into the log that contains no real change. This is needed
 * to be able to make the log dirty or stamp the current tail LSN into the log
 * during the covering operation.
 *
 * We cannot use an inode here for this - that will push dirty state back up
 * into the VFS and then periodic inode flushing will prevent log covering from
 * making progress. Hence we log a field in the superblock instead and use a
 * synchronous transaction to ensure the superblock is immediately unpinned
 * and can be written back.
 */
int
xfs_fs_log_dummy(
        xfs_mount_t     *mp)
{
        xfs_trans_t     *tp;
        int             error;

        tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
        error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
                                  XFS_DEFAULT_LOG_COUNT);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        /* log the UUID because it is an unchanging field */
        xfs_mod_sb(tp, XFS_SB_UUID);
        xfs_trans_set_sync(tp);
        return xfs_trans_commit(tp, 0);
}

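/*
 * Shut the filesystem down on request (exported through the
 * XFS_IOC_GOINGDOWN ioctl).  DEFAULT freezes the block device so dirty data
 * is flushed before the shutdown, LOGFLUSH shuts down without the freeze,
 * and NOLOGFLUSH additionally marks the log as having hit an I/O error so
 * nothing further is written to it.
 */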
int
xfs_fs_goingdown(
        xfs_mount_t     *mp,
        __uint32_t      inflags)
{
        switch (inflags) {
        case XFS_FSOP_GOING_FLAGS_DEFAULT: {
                struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

                if (sb && !IS_ERR(sb)) {
                        xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
                        thaw_bdev(sb->s_bdev, sb);
                }

                break;
        }
        case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
                xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
                break;
        case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
                xfs_force_shutdown(mp,
                                SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
                break;
        default:
                return XFS_ERROR(EINVAL);
        }

        return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 */
void
xfs_do_force_shutdown(
        xfs_mount_t     *mp,
        int             flags,
        char            *fname,
        int             lnnum)
{
        int             logerror;

        logerror = flags & SHUTDOWN_LOG_IO_ERROR;

        if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
                xfs_notice(mp,
        "%s(0x%x) called from line %d of file %s.  Return address = 0x%p",
                        __func__, flags, lnnum, fname, __return_address);
        }
        /*
         * No need to duplicate efforts.
         */
        if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
                return;

        /*
         * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
         * queue up anybody new on the log reservations, and wakes up
         * everybody who's sleeping on log reservations to tell them
         * the bad news.
         */
        if (xfs_log_force_umount(mp, logerror))
                return;

        if (flags & SHUTDOWN_CORRUPT_INCORE) {
                xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
        "Corruption of in-memory data detected.  Shutting down filesystem");
                if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
                        xfs_stack_trace();
        } else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
                if (logerror) {
                        xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
                "Log I/O Error Detected.  Shutting down filesystem");
                } else if (flags & SHUTDOWN_DEVICE_REQ) {
                        xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
                "All device paths lost.  Shutting down filesystem");
                } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
                        xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
                "I/O Error Detected. Shutting down filesystem");
                }
        }
        if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
                xfs_alert(mp,
        "Please umount the filesystem and rectify the problem(s)");
        }
}