/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"


/*
 * Passive reference counting access wrappers to the perag structures. If the
 * per-ag structure is to be freed, the freeing code is responsible for
 * cleaning up objects with passive references before freeing the structure.
 * This includes things like cached buffers.
 */
struct xfs_perag *
xfs_perag_get(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		trace_xfs_perag_get(pag, _RET_IP_);
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		atomic_inc(&pag->pag_ref);
	}
	rcu_read_unlock();
	return pag;
}

/*
 * Search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	unsigned int		tag)
{
	struct xfs_perag	*pag;
	int			found;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	trace_xfs_perag_get_tag(pag, _RET_IP_);
	atomic_inc(&pag->pag_ref);
	rcu_read_unlock();
	return pag;
}

/* Get a passive reference to the given perag. */
struct xfs_perag *
xfs_perag_hold(
	struct xfs_perag	*pag)
{
	ASSERT(atomic_read(&pag->pag_ref) > 0 ||
	       atomic_read(&pag->pag_active_ref) > 0);

	trace_xfs_perag_hold(pag, _RET_IP_);
	atomic_inc(&pag->pag_ref);
	return pag;
}

void
xfs_perag_put(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_put(pag, _RET_IP_);
	ASSERT(atomic_read(&pag->pag_ref) > 0);
	atomic_dec(&pag->pag_ref);
}

/*
 * Active references for perag structures. This is for short term access to
 * the per ag structures for walking trees or accessing state. If an AG is
 * being shrunk or is offline, then this will fail to find that AG and return
 * NULL instead.
 */
struct xfs_perag *
xfs_perag_grab(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		trace_xfs_perag_grab(pag, _RET_IP_);
		if (!atomic_inc_not_zero(&pag->pag_active_ref))
			pag = NULL;
	}
	rcu_read_unlock();
	return pag;
}
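/*
 * Illustrative sketch, not part of the original source: a minimal caller
 * showing how the two reference types differ. xfs_example_count_online_ags()
 * is a hypothetical helper; it relies on xfs_perag_grab() returning NULL for
 * AGs that are offline or being shrunk, whereas a passive xfs_perag_get()
 * would still succeed for such AGs.
 */
static inline xfs_agnumber_t
xfs_example_count_online_ags(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	xfs_agnumber_t		online = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_grab(mp, agno);
		if (!pag)
			continue;	/* offline or shrinking AG */
		online++;
		xfs_perag_rele(pag);	/* drop the active reference */
	}
	return online;
}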
/*
 * Search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_grab_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	trace_xfs_perag_grab_tag(pag, _RET_IP_);
	if (!atomic_inc_not_zero(&pag->pag_active_ref))
		pag = NULL;
	rcu_read_unlock();
	return pag;
}

void
xfs_perag_rele(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_rele(pag, _RET_IP_);
	if (atomic_dec_and_test(&pag->pag_active_ref))
		wake_up(&pag->pag_active_wq);
}

/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	struct xfs_perag	*pag;
	struct xfs_sb		*sbp = &mp->m_sb;
	uint64_t		ifree = 0;
	uint64_t		ialloc = 0;
	uint64_t		bfree = 0;
	uint64_t		bfreelst = 0;
	uint64_t		btree = 0;
	uint64_t		fdblocks;
	int			error = 0;

	for (index = 0; index < agcount; index++) {
		/*
		 * Read the AGF and AGI buffers to populate the per-ag
		 * structures for us.
		 */
		pag = xfs_perag_get(mp, index);
		error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
		if (!error)
			error = xfs_ialloc_read_agi(pag, NULL, NULL);
		if (error) {
			xfs_perag_put(pag);
			return error;
		}

		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	fdblocks = bfree + bfreelst + btree;

	/*
	 * If the new summary counts are obviously incorrect, fail the
	 * mount operation because that implies the AGFs are also corrupt.
	 * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
	 * will prevent xfs_repair from fixing anything.
	 */
	if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
		xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out;
	}

	/* Overwrite incore superblock counters with just-read data */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = fdblocks;
	spin_unlock(&mp->m_sb_lock);

	xfs_reinit_percpu_counters(mp);
out:
	xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	return error;
}

STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
	kmem_free(pag);
}
/*
 * Free up the per-ag resources associated with the mount structure.
 */
void
xfs_free_perag(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
		xfs_defer_drain_free(&pag->pag_intents_drain);

		cancel_delayed_work_sync(&pag->pag_blockgc_work);
		xfs_buf_hash_destroy(pag);

		/* drop the mount's active reference */
		xfs_perag_rele(pag);
		XFS_IS_CORRUPT(pag->pag_mount,
				atomic_read(&pag->pag_active_ref) != 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/* Find the size of the AG, in blocks. */
static xfs_agblock_t
__xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks)
{
	ASSERT(agno < agcount);

	if (agno < agcount - 1)
		return mp->m_sb.sb_agblocks;
	return dblocks - (agno * mp->m_sb.sb_agblocks);
}

xfs_agblock_t
xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	return __xfs_ag_block_count(mp, agno, mp->m_sb.sb_agcount,
			mp->m_sb.sb_dblocks);
}

/* Calculate the first and last possible inode number in an AG. */
static void
__xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agblock_t		eoag,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	xfs_agblock_t		bno;

	/*
	 * Calculate the first inode, which will be in the first
	 * cluster-aligned block after the AGFL.
	 */
	bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
	*first = XFS_AGB_TO_AGINO(mp, bno);

	/*
	 * Calculate the last inode, which will be at the end of the
	 * last (aligned) cluster that can be allocated in the AG.
	 */
	bno = round_down(eoag, M_IGEO(mp)->cluster_align);
	*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}

void
xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
}
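/*
 * Worked example for the helpers above, with illustrative numbers that are
 * not taken from the original source: given sb_agblocks = 1048576 and
 * sb_dblocks = 3670016, the filesystem has 4 AGs. AGs 0-2 are full sized,
 * so xfs_ag_block_count() returns 1048576 for them, while the runt AG 3
 * gets the remainder: 3670016 - 3 * 1048576 = 524288 blocks.
 * xfs_agino_range() then derives the valid inode numbers from that
 * (possibly shorter) block count.
 */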
/*
 * Free perags within the specified AG range. This is only used to free
 * unused perags on the error handling path.
 */
void
xfs_free_unused_perag_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agstart,
	xfs_agnumber_t		agend)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;

	for (index = agstart; index < agend; index++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		spin_unlock(&mp->m_perag_lock);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		xfs_defer_drain_free(&pag->pag_intents_drain);
		kmem_free(pag);
	}
}

int
xfs_initialize_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks,
	xfs_agnumber_t		*maxagi)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;
	xfs_agnumber_t		first_initialised = NULLAGNUMBER;
	int			error;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag) {
			error = -ENOMEM;
			goto out_unwind_new_pags;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;

		error = radix_tree_preload(GFP_NOFS);
		if (error)
			goto out_free_pag;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			WARN_ON_ONCE(1);
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_free_pag;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();

#ifdef __KERNEL__
		/* Place kernel structure only init below this point. */
		spin_lock_init(&pag->pag_ici_lock);
		spin_lock_init(&pag->pagb_lock);
		spin_lock_init(&pag->pag_state_lock);
		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		xfs_defer_drain_init(&pag->pag_intents_drain);
		init_waitqueue_head(&pag->pagb_wait);
		init_waitqueue_head(&pag->pag_active_wq);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
#endif /* __KERNEL__ */

		error = xfs_buf_hash_init(pag);
		if (error)
			goto out_remove_pag;

		/* Active ref owned by mount indicates AG is online. */
		atomic_set(&pag->pag_active_ref, 1);

		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;

		/*
		 * Pre-calculated geometry
		 */
		pag->block_count = __xfs_ag_block_count(mp, index, agcount,
				dblocks);
		pag->min_block = XFS_AGFL_BLOCK(mp);
		__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_remove_pag:
	xfs_defer_drain_free(&pag->pag_intents_drain);
	spin_lock(&mp->m_perag_lock);
	radix_tree_delete(&mp->m_perag_tree, index);
	spin_unlock(&mp->m_perag_lock);
out_free_pag:
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	xfs_free_unused_perag_range(mp, first_initialised, agcount);
	return error;
}

static int
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	*bpp = bp;
	return 0;
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
}
/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

	if (xfs_ag_contains_log(mp, id->agno)) {
		struct xfs_alloc_rec	*nrec;
		xfs_agblock_t		start = XFS_FSB_TO_AGBNO(mp,
							mp->m_sb.sb_logstart);

		ASSERT(start >= mp->m_ag_prealloc_blocks);
		if (start != mp->m_ag_prealloc_blocks) {
			/*
			 * Modify first record to pad stripe align of log and
			 * bump the record count.
			 */
			arec->ar_blockcount = cpu_to_be32(start -
						mp->m_ag_prealloc_blocks);
			be16_add_cpu(&block->bb_numrecs, 1);
			nrec = arec + 1;

			/*
			 * Insert second record at start of internal log
			 * which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
		}
		/*
		 * Change record start to after the internal log
		 */
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}

	/*
	 * Calculate the block count of this record; if it is nonzero,
	 * increment the record count.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
	if (arec->ar_blockcount)
		be16_add_cpu(&block->bb_numrecs, 1);
}

/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 0, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 0, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}
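/*
 * Worked example for xfs_freesp_init_recs() above, using illustrative
 * numbers not taken from the original source: assume m_ag_prealloc_blocks
 * is 16 and an AG of agsize 1024 blocks contains a stripe-aligned internal
 * log at agbno 64 with sb_logblocks = 128. The first record is trimmed to
 * startblock 16, blockcount 48 to cover the alignment padding before the
 * log, and a second record is inserted at agbno 64 and then shifted past
 * the log to cover startblock 192, blockcount 1024 - 192 = 832. Both free
 * space root blocks end up with bb_numrecs = 2.
 */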
593 * 594 * Note: unlike mkfs, we never have to account for log 595 * space when growing the data regions 596 */ 597 rrec = XFS_RMAP_REC_ADDR(block, 1); 598 rrec->rm_startblock = 0; 599 rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp)); 600 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS); 601 rrec->rm_offset = 0; 602 603 /* account freespace btree root blocks */ 604 rrec = XFS_RMAP_REC_ADDR(block, 2); 605 rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp)); 606 rrec->rm_blockcount = cpu_to_be32(2); 607 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG); 608 rrec->rm_offset = 0; 609 610 /* account inode btree root blocks */ 611 rrec = XFS_RMAP_REC_ADDR(block, 3); 612 rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp)); 613 rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) - 614 XFS_IBT_BLOCK(mp)); 615 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT); 616 rrec->rm_offset = 0; 617 618 /* account for rmap btree root */ 619 rrec = XFS_RMAP_REC_ADDR(block, 4); 620 rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp)); 621 rrec->rm_blockcount = cpu_to_be32(1); 622 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG); 623 rrec->rm_offset = 0; 624 625 /* account for refc btree root */ 626 if (xfs_has_reflink(mp)) { 627 rrec = XFS_RMAP_REC_ADDR(block, 5); 628 rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp)); 629 rrec->rm_blockcount = cpu_to_be32(1); 630 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC); 631 rrec->rm_offset = 0; 632 be16_add_cpu(&block->bb_numrecs, 1); 633 } 634 635 /* account for the log space */ 636 if (xfs_ag_contains_log(mp, id->agno)) { 637 rrec = XFS_RMAP_REC_ADDR(block, 638 be16_to_cpu(block->bb_numrecs) + 1); 639 rrec->rm_startblock = cpu_to_be32( 640 XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart)); 641 rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks); 642 rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG); 643 rrec->rm_offset = 0; 644 be16_add_cpu(&block->bb_numrecs, 1); 645 } 646 } 647 648 /* 649 * Initialise new secondary superblocks with the pre-grow geometry, but mark 650 * them as "in progress" so we know they haven't yet been activated. This will 651 * get cleared when the update with the new geometry information is done after 652 * changes to the primary are committed. This isn't strictly necessary, but we 653 * get it for free with the delayed buffer write lists and it means we can tell 654 * if a grow operation didn't complete properly after the fact. 
/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This
 * will get cleared when the update with the new geometry information is done
 * after changes to the primary are committed. This isn't strictly necessary,
 * but we get it for free with the delayed buffer write lists and it means we
 * can tell if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}

static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_has_rmapbt(mp)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_has_crc(mp))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_reflink(mp)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	if (xfs_ag_contains_log(mp, id->agno)) {
		int64_t	logblocks = mp->m_sb.sb_logblocks;

		be32_add_cpu(&agf->agf_freeblks, -logblocks);
		agf->agf_longest = cpu_to_be32(id->agsize -
			XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
	}
}

static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_has_crc(mp)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = xfs_buf_to_agfl_bno(bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = bp->b_addr;
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_has_crc(mp))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_finobt(mp)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	if (xfs_has_inobtcounts(mp)) {
		agi->agi_iblocks = cpu_to_be32(1);
		if (xfs_has_finobt(mp))
			agi->agi_fblocks = cpu_to_be32(1);
	}
}
typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);

static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}

struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	xfs_btnum_t		type;
	bool			need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so they can submit them
 * to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_has_finobt(mp)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_has_rmapbt(mp)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_has_reflink(mp)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}
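/*
 * Illustrative sketch, not part of the original source: a grow-style caller
 * is expected to drive xfs_ag_init_headers() roughly like this, submitting
 * the accumulated delayed write list itself. xfs_example_write_new_ag() and
 * its parameters are hypothetical; the aghdr_init_data usage mirrors the
 * delwri queueing above, and xfs_buf_delwri_submit()/xfs_buf_delwri_cancel()
 * are the standard XFS buffer list APIs.
 */
static inline int
xfs_example_write_new_ag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		agsize)
{
	struct aghdr_init_data	id = {
		.agno		= agno,
		.agsize		= agsize,
	};
	int			error;

	INIT_LIST_HEAD(&id.buffer_list);
	error = xfs_ag_init_headers(mp, &id);
	if (error) {
		/* Walk away from the buffers we queued but never wrote. */
		xfs_buf_delwri_cancel(&id.buffer_list);
		return error;
	}

	/* Write the new AG headers to disk and wait for completion. */
	return xfs_buf_delwri_submit(&id.buffer_list);
}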
int
xfs_ag_shrink_space(
	struct xfs_perag	*pag,
	struct xfs_trans	**tpp,
	xfs_extlen_t		delta)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_alloc_arg	args = {
		.tp	= *tpp,
		.mp	= mp,
		.pag	= pag,
		.minlen = delta,
		.maxlen = delta,
		.oinfo	= XFS_RMAP_OINFO_SKIP_UPDATE,
		.resv	= XFS_AG_RESV_NONE,
		.prod	= 1
	};
	struct xfs_buf		*agibp, *agfbp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	xfs_agblock_t		aglen;
	int			error, err2;

	ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
	error = xfs_ialloc_read_agi(pag, *tpp, &agibp);
	if (error)
		return error;

	agi = agibp->b_addr;

	error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
	if (error)
		return error;

	agf = agfbp->b_addr;
	aglen = be32_to_cpu(agi->agi_length);
	/* some extra paranoid checks before we shrink the ag */
	if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length))
		return -EFSCORRUPTED;
	if (delta >= aglen)
		return -EINVAL;

	/*
	 * Make sure that the last inode cluster cannot overlap with the new
	 * end of the AG, even if it's sparse.
	 */
	error = xfs_ialloc_check_shrink(pag, *tpp, agibp, aglen - delta);
	if (error)
		return error;

	/*
	 * Disable the perag reservation so it doesn't cause the allocation
	 * request to fail. We'll reestablish the reservation before we return.
	 */
	error = xfs_ag_resv_free(pag);
	if (error)
		return error;

	/* internal log shouldn't also show up in the free space btrees */
	error = xfs_alloc_vextent_exact_bno(&args,
			XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta));
	if (!error && args.agbno == NULLAGBLOCK)
		error = -ENOSPC;

	if (error) {
		/*
		 * If extent allocation fails, need to roll the transaction to
		 * ensure that the AGFL fixup has been committed anyway.
		 *
		 * We need to hold the AGF across the roll to ensure nothing can
		 * access the AG for allocation until the shrink is fully
		 * cleaned up. And due to the resetting of the AG block
		 * reservation space needing to lock the AGI, we also have to
		 * hold that so we don't get AGI/AGF lock order inversions in
		 * the error handling path.
		 */
		xfs_trans_bhold(*tpp, agfbp);
		xfs_trans_bhold(*tpp, agibp);
		err2 = xfs_trans_roll(tpp);
		if (err2)
			return err2;
		xfs_trans_bjoin(*tpp, agfbp);
		xfs_trans_bjoin(*tpp, agibp);
		goto resv_init_out;
	}

	/*
	 * If the extent was successfully removed from the free space btrees,
	 * confirm that the per-AG reservation still works as expected.
	 */
	be32_add_cpu(&agi->agi_length, -delta);
	be32_add_cpu(&agf->agf_length, -delta);

	err2 = xfs_ag_resv_init(pag, *tpp);
	if (err2) {
		be32_add_cpu(&agi->agi_length, delta);
		be32_add_cpu(&agf->agf_length, delta);
		if (err2 != -ENOSPC)
			goto resv_err;

		err2 = __xfs_free_extent_later(*tpp, args.fsbno, delta, NULL,
				XFS_AG_RESV_NONE, true);
		if (err2)
			goto resv_err;

		/*
		 * Roll the transaction before trying to re-init the per-ag
		 * reservation. The new transaction is clean so it will cancel
		 * without any side effects.
		 */
		error = xfs_defer_finish(tpp);
		if (error)
			return error;

		error = -ENOSPC;
		goto resv_init_out;
	}

	/* Update perag geometry */
	pag->block_count -= delta;
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);

	xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
	xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
	return 0;

resv_init_out:
	err2 = xfs_ag_resv_init(pag, *tpp);
	if (!err2)
		return error;
resv_err:
	xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return err2;
}

/*
 * Extend the AG indicated by @pag by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);

	error = xfs_ialloc_read_agi(pag, tp, &bp);
	if (error)
		return error;

	agi = bp->b_addr;
	be32_add_cpu(&agi->agi_length, len);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change agf length.
	 */
	error = xfs_alloc_read_agf(pag, tp, 0, &bp);
	if (error)
		return error;

	agf = bp->b_addr;
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
				len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	error = xfs_free_extent(tp, pag, be32_to_cpu(agf->agf_length) - len,
			len, &XFS_RMAP_OINFO_SKIP_UPDATE, XFS_AG_RESV_NONE);
	if (error)
		return error;

	/* Update perag geometry */
	pag->block_count = be32_to_cpu(agf->agf_length);
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	return 0;
}
/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_perag	*pag,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	unsigned int		freeblks;
	int			error;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(pag, NULL, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
	if (error)
		goto out_agi;

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = pag->pag_agno;

	agi = agi_bp->b_addr;
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = agf_bp->b_addr;
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);

	/* Release resources. */
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}
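/*
 * Illustrative sketch, not part of the original source: totalling free
 * space across all AGs with xfs_ag_get_geometry(), taking a passive perag
 * reference around each call in the same way the XFS_IOC_AG_GEOMETRY ioctl
 * backend does. xfs_example_sum_ag_freeblks() is a hypothetical helper.
 */
static inline int
xfs_example_sum_ag_freeblks(
	struct xfs_mount	*mp,
	uint64_t		*freeblks)
{
	struct xfs_ag_geometry	ageo;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	int			error;

	*freeblks = 0;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);
		if (!pag)
			break;	/* ran off the end of the AG tree */
		error = xfs_ag_get_geometry(pag, &ageo);
		xfs_perag_put(pag);
		if (error)
			return error;
		*freeblks += ageo.ag_freeblks;
	}
	return 0;
}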