// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}

STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}
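
/*
 * Illustrative sketch (not part of this file's logic): when the generic
 * btree code grows the tree by one level it allocates a new root block and
 * invokes the ->set_root() method with inc == 1.  Assuming a hypothetical
 * new_root_agbno for the freshly allocated block:
 *
 *	union xfs_btree_ptr	nptr;
 *
 *	nptr.s = cpu_to_be32(new_root_agbno);
 *	cur->bc_ops->set_root(cur, &nptr, 1);
 *
 * For the inobt this stores the new root in agi_root, bumps agi_level, and
 * logs both AGI fields so that log recovery can replay the change.
 */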

/* Update the inode btree block counter for this btree. */
static inline void
xfs_inobt_mod_blockcount(
	struct xfs_btree_cur	*cur,
	int			howmuch)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	if (!xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb))
		return;

	if (cur->bc_btnum == XFS_BTNUM_FINO)
		be32_add_cpu(&agi->agi_fblocks, howmuch);
	else if (cur->bc_btnum == XFS_BTNUM_INO)
		be32_add_cpu(&agi->agi_iblocks, howmuch);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
}

STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat,
	enum xfs_ag_resv_type	resv)
{
	xfs_alloc_arg_t		args;	/* block allocation args */
	int			error;	/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.oinfo = XFS_RMAP_OINFO_INOBT;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.pag->pag_agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	xfs_inobt_mod_blockcount(cur, 1);
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	xfs_inobt_mod_blockcount(cur, -1);
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&XFS_RMAP_OINFO_INOBT, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}
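
/*
 * Worked example (illustrative): XFS_INODES_PER_CHUNK is 64, so a record
 * with ir_startino == 96 spans inodes 96-159 and its high key is 159.
 * Range queries compare a record's high key against the low end of the
 * query interval to decide whether the two overlap.
 */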

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
				cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
				cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			be32_to_cpu(k2->inobt.ir_startino);
}
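
/*
 * Sign convention (illustrative): key_diff returns the key minus the
 * cursor's search record, so with cur->bc_rec.i.ir_startino == 128 a key
 * of 64 yields -64 (key sorts before the search point), 128 yields 0
 * (exact match), and 192 yields +64 (key sorts after it).  The generic
 * lookup code steers its binary search with this sign.
 */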

static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp,
			M_IGEO(mp)->inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

const struct xfs_buf_ops xfs_finobt_buf_ops = {
	.name = "xfs_finobt",
	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}
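
/*
 * Worked example (illustrative): recs_inorder demands a full chunk of
 * space between record starts.  A record at startino 0 followed by one at
 * startino 64 passes (0 + 64 <= 64); a successor at startino 32 would
 * overlap the first chunk and fail the check.
 */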

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_finobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

/*
 * Initialize a new inode btree cursor.
 */
static struct xfs_btree_cur *
xfs_inobt_init_common(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_btree_cur	*cur;

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
		cur->bc_ops = &xfs_inobt_ops;
	} else {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
		cur->bc_ops = &xfs_finobt_ops;
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;
	return cur;
}

/* Create an inode btree cursor. */
struct xfs_btree_cur *
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;
	struct xfs_agi		*agi = agbp->b_addr;

	cur = xfs_inobt_init_common(mp, tp, pag, btnum);
	if (btnum == XFS_BTNUM_INO)
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
	else
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create an inode btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_inobt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_inobt_init_common(mp, NULL, pag, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}
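
/*
 * Usage sketch (hedged; loosely modeled on how online repair drives the
 * staging API, details may differ): build a replacement tree off to the
 * side with a fake root, then swap it into the AGI, where "records" is a
 * hypothetical private pointer for the bload callbacks:
 *
 *	cur = xfs_inobt_stage_cursor(mp, &afake, pag, XFS_BTNUM_INO);
 *	error = xfs_btree_bload(cur, &bload, records);
 *	if (!error)
 *		xfs_inobt_commit_staged_btree(cur, tp, agbp);
 *	xfs_btree_del_cursor(cur, error);
 */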

/*
 * Install a new inobt btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_inobt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agi		*agi = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
	int			fields;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	if (cur->bc_btnum == XFS_BTNUM_INO) {
		fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
		agi->agi_root = cpu_to_be32(afake->af_root);
		agi->agi_level = cpu_to_be32(afake->af_levels);
		if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) {
			agi->agi_iblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
	} else {
		fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
		agi->agi_free_root = cpu_to_be32(afake->af_root);
		agi->agi_free_level = cpu_to_be32(afake->af_levels);
		if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) {
			agi->agi_fblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
	}
}

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is
 * free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16 bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to
	 * set in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the
	 * 0 bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}
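
/*
 * Worked example (illustrative): with 64 inodes per chunk and 16 holemask
 * bits, each holemask bit covers 4 inodes and inodespbit == 0xf.  For
 * ir_holemask == 0xff00 (the upper half of the chunk is a hole):
 *
 *	allocbitmap = ~0xff00 & 0xffff = 0x00ff
 *
 * Bits 0-7 each contribute 0xf at successive 4-bit offsets, giving an
 * allocmask of 0x00000000ffffffff: the first 32 inodes are physically
 * allocated, the rest are sparse holes.
 */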

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */

static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	xfs_agblock_t		agblocks = xfs_ag_block_count(mp, agno);

	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (M_IGEO(mp)->inobt_mxr[0] == 0)
		return 0;

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
		agblocks -= mp->m_sb.sb_logblocks;

	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
					XFS_INODES_PER_CHUNK);
}

/* Read AGI and create inobt cursor. */
int
xfs_inobt_cur(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		which,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(*agi_bpp == NULL);
	ASSERT(*curpp == NULL);

	error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, agi_bpp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, pag, which);
	*curpp = cur;
	return 0;
}

static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp = NULL;
	struct xfs_btree_cur	*cur = NULL;
	int			error;

	error = xfs_inobt_cur(mp, tp, pag, btnum, &cur, &agbp);
	if (error)
		return error;

	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);

	return error;
}

/* Read finobt block count from AGI header. */
static int
xfs_finobt_read_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp;
	struct xfs_agi		*agi;
	int			error;

	error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
	if (error)
		return error;

	agi = agbp->b_addr;
	*tree_blocks = be32_to_cpu(agi->agi_fblocks);
	xfs_trans_brelse(tp, agbp);
	return 0;
}
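
/*
 * Usage sketch (mirrors xfs_inobt_count_blocks above): the cursor pins the
 * AGI buffer it was created from, so callers release both when done:
 *
 *	error = xfs_inobt_cur(mp, tp, pag, XFS_BTNUM_INO, &cur, &agbp);
 *	if (error)
 *		return error;
 *	...walk the tree...
 *	xfs_btree_del_cursor(cur, error);
 *	xfs_trans_brelse(tp, agbp);
 */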

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
		error = xfs_finobt_read_blocks(mp, tp, pag, &tree_len);
	else
		error = xfs_inobt_count_blocks(mp, tp, pag, XFS_BTNUM_FINO,
				&tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp, pag->pag_agno);
	*used += tree_len;
	return 0;
}

/* Calculate the inobt btree size for some records. */
xfs_extlen_t
xfs_iallocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
}
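
/*
 * Worked example (illustrative; sizes come from the on-disk format, not
 * this file): an inobt record is 16 bytes and a key/ptr pair is 8 bytes,
 * so xfs_inobt_maxrecs() yields (blocklen - header) / 16 leaf records and
 * (blocklen - header) / 8 node entries.  xfs_btree_calc_size() then sums
 * the per-level worst case, ceil(len / minrecs) blocks at each level, to
 * bound the size of a tree holding "len" records.
 */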