/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"


STATIC int
xfs_inobt_get_minrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
        struct xfs_btree_cur    *cur)
{
        return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_private.a.agbp, cur->bc_private.a.agno,
                        cur->bc_btnum);
}

STATIC void
xfs_inobt_set_root(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *nptr,
        int                     inc)    /* level change */
{
        struct xfs_buf          *agbp = cur->bc_private.a.agbp;
        struct xfs_agi          *agi = XFS_BUF_TO_AGI(agbp);

        agi->agi_root = nptr->s;
        be32_add_cpu(&agi->agi_level, inc);
        xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *nptr,
        int                     inc)    /* level change */
{
        struct xfs_buf          *agbp = cur->bc_private.a.agbp;
        struct xfs_agi          *agi = XFS_BUF_TO_AGI(agbp);

        agi->agi_free_root = nptr->s;
        be32_add_cpu(&agi->agi_free_level, inc);
        xfs_ialloc_log_agi(cur->bc_tp, agbp,
                           XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

STATIC int
__xfs_inobt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
        int                     *stat,
        enum xfs_ag_resv_type   resv)
{
        xfs_alloc_arg_t         args;           /* block allocation args */
        int                     error;          /* error return value */
        xfs_agblock_t           sbno = be32_to_cpu(start->s);

        XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

        memset(&args, 0, sizeof(args));
        args.tp = cur->bc_tp;
        args.mp = cur->bc_mp;
        xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
        args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
        args.minlen = 1;
        args.maxlen = 1;
        args.prod = 1;
        args.type = XFS_ALLOCTYPE_NEAR_BNO;
        args.resv = resv;

        error = xfs_alloc_vextent(&args);
        if (error) {
                XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
                return error;
        }
        if (args.fsbno == NULLFSBLOCK) {
                XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
                *stat = 0;
                return 0;
        }
        ASSERT(args.len == 1);
        XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);

        new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
        *stat = 1;
        return 0;
}
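
/*
 * Thin wrappers that select which reservation pool new btree blocks are
 * accounted against: the regular inobt draws from the general AG pool
 * (XFS_AG_RESV_NONE), while the finobt charges the per-AG metadata
 * reservation (XFS_AG_RESV_METADATA) sized by xfs_finobt_calc_reserves()
 * below.
 */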

STATIC int
xfs_inobt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
        int                     *stat)
{
        return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
        int                     *stat)
{
        return __xfs_inobt_alloc_block(cur, start, new, stat,
                        XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
{
        struct xfs_owner_info   oinfo;

        xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
        return xfs_free_extent(cur->bc_tp,
                        XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
                        &oinfo, XFS_AG_RESV_NONE);
}

STATIC int
xfs_inobt_get_maxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_rec_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec)
{
        rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
        if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
                rec->inobt.ir_u.sp.ir_holemask =
                                cpu_to_be16(cur->bc_rec.i.ir_holemask);
                rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
                rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
        } else {
                /* ir_holemask/ir_count not supported on-disk */
                rec->inobt.ir_u.f.ir_freecount =
                                cpu_to_be32(cur->bc_rec.i.ir_freecount);
        }
        rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        struct xfs_agi          *agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

        ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));

        ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        struct xfs_agi          *agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

        ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
        ptr->s = agi->agi_free_root;
}

STATIC __int64_t
xfs_inobt_key_diff(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *key)
{
        return (__int64_t)be32_to_cpu(key->inobt.ir_startino) -
                          cur->bc_rec.i.ir_startino;
}
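
/*
 * Sanity-check an on-disk inobt/finobt block before it is used: the magic
 * value distinguishes v4 blocks from v5 CRC-enabled blocks, v5 blocks get
 * their extended header verified, and the level and common short-form btree
 * block fields are range-checked.
 */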
static bool
xfs_inobt_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_target->bt_mount;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        unsigned int            level;

        /*
         * During growfs operations, we can't verify the exact owner as the
         * perag is not fully initialised and hence not attached to the buffer.
         *
         * Similarly, during log recovery we will have a perag structure
         * attached, but the agi information will not yet have been initialised
         * from the on disk AGI. We don't currently use any of this information,
         * but beware of the landmine (i.e. need to check pag->pagi_init) if we
         * ever do.
         */
        switch (block->bb_magic) {
        case cpu_to_be32(XFS_IBT_CRC_MAGIC):
        case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
                if (!xfs_btree_sblock_v5hdr_verify(bp))
                        return false;
                /* fall through */
        case cpu_to_be32(XFS_IBT_MAGIC):
        case cpu_to_be32(XFS_FIBT_MAGIC):
                break;
        default:
                return false;
        }

        /* level verification */
        level = be16_to_cpu(block->bb_level);
        if (level >= mp->m_in_maxlevels)
                return false;

        return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
        struct xfs_buf  *bp)
{
        if (!xfs_btree_sblock_verify_crc(bp))
                xfs_buf_ioerror(bp, -EFSBADCRC);
        else if (!xfs_inobt_verify(bp))
                xfs_buf_ioerror(bp, -EFSCORRUPTED);

        if (bp->b_error) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_verifier_error(bp);
        }
}

static void
xfs_inobt_write_verify(
        struct xfs_buf  *bp)
{
        if (!xfs_inobt_verify(bp)) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_buf_ioerror(bp, -EFSCORRUPTED);
                xfs_verifier_error(bp);
                return;
        }
        xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
        .name = "xfs_inobt",
        .verify_read = xfs_inobt_read_verify,
        .verify_write = xfs_inobt_write_verify,
};

#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_inobt_keys_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        return be32_to_cpu(k1->inobt.ir_startino) <
                be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *r1,
        union xfs_btree_rec     *r2)
{
        return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
                be32_to_cpu(r2->inobt.ir_startino);
}
#endif  /* DEBUG || XFS_WARN */

static const struct xfs_btree_ops xfs_inobt_ops = {
        .rec_len                = sizeof(xfs_inobt_rec_t),
        .key_len                = sizeof(xfs_inobt_key_t),

        .dup_cursor             = xfs_inobt_dup_cursor,
        .set_root               = xfs_inobt_set_root,
        .alloc_block            = xfs_inobt_alloc_block,
        .free_block             = xfs_inobt_free_block,
        .get_minrecs            = xfs_inobt_get_minrecs,
        .get_maxrecs            = xfs_inobt_get_maxrecs,
        .init_key_from_rec      = xfs_inobt_init_key_from_rec,
        .init_rec_from_cur      = xfs_inobt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_inobt_init_ptr_from_cur,
        .key_diff               = xfs_inobt_key_diff,
        .buf_ops                = &xfs_inobt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
        .keys_inorder           = xfs_inobt_keys_inorder,
        .recs_inorder           = xfs_inobt_recs_inorder,
#endif
};

static const struct xfs_btree_ops xfs_finobt_ops = {
        .rec_len                = sizeof(xfs_inobt_rec_t),
        .key_len                = sizeof(xfs_inobt_key_t),

        .dup_cursor             = xfs_inobt_dup_cursor,
        .set_root               = xfs_finobt_set_root,
        .alloc_block            = xfs_finobt_alloc_block,
        .free_block             = xfs_inobt_free_block,
        .get_minrecs            = xfs_inobt_get_minrecs,
        .get_maxrecs            = xfs_inobt_get_maxrecs,
        .init_key_from_rec      = xfs_inobt_init_key_from_rec,
        .init_rec_from_cur      = xfs_inobt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_finobt_init_ptr_from_cur,
        .key_diff               = xfs_inobt_key_diff,
        .buf_ops                = &xfs_inobt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
        .keys_inorder           = xfs_inobt_keys_inorder,
        .recs_inorder           = xfs_inobt_recs_inorder,
#endif
};
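
/*
 * A minimal usage sketch (not from this file): callers typically read the
 * AGI buffer, build a cursor for the btree of interest, run their btree
 * operations, and tear the cursor down, e.g.:
 *
 *      error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
 *      if (error)
 *              return error;
 *      cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
 *      error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &stat);
 *      xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 *
 * See xfs_inobt_count_blocks() below for an in-file example of this pattern.
 */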
/*
 * Allocate a new inode btree cursor.
 */
struct xfs_btree_cur *                          /* new inode btree cursor */
xfs_inobt_init_cursor(
        struct xfs_mount        *mp,            /* file system mount point */
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_buf          *agbp,          /* buffer for agi structure */
        xfs_agnumber_t          agno,           /* allocation group number */
        xfs_btnum_t             btnum)          /* ialloc or free ino btree */
{
        struct xfs_agi          *agi = XFS_BUF_TO_AGI(agbp);
        struct xfs_btree_cur    *cur;

        cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_btnum = btnum;
        if (btnum == XFS_BTNUM_INO) {
                cur->bc_nlevels = be32_to_cpu(agi->agi_level);
                cur->bc_ops = &xfs_inobt_ops;
                cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
        } else {
                cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
                cur->bc_ops = &xfs_finobt_ops;
                cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
        }

        cur->bc_blocklog = mp->m_sb.sb_blocklog;

        if (xfs_sb_version_hascrc(&mp->m_sb))
                cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

        cur->bc_private.a.agbp = agbp;
        cur->bc_private.a.agno = agno;

        return cur;
}

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
        struct xfs_mount        *mp,
        int                     blocklen,
        int                     leaf)
{
        blocklen -= XFS_INOBT_BLOCK_LEN(mp);

        if (leaf)
                return blocklen / sizeof(xfs_inobt_rec_t);
        return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is
 * free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
        struct xfs_inobt_rec_incore     *rec)
{
        uint64_t                        bitmap = 0;
        uint64_t                        inodespbit;
        int                             nextbit;
        uint                            allocbitmap;

        /*
         * The holemask has 16 bits for a 64 inode record. Therefore each
         * holemask bit represents multiple inodes. Create a mask of bits to
         * set in the allocmask for each holemask bit.
         */
        inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

        /*
         * Allocated inodes are represented by 0 bits in holemask. Invert the
         * 0 bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
         * anything beyond the 16 holemask bits since this casts to a larger
         * type.
         */
        allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

        /*
         * allocbitmap is the inverted holemask so every set bit represents
         * allocated inodes. To expand from 16-bit holemask granularity to
         * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
         * bitmap for every holemask bit.
         */
        nextbit = xfs_next_bit(&allocbitmap, 1, 0);
        while (nextbit != -1) {
                ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

                bitmap |= (inodespbit <<
                           (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

                nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
        }

        return bitmap;
}
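
/*
 * Worked example (illustrative, assuming the usual 64-inode record with
 * XFS_INODES_PER_HOLEMASK_BIT == 4): a sparse record with
 * ir_holemask == 0xff00 has holes covering inodes 32-63, so
 * allocbitmap == 0x00ff and each of its eight set bits expands to a nibble
 * of 1s, yielding an allocmask of 0x00000000ffffffff (inodes 0-31 are
 * physically allocated).
 */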

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
        struct xfs_mount                *mp,
        struct xfs_inobt_rec_incore     *rec)
{
        int                             inocount = 0;
        int                             nextbit = 0;
        uint64_t                        allocbmap;
        int                             wordsz;

        wordsz = sizeof(allocbmap) / sizeof(unsigned int);
        allocbmap = xfs_inobt_irec_to_allocmask(rec);

        nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
        while (nextbit != -1) {
                inocount++;
                nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
                                       nextbit + 1);
        }

        if (inocount != rec->ir_count)
                return -EFSCORRUPTED;

        return 0;
}
#endif  /* DEBUG || XFS_WARN */

static xfs_extlen_t
xfs_inobt_max_size(
        struct xfs_mount        *mp)
{
        /* Bail out if we're uninitialized, which can happen in mkfs. */
        if (mp->m_inobt_mxr[0] == 0)
                return 0;

        return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
                (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
                                XFS_INODES_PER_CHUNK);
}

static int
xfs_inobt_count_blocks(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_btnum_t             btnum,
        xfs_extlen_t            *tree_blocks)
{
        struct xfs_buf          *agbp;
        struct xfs_btree_cur    *cur;
        int                     error;

        error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
        if (error)
                return error;

        cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
        error = xfs_btree_count_blocks(cur, tree_blocks);
        xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
        xfs_buf_relse(agbp);

        return error;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_extlen_t            *ask,
        xfs_extlen_t            *used)
{
        xfs_extlen_t            tree_len = 0;
        int                     error;

        if (!xfs_sb_version_hasfinobt(&mp->m_sb))
                return 0;

        error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
        if (error)
                return error;

        *ask += xfs_inobt_max_size(mp);
        *used += tree_len;
        return 0;
}
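
/*
 * Note that xfs_finobt_calc_reserves() accumulates into @ask and @used rather
 * than assigning them: the per-AG reservation code is expected to sum the
 * requirements of every reserved metadata btree in the AG before establishing
 * a single XFS_AG_RESV_METADATA reservation.
 */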