// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_bmbt_cur_cache;

/*
 * Convert on-disk form of btree root to in-memory form.
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	rblock->bb_level = dblock->bb_level;
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

void
xfs_bmbt_disk_get_all(
	const struct xfs_bmbt_rec *rec,
	struct xfs_bmbt_irec	*irec)
{
	uint64_t		l0 = get_unaligned_be64(&rec->l0);
	uint64_t		l1 = get_unaligned_be64(&rec->l1);

	irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
	irec->br_blockcount = l1 & xfs_mask64lo(21);
	if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
		irec->br_state = XFS_EXT_UNWRITTEN;
	else
		irec->br_state = XFS_EXT_NORM;
}

/*
 * Extract the blockcount field from an on disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
	const struct xfs_bmbt_rec	*r)
{
	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
	const struct xfs_bmbt_rec	*r)
{
	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

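/*
 * On-disk bmbt extent records pack a whole mapping into two big-endian
 * 64-bit words.  The layout below follows from the shifts and masks used
 * by the accessors above and the setter that follows:
 *
 *	l0 bit  63	extent state (0 = written, 1 = unwritten)
 *	l0 bits 62-9	logical file offset (54 bits)
 *	l0 bits 8-0	high 9 bits of the start block
 *	l1 bits 63-21	low 43 bits of the start block (52 bits in total)
 *	l1 bits 20-0	block count (21 bits)
 */
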
/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_disk_set_all(
	struct xfs_bmbt_rec	*r,
	struct xfs_bmbt_irec	*s)
{
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		 ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		 ((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		 ((xfs_bmbt_rec_base_t)s->br_blockcount &
		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	if (xfs_has_crc(mp)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

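/*
 * Note on the two converters above: the bmdr ("bmap btree disk root")
 * format is the compact root block stored directly in the inode fork.
 * It carries only bb_level and bb_numrecs, with no sibling pointers or
 * CRC header, which is why the key and pointer array offsets have to be
 * recomputed separately for each format.
 */
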
STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ino.ip, cur->bc_ino.whichfork);

	/*
	 * Copy the flags values, since init cursor doesn't get them.
	 */
	new->bc_ino.flags = cur->bc_ino.flags;

	return new;
}

STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_tp->t_highest_agno != NULLAGNUMBER) ||
	       (dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME));

	dst->bc_ino.allocated += src->bc_ino.allocated;
	dst->bc_tp->t_highest_agno = src->bc_tp->t_highest_agno;

	src->bc_ino.allocated = 0;
}

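/*
 * Allocate a new block for the bmbt.  Blocks for a delayed-allocation
 * conversion (XFS_BTCUR_BMBT_WASDEL) come out of the indirect reservation
 * made when the delalloc extent was created; anything else needs a block
 * reservation in the transaction, hence the -ENOSPC check below.
 */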
STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfs_alloc_arg	args;
	int			error;

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
			cur->bc_ino.whichfork);
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0)
		return -ENOSPC;

	/*
	 * If we are coming here from something like unwritten extent
	 * conversion, there has been no data extent allocation already done,
	 * so we have to ensure that we attempt to locate the entire set of
	 * bmbt allocations in the same AG, as xfs_bmapi_write() would have
	 * reserved.
	 */
	if (cur->bc_tp->t_highest_agno == NULLAGNUMBER)
		args.minleft = xfs_bmapi_minleft(cur->bc_tp, cur->bc_ino.ip,
					cur->bc_ino.whichfork);

	error = xfs_alloc_vextent_start_ag(&args, be64_to_cpu(start->l));
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split.  Try again and if
		 * successful activate the lowspace algorithm.
		 */
		args.minleft = 0;
		error = xfs_alloc_vextent_start_ag(&args, 0);
		if (error)
			return error;
		cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		*stat = 0;
		return 0;
	}

	ASSERT(args.len == 1);
	cur->bc_ino.allocated++;
	cur->bc_ino.ip->i_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	*stat = 1;
	return 0;
}

STATIC int
xfs_bmbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
	struct xfs_owner_info	oinfo;
	int			error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
	error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	return 0;
}

STATIC int
xfs_bmbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = xfs_ifork_ptr(cur->bc_ino.ip,
				    cur->bc_ino.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0) / 2;
	}

	return cur->bc_mp->m_bmap_dmnr[level != 0];
}

int
xfs_bmbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = xfs_ifork_ptr(cur->bc_ino.ip,
				    cur->bc_ino.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0);
	}

	return cur->bc_mp->m_bmap_dmxr[level != 0];
}

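/*
 * The root of the bmbt lives in the inode fork, so in the two helpers
 * above its capacity is derived from the current size of the incore
 * broot buffer (if_broot_bytes) rather than from the filesystem block
 * size used for all other levels.
 */
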
/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_bmap_dmxr[level != 0];
	return xfs_bmdr_maxrecs(cur->bc_ino.forksize, level == 0);
}

STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

STATIC void
xfs_bmbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff = cpu_to_be64(
		xfs_bmbt_disk_get_startoff(&rec->bmbt) +
		xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}

STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}

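/*
 * The two comparison helpers below return the usual <0/0/>0 ordering:
 * negative when the first operand sorts before the second, zero when
 * they are equal, and positive when it sorts after.
 */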
STATIC int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
				      cur->bc_rec.b.br_startoff;
}

STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	uint64_t			a = be64_to_cpu(k1->bmbt.br_startoff);
	uint64_t			b = be64_to_cpu(k2->bmbt.br_startoff);

	ASSERT(!mask || mask->bmbt.br_startoff);

	/*
	 * Note: This routine previously cast a and b to int64 and subtracted
	 * them to generate a result.  This led to problems if b was the
	 * "maximum" key value (all ones) being signed incorrectly, hence this
	 * somewhat less efficient version.
	 */
	if (a > b)
		return 1;
	if (b > a)
		return -1;
	return 0;
}

static xfs_failaddr_t
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		/*
		 * XXX: need a better way of verifying the owner here. Right
		 * now just make sure there has been one set.
		 */
		fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
		if (fa)
			return fa;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * is less than the maximum of the two.  Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return __this_address;

	return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}

static void
xfs_bmbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_lblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_bmbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_bmbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_bmbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_lblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.magic = { cpu_to_be32(XFS_BMAP_MAGIC),
		   cpu_to_be32(XFS_BMAP_CRC_MAGIC) },
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
	.verify_struct = xfs_bmbt_verify,
};

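/*
 * The ordering helpers below appear to back the generic btree code's
 * debug checks and scrub: keys must strictly increase, and a record must
 * end (at startoff + blockcount - 1) before the next record begins.
 */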
STATIC int
xfs_bmbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be64_to_cpu(k1->bmbt.br_startoff) <
		be64_to_cpu(k2->bmbt.br_startoff);
}

STATIC int
xfs_bmbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
		xfs_bmbt_disk_get_startoff(&r2->bmbt);
}

STATIC enum xbtree_key_contig
xfs_bmbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->bmbt.br_startoff);

	return xbtree_key_contig(be64_to_cpu(key1->bmbt.br_startoff),
				 be64_to_cpu(key2->bmbt.br_startoff));
}

static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
	.keys_contiguous	= xfs_bmbt_keys_contiguous,
};

/*
 * Allocate a new bmap btree cursor.
 */
struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* inode owning the btree */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur;

	ASSERT(whichfork != XFS_COW_FORK);

	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
			mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

	cur->bc_ops = &xfs_bmbt_ops;
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
	cur->bc_ino.ip = ip;
	cur->bc_ino.allocated = 0;
	cur->bc_ino.flags = 0;
	cur->bc_ino.whichfork = whichfork;

	return cur;
}

/* Calculate number of records in a block mapping btree block. */
static inline unsigned int
xfs_bmbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_bmbt_rec_t);
	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}

/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_BMBT_BLOCK_LEN(mp);
	return xfs_bmbt_block_maxrecs(blocklen, leaf);
}

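/*
 * Illustrative numbers for the helper above, assuming a v5 filesystem
 * with 4096-byte blocks: the long-format CRC block header is 72 bytes,
 * leaving 4024 bytes of payload, so a leaf holds 4024 / 16 = 251 records
 * and a node holds 4024 / (8 + 8) = 251 key/pointer pairs.
 */
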
/*
 * Calculate the maximum possible height of the btree that the on-disk format
 * supports. This is used for sizing structures large enough to support every
 * possible configuration of a filesystem that might get mounted.
 */
unsigned int
xfs_bmbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_bmbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_bmbt_block_maxrecs(blocklen, false) / 2;

	/* One extra level for the inode root. */
	return xfs_btree_compute_maxlevels(minrecs,
			XFS_MAX_EXTCNT_DATA_FORK_LARGE) + 1;
}

/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= sizeof(xfs_bmdr_block_t);

	if (leaf)
		return blocklen / sizeof(xfs_bmdr_rec_t);
	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree format fork of the inode passed in to the new
 * owner that is passed in, so that we can change owners before or after we
 * switch forks between inodes.  The operation that the caller is doing will
 * determine whether it needs to change the owner before or after the switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction. For modification when the buffers are already pinned in memory,
 * the fork switch can be done before changing the owner as we won't need to
 * validate the owner until the btree buffers are unpinned and writes can occur
 * again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	ASSERT(xfs_ifork_ptr(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	cur->bc_ino.flags |= XFS_BTCUR_BMBT_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error);
	return error;
}

/* Calculate the bmap btree size for some records. */
unsigned long long
xfs_bmbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}

int __init
xfs_bmbt_init_cur_cache(void)
{
	xfs_bmbt_cur_cache = kmem_cache_create("xfs_bmbt_cur",
			xfs_btree_cur_sizeof(xfs_bmbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_bmbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_bmbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_bmbt_cur_cache);
	xfs_bmbt_cur_cache = NULL;
}