// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_rmap.h"

static struct kmem_cache	*xfs_bmbt_cur_cache;

/*
 * Convert on-disk form of btree root to in-memory form.
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	rblock->bb_level = dblock->bb_level;
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
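
/*
 * Convert the on-disk form of a bmap extent record to the in-memory
 * (uncompressed) form, unpacking the packed startoff, startblock and
 * blockcount fields along with the unwritten-extent flag.
 */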
void
xfs_bmbt_disk_get_all(
	const struct xfs_bmbt_rec *rec,
	struct xfs_bmbt_irec	*irec)
{
	uint64_t		l0 = get_unaligned_be64(&rec->l0);
	uint64_t		l1 = get_unaligned_be64(&rec->l1);

	irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
	irec->br_blockcount = l1 & xfs_mask64lo(21);
	if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
		irec->br_state = XFS_EXT_UNWRITTEN;
	else
		irec->br_state = XFS_EXT_NORM;
}

/*
 * Extract the blockcount field from an on disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
	const struct xfs_bmbt_rec	*r)
{
	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
	const struct xfs_bmbt_rec	*r)
{
	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_disk_set_all(
	struct xfs_bmbt_rec	*r,
	struct xfs_bmbt_irec	*s)
{
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		 ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		 ((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		 ((xfs_bmbt_rec_base_t)s->br_blockcount &
		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	if (xfs_has_crc(mp)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ino.ip, cur->bc_ino.whichfork);

	/*
	 * Copy the flags value, since the init cursor call above doesn't
	 * set it.
	 */
	new->bc_ino.flags = cur->bc_ino.flags;

	return new;
}
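
/*
 * Fold the allocation state of the source cursor back into the destination
 * cursor after a btree operation, and reset the source's allocated count.
 */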
STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
	       (dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME));

	dst->bc_ino.allocated += src->bc_ino.allocated;
	dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;

	src->bc_ino.allocated = 0;
}

STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	xfs_alloc_arg_t		args;	/* block allocation args */
	int			error;	/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_tp->t_firstblock;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
			cur->bc_ino.whichfork);

	if (args.fsbno == NULLFSBLOCK) {
		args.fsbno = be64_to_cpu(start->l);
		args.type = XFS_ALLOCTYPE_START_BNO;
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert.  If
		 * we are converting the middle part of an extent then
		 * we may need space for two tree splits.
		 *
		 * We are relying on the caller to make the correct block
		 * reservation for this operation to succeed.  If the
		 * reservation amount is insufficient then we may fail a
		 * block allocation here and corrupt the filesystem.
		 */
		args.minleft = args.tp->t_blk_res;
	} else if (cur->bc_tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0) {
		error = -ENOSPC;
		goto error0;
	}
	error = xfs_alloc_vextent(&args);
	if (error)
		goto error0;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split.  Try again and if
		 * successful activate the lowspace algorithm.
		 */
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto error0;
		cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		*stat = 0;
		return 0;
	}

	ASSERT(args.len == 1);
	cur->bc_tp->t_firstblock = args.fsbno;
	cur->bc_ino.allocated++;
	cur->bc_ino.ip->i_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	*stat = 1;
	return 0;

 error0:
	return error;
}
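
/*
 * Free a bmap btree block.  The extent is queued for deferred freeing;
 * the inode block count and quota accounting are updated immediately.
 */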
STATIC int
xfs_bmbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
	struct xfs_owner_info	oinfo;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
	xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo);
	ip->i_nblocks--;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	return 0;
}

STATIC int
xfs_bmbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_ino.ip,
				    cur->bc_ino.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0) / 2;
	}

	return cur->bc_mp->m_bmap_dmnr[level != 0];
}
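
/*
 * Get the maximum number of records that fit in a btree block at the given
 * level.  The root block lives in the inode fork, so it is sized by the
 * space currently allocated to if_broot rather than by the block size.
 */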
int
xfs_bmbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_ino.ip,
				    cur->bc_ino.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0);
	}

	return cur->bc_mp->m_bmap_dmxr[level != 0];
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_bmap_dmxr[level != 0];
	return xfs_bmdr_maxrecs(cur->bc_ino.forksize, level == 0);
}

STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

STATIC void
xfs_bmbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff = cpu_to_be64(
		xfs_bmbt_disk_get_startoff(&rec->bmbt) +
		xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}

STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}
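
/*
 * Return the startoff of the given key minus the startoff of the cursor's
 * current record, so lookups can tell which direction to walk.
 */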
STATIC int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
				      cur->bc_rec.b.br_startoff;
}

STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	uint64_t			a = be64_to_cpu(k1->bmbt.br_startoff);
	uint64_t			b = be64_to_cpu(k2->bmbt.br_startoff);

	/*
	 * Note: This routine previously cast a and b to int64 and subtracted
	 * them to generate a result.  This led to problems if b was the
	 * "maximum" key value (all ones) being signed incorrectly, hence this
	 * somewhat less efficient version.
	 */
	if (a > b)
		return 1;
	if (b > a)
		return -1;
	return 0;
}

static xfs_failaddr_t
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		/*
		 * XXX: need a better way of verifying the owner here. Right now
		 * just make sure there has been one set.
		 */
		fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
		if (fa)
			return fa;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * is less than the maximum of the two. Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return __this_address;

	return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}

static void
xfs_bmbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_lblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_bmbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_bmbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_bmbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_lblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.magic = { cpu_to_be32(XFS_BMAP_MAGIC),
		   cpu_to_be32(XFS_BMAP_CRC_MAGIC) },
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
	.verify_struct = xfs_bmbt_verify,
};

STATIC int
xfs_bmbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be64_to_cpu(k1->bmbt.br_startoff) <
		be64_to_cpu(k2->bmbt.br_startoff);
}

STATIC int
xfs_bmbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
		xfs_bmbt_disk_get_startoff(&r2->bmbt);
}
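
/*
 * Operations table wiring the generic btree code up to the bmap btree
 * implementations above.
 */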
static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
};

/*
 * Allocate a new bmap btree cursor.
 */
struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* inode owning the btree */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;

	ASSERT(whichfork != XFS_COW_FORK);

	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
			mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

	cur->bc_ops = &xfs_bmbt_ops;
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_ino.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_ino.ip = ip;
	cur->bc_ino.allocated = 0;
	cur->bc_ino.flags = 0;
	cur->bc_ino.whichfork = whichfork;

	return cur;
}

/* Calculate number of records in a block mapping btree block. */
static inline unsigned int
xfs_bmbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_bmbt_rec_t);
	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}

/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_BMBT_BLOCK_LEN(mp);
	return xfs_bmbt_block_maxrecs(blocklen, leaf);
}

/*
 * Calculate the maximum possible height of the btree that the on-disk format
 * supports. This is used for sizing structures large enough to support every
 * possible configuration of a filesystem that might get mounted.
 */
unsigned int
xfs_bmbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_bmbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_bmbt_block_maxrecs(blocklen, false) / 2;

	/* One extra level for the inode root. */
	return xfs_btree_compute_maxlevels(minrecs,
			XFS_MAX_EXTCNT_DATA_FORK_LARGE) + 1;
}

/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= sizeof(xfs_bmdr_block_t);

	if (leaf)
		return blocklen / sizeof(xfs_bmdr_rec_t);
	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree format fork of the inode passed in.  Change it
 * to the owner that is passed in so that we can change owners before or after
 * we switch forks between inodes.  The operation that the caller is doing will
 * determine whether it needs to change owner before or after the switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction. For modification when the buffers are already pinned in memory,
 * the fork switch can be done before changing the owner as we won't need to
 * validate the owner until the btree buffers are unpinned and writes can occur
 * again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	ASSERT(XFS_IFORK_PTR(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	cur->bc_ino.flags |= XFS_BTCUR_BMBT_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error);
	return error;
}

/* Calculate the bmap btree size for some records. */
unsigned long long
xfs_bmbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}
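
/*
 * Create the slab cache from which bmap btree cursors are allocated.
 * Called once during XFS initialization.
 */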
int __init
xfs_bmbt_init_cur_cache(void)
{
	xfs_bmbt_cur_cache = kmem_cache_create("xfs_bmbt_cur",
			xfs_btree_cur_sizeof(xfs_bmbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_bmbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_bmbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_bmbt_cur_cache);
	xfs_bmbt_cur_cache = NULL;
}