// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_btree_staging.h"

/*
 * Staging Cursors and Fake Roots for Btrees
 * =========================================
 *
 * A staging btree cursor is a special type of btree cursor that callers must
 * use to construct a new btree index using the btree bulk loader code.  The
 * bulk loading code uses the staging btree cursor to abstract the details of
 * initializing new btree blocks and filling them with records or key/ptr
 * pairs.  Regular btree operations (e.g. queries and modifications) are not
 * supported with staging cursors, and callers must not invoke them.
 *
 * Fake root structures contain all the information about a btree that is
 * under construction by the bulk loading code.  Staging btree cursors point
 * to fake root structures instead of the usual AG header or inode structure.
 *
 * Callers are expected to initialize a fake root structure and pass it into
 * the _stage_cursor function for a specific btree type.  When bulk loading is
 * complete, callers should call the _commit_staged_btree function for that
 * specific btree type to commit the new btree into the filesystem.
 */

/*
 * Don't allow staging cursors to be duplicated because they're supposed to be
 * kept private to a single thread.
 */
STATIC struct xfs_btree_cur *
xfs_btree_fakeroot_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	ASSERT(0);
	return NULL;
}

/*
 * Don't allow block allocation for a staging cursor, because staging cursors
 * do not support regular btree modifications.
 *
 * Bulk loading uses a separate callback to obtain new blocks from a
 * preallocated list, which prevents ENOSPC failures during loading.
 */
STATIC int
xfs_btree_fakeroot_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start_bno,
	union xfs_btree_ptr	*new_bno,
	int			*stat)
{
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Don't allow block freeing for a staging cursor, because staging cursors
 * do not support regular btree modifications.
 */
STATIC int
xfs_btree_fakeroot_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	ASSERT(0);
	return -EFSCORRUPTED;
}

/* Initialize a pointer to the root block from the fakeroot. */
STATIC void
xfs_btree_fakeroot_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xbtree_afakeroot	*afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	afake = cur->bc_ag.afake;
	ptr->s = cpu_to_be32(afake->af_root);
}

/*
 * Bulk Loading for AG Btrees
 * ==========================
 *
 * For a btree rooted in an AG header, pass an xbtree_afakeroot structure to
 * the staging cursor.  Callers should initialize this to zero.
 *
 * The _stage_cursor() function for a specific btree type should call
 * xfs_btree_stage_afakeroot to set up the in-memory cursor as a staging
 * cursor.  The corresponding _commit_staged_btree() function should log the
 * new root and call xfs_btree_commit_afakeroot() to transform the staging
 * cursor into a regular btree cursor.
 */
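/*
 * For illustration, a rough sketch of how a btree-type-specific caller might
 * drive these helpers.  The type-specific pieces (xfs_foobt_init_cursor,
 * xfs_foobt_log_root, and xfs_foobt_ops) are hypothetical names, not symbols
 * defined in this file; only the two generic staging calls are:
 *
 *	struct xbtree_afakeroot	afake = { 0 };
 *	struct xfs_btree_cur	*cur;
 *
 *	cur = xfs_foobt_init_cursor(mp, NULL, agbp, agno);
 *	xfs_btree_stage_afakeroot(cur, &afake);
 *
 *	(bulk load the records with xfs_btree_bload)
 *
 *	xfs_foobt_log_root(tp, agbp, afake.af_root, afake.af_levels);
 *	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_foobt_ops);
 */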
/* Update the btree root information for a per-AG fake root. */
STATIC void
xfs_btree_afakeroot_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	afake->af_root = be32_to_cpu(ptr->s);
	afake->af_levels += inc;
}

/*
 * Initialize an AG-rooted btree cursor with the given AG btree fake root.
 * The btree cursor's bc_ops will be overridden as needed to make the staging
 * functionality work.
 */
void
xfs_btree_stage_afakeroot(
	struct xfs_btree_cur	*cur,
	struct xbtree_afakeroot	*afake)
{
	struct xfs_btree_ops	*nops;

	ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
	ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE));
	ASSERT(cur->bc_tp == NULL);

	nops = kmem_alloc(sizeof(struct xfs_btree_ops), KM_NOFS);
	memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
	nops->alloc_block = xfs_btree_fakeroot_alloc_block;
	nops->free_block = xfs_btree_fakeroot_free_block;
	nops->init_ptr_from_cur = xfs_btree_fakeroot_init_ptr_from_cur;
	nops->set_root = xfs_btree_afakeroot_set_root;
	nops->dup_cursor = xfs_btree_fakeroot_dup_cursor;

	cur->bc_ag.afake = afake;
	cur->bc_nlevels = afake->af_levels;
	cur->bc_ops = nops;
	cur->bc_flags |= XFS_BTREE_STAGING;
}

/*
 * Transform an AG-rooted staging btree cursor back into a regular cursor by
 * substituting a real btree root for the fake one and restoring normal btree
 * cursor ops.  The caller must log the btree root change prior to calling
 * this.
 */
void
xfs_btree_commit_afakeroot(
	struct xfs_btree_cur		*cur,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	const struct xfs_btree_ops	*ops)
{
	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	ASSERT(cur->bc_tp == NULL);

	trace_xfs_btree_commit_afakeroot(cur);

	kmem_free((void *)cur->bc_ops);
	cur->bc_ag.agbp = agbp;
	cur->bc_ops = ops;
	cur->bc_flags &= ~XFS_BTREE_STAGING;
	cur->bc_tp = tp;
}

/*
 * Bulk Loading for Inode-Rooted Btrees
 * ====================================
 *
 * For a btree rooted in an inode fork, pass an xbtree_ifakeroot structure to
 * the staging cursor.  This structure should be initialized as follows:
 *
 * - if_fork_size field should be set to the number of bytes available to the
 *   fork in the inode.
 *
 * - if_fork should point to a freshly allocated struct xfs_ifork.
 *
 * - if_format should be set to the appropriate fork type (e.g.
 *   XFS_DINODE_FMT_BTREE).
 *
 * All other fields must be zero.
 *
 * The _stage_cursor() function for a specific btree type should call
 * xfs_btree_stage_ifakeroot to set up the in-memory cursor as a staging
 * cursor.  The corresponding _commit_staged_btree() function should log the
 * new root and call xfs_btree_commit_ifakeroot() to transform the staging
 * cursor into a regular btree cursor.
 */
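/*
 * A rough sketch of the setup described above; the fork allocation and the
 * xfs_foobt_stage_cursor helper are hypothetical and btree-type specific:
 *
 *	struct xbtree_ifakeroot	ifake = { 0 };
 *	struct xfs_btree_cur	*cur;
 *
 *	ifake.if_fork_size = <bytes available to the fork in the inode>;
 *	ifake.if_fork = <freshly allocated struct xfs_ifork>;
 *	ifake.if_format = XFS_DINODE_FMT_BTREE;
 *
 *	cur = xfs_foobt_stage_cursor(mp, ip, &ifake, whichfork);
 *
 * After bulk loading, the corresponding commit function would log the new
 * root and call xfs_btree_commit_ifakeroot(cur, tp, whichfork, ops).
 */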
/*
 * Initialize an inode-rooted btree cursor with the given inode btree fake
 * root.  The btree cursor's bc_ops will be overridden as needed to make the
 * staging functionality work.  If new_ops is not NULL, these new ops will be
 * passed out to the caller for further overriding.
 */
void
xfs_btree_stage_ifakeroot(
	struct xfs_btree_cur	*cur,
	struct xbtree_ifakeroot	*ifake,
	struct xfs_btree_ops	**new_ops)
{
	struct xfs_btree_ops	*nops;

	ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
	ASSERT(cur->bc_tp == NULL);

	nops = kmem_alloc(sizeof(struct xfs_btree_ops), KM_NOFS);
	memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
	nops->alloc_block = xfs_btree_fakeroot_alloc_block;
	nops->free_block = xfs_btree_fakeroot_free_block;
	nops->init_ptr_from_cur = xfs_btree_fakeroot_init_ptr_from_cur;
	nops->dup_cursor = xfs_btree_fakeroot_dup_cursor;

	cur->bc_ino.ifake = ifake;
	cur->bc_nlevels = ifake->if_levels;
	cur->bc_ops = nops;
	cur->bc_flags |= XFS_BTREE_STAGING;

	if (new_ops)
		*new_ops = nops;
}

/*
 * Transform an inode-rooted staging btree cursor back into a regular cursor
 * by substituting a real btree root for the fake one and restoring normal
 * btree cursor ops.  The caller must log the btree root change prior to
 * calling this.
 */
void
xfs_btree_commit_ifakeroot(
	struct xfs_btree_cur		*cur,
	struct xfs_trans		*tp,
	int				whichfork,
	const struct xfs_btree_ops	*ops)
{
	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	ASSERT(cur->bc_tp == NULL);

	trace_xfs_btree_commit_ifakeroot(cur);

	kmem_free((void *)cur->bc_ops);
	cur->bc_ino.ifake = NULL;
	cur->bc_ino.whichfork = whichfork;
	cur->bc_ops = ops;
	cur->bc_flags &= ~XFS_BTREE_STAGING;
	cur->bc_tp = tp;
}

/*
 * Bulk Loading of Staged Btrees
 * =============================
 *
 * This interface is used with a staged btree cursor to create a totally new
 * btree with a large number of records (i.e. more than what would fit in a
 * single root block).  When the creation is complete, the new root can be
 * linked atomically into the filesystem by committing the staged cursor.
 *
 * Creation of a new btree proceeds roughly as follows:
 *
 * The first step is to initialize an appropriate fake btree root structure
 * and then construct a staged btree cursor.  Refer to the block comments
 * about "Bulk Loading for AG Btrees" and "Bulk Loading for Inode-Rooted
 * Btrees" for more information about how to do this.
 *
 * The second step is to initialize a struct xfs_btree_bload context as
 * documented in the structure definition.
 *
 * The third step is to call xfs_btree_bload_compute_geometry to compute the
 * height of and the number of blocks needed to construct the btree.  See the
 * section "Computing the Geometry of the New Btree" for details about this
 * computation.
 *
 * In step four, the caller must allocate xfs_btree_bload.nr_blocks blocks
 * and save them for later use by ->claim_block().  Bulk loading requires all
 * blocks to be allocated beforehand to avoid ENOSPC failures midway through
 * a rebuild, and to minimize seek distances of the new btree.
 *
 * Step five is to call xfs_btree_bload() to start constructing the btree.
 *
 * The final step is to commit the staging btree cursor, which logs the new
 * btree root and turns the staging cursor into a regular cursor.  The caller
 * is responsible for cleaning up the previous btree blocks, if any.
 */
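/*
 * Putting the steps together, a sketch of a bulk load of a new AG btree.
 * The type-specific pieces (xfs_foobt_stage_cursor,
 * xfs_foobt_commit_staged_btree) and the caller-supplied xfoo_claim_block
 * and xfoo_get_record callbacks are hypothetical:
 *
 *	struct xbtree_afakeroot	afake = { 0 };
 *	struct xfs_btree_bload	bbl = {
 *		.get_record	= xfoo_get_record,
 *		.claim_block	= xfoo_claim_block,
 *		.leaf_slack	= -1,
 *		.node_slack	= -1,
 *	};
 *	struct xfs_btree_cur	*cur;
 *	int			error;
 *
 *	cur = xfs_foobt_stage_cursor(mp, &afake, agno);
 *	error = xfs_btree_bload_compute_geometry(cur, &bbl, nr_records);
 *	if (error)
 *		goto out;
 *
 *	(allocate bbl.nr_blocks blocks for xfoo_claim_block to hand out)
 *
 *	error = xfs_btree_bload(cur, &bbl, priv);
 *	if (error)
 *		goto out;
 *
 *	xfs_foobt_commit_staged_btree(cur, tp, agbp);
 *
 * Negative slack values ask the loader to pick default loading factors; see
 * "Computing the Geometry of the New Btree" below.
 */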
/*
 * Computing the Geometry of the New Btree
 * =======================================
 *
 * The number of items placed in each btree block is computed via the
 * following algorithm: For leaf levels, the number of items for the level
 * is nr_records in the bload structure.  For node levels, the number of
 * items for the level is the number of blocks in the next lower level of
 * the tree.  For each level, the desired number of items per block is
 * defined as:
 *
 *	desired = max(minrecs, maxrecs - slack factor)
 *
 * The number of blocks for the level is defined to be:
 *
 *	blocks = floor(nr_items / desired)
 *
 * Note this is rounded down so that the npb calculation below will never
 * fall below minrecs.  The number of items that will actually be loaded
 * into each btree block is defined as:
 *
 *	npb = nr_items / blocks
 *
 * Some of the leftmost blocks in the level will contain one extra record as
 * needed to handle uneven division.  If the number of records in any block
 * would exceed maxrecs for that level, blocks is incremented and npb is
 * recalculated.
 *
 * In other words, we compute the number of blocks needed to satisfy a given
 * loading level, then spread the items as evenly as possible.
 *
 * The height and number of fs blocks required to create the btree are
 * computed and returned via btree_height and nr_blocks.
 */
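/*
 * To make the algorithm above concrete, a worked example with assumed
 * numbers: say nr_items = 100 for a leaf level with maxrecs = 10,
 * minrecs = 5, and a slack factor of 2:
 *
 *	desired = max(5, 10 - 2)  = 8
 *	blocks  = floor(100 / 8)  = 12
 *	npb     = 100 / 12        = 8, remainder 4
 *
 * The four leftmost leaf blocks are loaded with nine records apiece and the
 * other eight blocks with eight records (4 * 9 + 8 * 8 = 100), so every
 * block stays within [minrecs, maxrecs] and the level is evenly loaded.
 */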
/*
 * Put a btree block that we're loading onto the ordered list and release it.
 * The btree blocks will be written to disk when bulk loading is finished.
 */
static void
xfs_btree_bload_drop_buf(
	struct list_head	*buffers_list,
	struct xfs_buf		**bpp)
{
	if (*bpp == NULL)
		return;

	if (!xfs_buf_delwri_queue(*bpp, buffers_list))
		ASSERT(0);

	xfs_buf_relse(*bpp);
	*bpp = NULL;
}

/*
 * Allocate and initialize one btree block for bulk loading.
 *
 * The new btree block will have its level and numrecs fields set to the
 * values of the level and nr_this_block parameters, respectively.
 *
 * The caller should ensure that ptrp, bpp, and blockp refer to the left
 * sibling of the new block, if there is any.  On exit, ptrp, bpp, and
 * blockp will all point to the new block.
 */
STATIC int
xfs_btree_bload_prep_block(
	struct xfs_btree_cur		*cur,
	struct xfs_btree_bload		*bbl,
	struct list_head		*buffers_list,
	unsigned int			level,
	unsigned int			nr_this_block,
	union xfs_btree_ptr		*ptrp, /* in/out */
	struct xfs_buf			**bpp, /* in/out */
	struct xfs_btree_block		**blockp, /* in/out */
	void				*priv)
{
	union xfs_btree_ptr	new_ptr;
	struct xfs_buf		*new_bp;
	struct xfs_btree_block	*new_block;
	int			ret;

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);
		size_t			new_size;

		ASSERT(*bpp == NULL);

		/* Allocate a new incore btree root block. */
		new_size = bbl->iroot_size(cur, nr_this_block, priv);
		ifp->if_broot = kmem_zalloc(new_size, 0);
		ifp->if_broot_bytes = (int)new_size;
		ifp->if_flags |= XFS_IFBROOT;

		/* Initialize it and send it out. */
		xfs_btree_init_block_int(cur->bc_mp, ifp->if_broot,
				XFS_BUF_DADDR_NULL, cur->bc_btnum, level,
				nr_this_block, cur->bc_ino.ip->i_ino,
				cur->bc_flags);

		*bpp = NULL;
		*blockp = ifp->if_broot;
		xfs_btree_set_ptr_null(cur, ptrp);
		return 0;
	}

	/* Claim one of the caller's preallocated blocks. */
	xfs_btree_set_ptr_null(cur, &new_ptr);
	ret = bbl->claim_block(cur, &new_ptr, priv);
	if (ret)
		return ret;

	ASSERT(!xfs_btree_ptr_is_null(cur, &new_ptr));

	ret = xfs_btree_get_buf_block(cur, &new_ptr, &new_block, &new_bp);
	if (ret)
		return ret;

	/*
	 * The previous block (if any) is the left sibling of the new block,
	 * so set its right sibling pointer to the new block and drop it.
	 */
	if (*blockp)
		xfs_btree_set_sibling(cur, *blockp, &new_ptr, XFS_BB_RIGHTSIB);
	xfs_btree_bload_drop_buf(buffers_list, bpp);

	/* Initialize the new btree block. */
	xfs_btree_init_block_cur(cur, new_bp, level, nr_this_block);
	xfs_btree_set_sibling(cur, new_block, ptrp, XFS_BB_LEFTSIB);

	/* Set the out parameters. */
	*bpp = new_bp;
	*blockp = new_block;
	xfs_btree_copy_ptrs(cur, ptrp, &new_ptr, 1);
	return 0;
}

/* Load one leaf block. */
STATIC int
xfs_btree_bload_leaf(
	struct xfs_btree_cur		*cur,
	unsigned int			recs_this_block,
	xfs_btree_bload_get_record_fn	get_record,
	struct xfs_btree_block		*block,
	void				*priv)
{
	unsigned int		j;
	int			ret;

	/* Fill the leaf block with records. */
	for (j = 1; j <= recs_this_block; j++) {
		union xfs_btree_rec	*block_rec;

		ret = get_record(cur, priv);
		if (ret)
			return ret;
		block_rec = xfs_btree_rec_addr(cur, j, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return 0;
}

/*
 * Load one node block with key/ptr pairs.
 *
 * child_ptr must point to a block within the next level down in the tree.
 * A key/ptr entry will be created in the new node block to the block pointed
 * to by child_ptr.  On exit, child_ptr points to the next block on the child
 * level that needs processing.
 */
STATIC int
xfs_btree_bload_node(
	struct xfs_btree_cur	*cur,
	unsigned int		recs_this_block,
	union xfs_btree_ptr	*child_ptr,
	struct xfs_btree_block	*block)
{
	unsigned int		j;
	int			ret;

	/* Fill the node block with keys and pointers. */
	for (j = 1; j <= recs_this_block; j++) {
		union xfs_btree_key	child_key;
		union xfs_btree_ptr	*block_ptr;
		union xfs_btree_key	*block_key;
		struct xfs_btree_block	*child_block;
		struct xfs_buf		*child_bp;

		ASSERT(!xfs_btree_ptr_is_null(cur, child_ptr));

		ret = xfs_btree_get_buf_block(cur, child_ptr, &child_block,
				&child_bp);
		if (ret)
			return ret;

		block_ptr = xfs_btree_ptr_addr(cur, j, block);
		xfs_btree_copy_ptrs(cur, block_ptr, child_ptr, 1);

		block_key = xfs_btree_key_addr(cur, j, block);
		xfs_btree_get_keys(cur, child_block, &child_key);
		xfs_btree_copy_keys(cur, block_key, &child_key, 1);

		xfs_btree_get_sibling(cur, child_block, child_ptr,
				XFS_BB_RIGHTSIB);
		xfs_buf_relse(child_bp);
	}

	return 0;
}
/*
 * Compute the maximum number of records (or keyptrs) per block that we want
 * to install at this level in the btree.  Caller is responsible for having
 * set @cur->bc_ino.forksize to the desired fork size, if appropriate.
 */
STATIC unsigned int
xfs_btree_bload_max_npb(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_bload	*bbl,
	unsigned int		level)
{
	unsigned int		ret;

	if (level == cur->bc_nlevels - 1 && cur->bc_ops->get_dmaxrecs)
		return cur->bc_ops->get_dmaxrecs(cur, level);

	ret = cur->bc_ops->get_maxrecs(cur, level);
	if (level == 0)
		ret -= bbl->leaf_slack;
	else
		ret -= bbl->node_slack;
	return ret;
}

/*
 * Compute the desired number of records (or keyptrs) per block that we want
 * to install at this level in the btree, which must be somewhere between
 * minrecs and max_npb.  The caller is free to install fewer records per
 * block.
 */
STATIC unsigned int
xfs_btree_bload_desired_npb(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_bload	*bbl,
	unsigned int		level)
{
	unsigned int		npb = xfs_btree_bload_max_npb(cur, bbl, level);

	/* Root blocks are not subject to minrecs rules. */
	if (level == cur->bc_nlevels - 1)
		return max(1U, npb);

	return max_t(unsigned int, cur->bc_ops->get_minrecs(cur, level), npb);
}

/*
 * Compute the number of records to be stored in each block at this level and
 * the number of blocks for this level.  For leaf levels, we must populate an
 * empty root block even if there are no records, so we have to have at least
 * one block.
 */
STATIC void
xfs_btree_bload_level_geometry(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_bload	*bbl,
	unsigned int		level,
	uint64_t		nr_this_level,
	unsigned int		*avg_per_block,
	uint64_t		*blocks,
	uint64_t		*blocks_with_extra)
{
	uint64_t		npb;
	uint64_t		dontcare;
	unsigned int		desired_npb;
	unsigned int		maxnr;

	maxnr = cur->bc_ops->get_maxrecs(cur, level);

	/*
	 * Compute the number of blocks we need to fill each block with the
	 * desired number of records/keyptrs per block.  Because desired_npb
	 * could be minrecs, we use regular integer division (which rounds
	 * the block count down) so that in the next step the effective # of
	 * items per block will never be less than desired_npb.
	 */
	desired_npb = xfs_btree_bload_desired_npb(cur, bbl, level);
	*blocks = div64_u64_rem(nr_this_level, desired_npb, &dontcare);
	*blocks = max(1ULL, *blocks);

	/*
	 * Compute the number of records that we will actually put in each
	 * block, assuming that we want to spread the records evenly between
	 * the blocks.  Take care that the effective # of items per block
	 * (npb) won't exceed maxrecs even for the blocks that get an extra
	 * record, since desired_npb could be maxrecs, and in the previous
	 * step we rounded the block count down.
	 */
	npb = div64_u64_rem(nr_this_level, *blocks, blocks_with_extra);
	if (npb > maxnr || (npb == maxnr && *blocks_with_extra > 0)) {
		(*blocks)++;
		npb = div64_u64_rem(nr_this_level, *blocks, blocks_with_extra);
	}

	*avg_per_block = min_t(uint64_t, npb, nr_this_level);

	trace_xfs_btree_bload_level_geometry(cur, level, nr_this_level,
			*avg_per_block, desired_npb, *blocks,
			*blocks_with_extra);
}
/*
 * Ensure a slack value is appropriate for the btree.
 *
 * If the slack value is negative, set slack so that we fill the block to
 * halfway between minrecs and maxrecs.  Make sure the slack is never so
 * large that we can underflow minrecs.
 */
static void
xfs_btree_bload_ensure_slack(
	struct xfs_btree_cur	*cur,
	int			*slack,
	int			level)
{
	int			maxr;
	int			minr;

	maxr = cur->bc_ops->get_maxrecs(cur, level);
	minr = cur->bc_ops->get_minrecs(cur, level);

	/*
	 * If slack is negative, automatically set slack so that we load the
	 * btree block approximately halfway between minrecs and maxrecs.
	 * Generally, this will net us 75% loading.
	 */
	if (*slack < 0)
		*slack = maxr - ((maxr + minr) >> 1);

	*slack = min(*slack, maxr - minr);
}
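/*
 * For example, with assumed values maxrecs = 16 and minrecs = 8, a negative
 * slack value becomes 16 - ((16 + 8) >> 1) = 4, so blocks are loaded with
 * roughly 16 - 4 = 12 records apiece, i.e. 75% full.
 */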
/*
 * Prepare a btree cursor for a bulk load operation by computing the geometry
 * fields in bbl.  Caller must ensure that the btree cursor is a staging
 * cursor.  This function can be called multiple times.
 */
int
xfs_btree_bload_compute_geometry(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_bload	*bbl,
	uint64_t		nr_records)
{
	uint64_t		nr_blocks = 0;
	uint64_t		nr_this_level;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	/*
	 * Make sure that the slack values make sense for traditional leaf
	 * and node blocks.  Inode-rooted btrees will return different minrecs
	 * and maxrecs values for the root block (level == bc_nlevels - 1).
	 * We're checking levels 0 and 1 here, so set bc_nlevels such that the
	 * btree code doesn't interpret either as the root level.
	 */
	cur->bc_nlevels = XFS_BTREE_MAXLEVELS - 1;
	xfs_btree_bload_ensure_slack(cur, &bbl->leaf_slack, 0);
	xfs_btree_bload_ensure_slack(cur, &bbl->node_slack, 1);

	bbl->nr_records = nr_this_level = nr_records;
	for (cur->bc_nlevels = 1; cur->bc_nlevels < XFS_BTREE_MAXLEVELS;) {
		uint64_t	level_blocks;
		uint64_t	dontcare64;
		unsigned int	level = cur->bc_nlevels - 1;
		unsigned int	avg_per_block;

		xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
				&avg_per_block, &level_blocks, &dontcare64);

		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
			/*
			 * If all the items we want to store at this level
			 * would fit in the inode root block, then we have our
			 * btree root and are done.
			 *
			 * Note that bmap btrees forbid records in the root.
			 */
			if (level != 0 && nr_this_level <= avg_per_block) {
				nr_blocks++;
				break;
			}

			/*
			 * Otherwise, we have to store all the items for this
			 * level in traditional btree blocks and therefore
			 * need another level of btree to point to those
			 * blocks.
			 *
			 * We have to re-compute the geometry for each level
			 * of an inode-rooted btree because the geometry
			 * differs between a btree root in an inode fork and
			 * a traditional btree block.
			 *
			 * This distinction is made in the btree code based on
			 * whether level == bc_nlevels - 1.  Based on the
			 * previous root block size check against the root
			 * block geometry, we know that we aren't yet ready to
			 * populate the root.  Increment bc_nlevels and
			 * recalculate the geometry for a traditional
			 * block-based btree level.
			 */
			cur->bc_nlevels++;
			xfs_btree_bload_level_geometry(cur, bbl, level,
					nr_this_level, &avg_per_block,
					&level_blocks, &dontcare64);
		} else {
			/*
			 * If all the items we want to store at this level
			 * would fit in a single root block, we're done.
			 */
			if (nr_this_level <= avg_per_block) {
				nr_blocks++;
				break;
			}

			/* Otherwise, we need another level of btree. */
			cur->bc_nlevels++;
		}

		nr_blocks += level_blocks;
		nr_this_level = level_blocks;
	}

	if (cur->bc_nlevels == XFS_BTREE_MAXLEVELS)
		return -EOVERFLOW;

	bbl->btree_height = cur->bc_nlevels;
	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
		bbl->nr_blocks = nr_blocks - 1;
	else
		bbl->nr_blocks = nr_blocks;
	return 0;
}

/* Bulk load a btree given the parameters and geometry established in bbl. */
int
xfs_btree_bload(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_bload	*bbl,
	void			*priv)
{
	struct list_head	buffers_list;
	union xfs_btree_ptr	child_ptr;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp = NULL;
	struct xfs_btree_block	*block = NULL;
	uint64_t		nr_this_level = bbl->nr_records;
	uint64_t		blocks;
	uint64_t		i;
	uint64_t		blocks_with_extra;
	uint64_t		total_blocks = 0;
	unsigned int		avg_per_block;
	unsigned int		level = 0;
	int			ret;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	INIT_LIST_HEAD(&buffers_list);
	cur->bc_nlevels = bbl->btree_height;
	xfs_btree_set_ptr_null(cur, &child_ptr);
	xfs_btree_set_ptr_null(cur, &ptr);

	xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
			&avg_per_block, &blocks, &blocks_with_extra);

	/* Load each leaf block. */
	for (i = 0; i < blocks; i++) {
		unsigned int	nr_this_block = avg_per_block;

		/*
		 * Due to rounding, btree blocks will not be evenly populated
		 * in most cases.  blocks_with_extra tells us how many blocks
		 * will receive an extra record to distribute the excess
		 * across the current level as evenly as possible.
		 */
		if (i < blocks_with_extra)
			nr_this_block++;

		ret = xfs_btree_bload_prep_block(cur, bbl, &buffers_list,
				level, nr_this_block, &ptr, &bp, &block,
				priv);
		if (ret)
			goto out;

		trace_xfs_btree_bload_block(cur, level, i, blocks, &ptr,
				nr_this_block);

		ret = xfs_btree_bload_leaf(cur, nr_this_block,
				bbl->get_record, block, priv);
		if (ret)
			goto out;

		/*
		 * Record the leftmost leaf pointer so we know where to start
		 * with the first node level.
		 */
		if (i == 0)
			xfs_btree_copy_ptrs(cur, &child_ptr, &ptr, 1);
	}
	total_blocks += blocks;
	xfs_btree_bload_drop_buf(&buffers_list, &bp);

	/* Populate the internal btree nodes. */
	for (level = 1; level < cur->bc_nlevels; level++) {
		union xfs_btree_ptr	first_ptr;

		nr_this_level = blocks;
		block = NULL;
		xfs_btree_set_ptr_null(cur, &ptr);

		xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
				&avg_per_block, &blocks, &blocks_with_extra);

		/* Load each node block. */
		for (i = 0; i < blocks; i++) {
			unsigned int	nr_this_block = avg_per_block;

			if (i < blocks_with_extra)
				nr_this_block++;

			ret = xfs_btree_bload_prep_block(cur, bbl,
					&buffers_list, level, nr_this_block,
					&ptr, &bp, &block, priv);
			if (ret)
				goto out;

			trace_xfs_btree_bload_block(cur, level, i, blocks,
					&ptr, nr_this_block);

			ret = xfs_btree_bload_node(cur, nr_this_block,
					&child_ptr, block);
			if (ret)
				goto out;

			/*
			 * Record the leftmost node pointer so that we know
			 * where to start the next node level above this one.
			 */
			if (i == 0)
				xfs_btree_copy_ptrs(cur, &first_ptr, &ptr, 1);
		}
		total_blocks += blocks;
		xfs_btree_bload_drop_buf(&buffers_list, &bp);
		xfs_btree_copy_ptrs(cur, &child_ptr, &first_ptr, 1);
	}

	/* Initialize the new root. */
	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
		ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
		cur->bc_ino.ifake->if_levels = cur->bc_nlevels;
		cur->bc_ino.ifake->if_blocks = total_blocks - 1;
	} else {
		cur->bc_ag.afake->af_root = be32_to_cpu(ptr.s);
		cur->bc_ag.afake->af_levels = cur->bc_nlevels;
		cur->bc_ag.afake->af_blocks = total_blocks;
	}

	/*
	 * Write the new blocks to disk.  If the ordered list isn't empty
	 * after that, then something went wrong and we have to fail.  This
	 * should never happen, but we'll check anyway.
	 */
	ret = xfs_buf_delwri_submit(&buffers_list);
	if (ret)
		goto out;
	if (!list_empty(&buffers_list)) {
		ASSERT(list_empty(&buffers_list));
		ret = -EIO;
	}

out:
	xfs_buf_delwri_cancel(&buffers_list);
	if (bp)
		xfs_buf_relse(bp);
	return ret;
}