1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 4 * Copyright (c) 2013 Red Hat, Inc. 5 * All Rights Reserved. 6 */ 7 #include "xfs.h" 8 #include "xfs_fs.h" 9 #include "xfs_shared.h" 10 #include "xfs_format.h" 11 #include "xfs_log_format.h" 12 #include "xfs_trans_resv.h" 13 #include "xfs_bit.h" 14 #include "xfs_mount.h" 15 #include "xfs_dir2.h" 16 #include "xfs_dir2_priv.h" 17 #include "xfs_inode.h" 18 #include "xfs_trans.h" 19 #include "xfs_bmap.h" 20 #include "xfs_attr_leaf.h" 21 #include "xfs_error.h" 22 #include "xfs_trace.h" 23 #include "xfs_buf_item.h" 24 #include "xfs_log.h" 25 26 /* 27 * xfs_da_btree.c 28 * 29 * Routines to implement directories as Btrees of hashed names. 30 */ 31 32 /*======================================================================== 33 * Function prototypes for the kernel. 34 *========================================================================*/ 35 36 /* 37 * Routines used for growing the Btree. 38 */ 39 STATIC int xfs_da3_root_split(xfs_da_state_t *state, 40 xfs_da_state_blk_t *existing_root, 41 xfs_da_state_blk_t *new_child); 42 STATIC int xfs_da3_node_split(xfs_da_state_t *state, 43 xfs_da_state_blk_t *existing_blk, 44 xfs_da_state_blk_t *split_blk, 45 xfs_da_state_blk_t *blk_to_add, 46 int treelevel, 47 int *result); 48 STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state, 49 xfs_da_state_blk_t *node_blk_1, 50 xfs_da_state_blk_t *node_blk_2); 51 STATIC void xfs_da3_node_add(xfs_da_state_t *state, 52 xfs_da_state_blk_t *old_node_blk, 53 xfs_da_state_blk_t *new_node_blk); 54 55 /* 56 * Routines used for shrinking the Btree. 57 */ 58 STATIC int xfs_da3_root_join(xfs_da_state_t *state, 59 xfs_da_state_blk_t *root_blk); 60 STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval); 61 STATIC void xfs_da3_node_remove(xfs_da_state_t *state, 62 xfs_da_state_blk_t *drop_blk); 63 STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state, 64 xfs_da_state_blk_t *src_node_blk, 65 xfs_da_state_blk_t *dst_node_blk); 66 67 /* 68 * Utility routines. 69 */ 70 STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state, 71 xfs_da_state_blk_t *drop_blk, 72 xfs_da_state_blk_t *save_blk); 73 74 75 kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */ 76 77 /* 78 * Allocate a dir-state structure. 79 * We don't put them on the stack since they're large. 80 */ 81 xfs_da_state_t * 82 xfs_da_state_alloc(void) 83 { 84 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS); 85 } 86 87 /* 88 * Kill the altpath contents of a da-state structure. 89 */ 90 STATIC void 91 xfs_da_state_kill_altpath(xfs_da_state_t *state) 92 { 93 int i; 94 95 for (i = 0; i < state->altpath.active; i++) 96 state->altpath.blk[i].bp = NULL; 97 state->altpath.active = 0; 98 } 99 100 /* 101 * Free a da-state structure. 102 */ 103 void 104 xfs_da_state_free(xfs_da_state_t *state) 105 { 106 xfs_da_state_kill_altpath(state); 107 #ifdef DEBUG 108 memset((char *)state, 0, sizeof(*state)); 109 #endif /* DEBUG */ 110 kmem_zone_free(xfs_da_state_zone, state); 111 } 112 113 /* 114 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only 115 * accessible on v5 filesystems. This header format is common across da node, 116 * attr leaf and dir leaf blocks. 117 */ 118 xfs_failaddr_t 119 xfs_da3_blkinfo_verify( 120 struct xfs_buf *bp, 121 struct xfs_da3_blkinfo *hdr3) 122 { 123 struct xfs_mount *mp = bp->b_mount; 124 struct xfs_da_blkinfo *hdr = &hdr3->hdr; 125 126 if (!xfs_verify_magic16(bp, hdr->magic)) 127 return __this_address; 128 129 if (xfs_sb_version_hascrc(&mp->m_sb)) { 130 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid)) 131 return __this_address; 132 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) 133 return __this_address; 134 if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn))) 135 return __this_address; 136 } 137 138 return NULL; 139 } 140 141 static xfs_failaddr_t 142 xfs_da3_node_verify( 143 struct xfs_buf *bp) 144 { 145 struct xfs_mount *mp = bp->b_mount; 146 struct xfs_da_intnode *hdr = bp->b_addr; 147 struct xfs_da3_icnode_hdr ichdr; 148 const struct xfs_dir_ops *ops; 149 xfs_failaddr_t fa; 150 151 ops = xfs_dir_get_ops(mp, NULL); 152 153 ops->node_hdr_from_disk(&ichdr, hdr); 154 155 fa = xfs_da3_blkinfo_verify(bp, bp->b_addr); 156 if (fa) 157 return fa; 158 159 if (ichdr.level == 0) 160 return __this_address; 161 if (ichdr.level > XFS_DA_NODE_MAXDEPTH) 162 return __this_address; 163 if (ichdr.count == 0) 164 return __this_address; 165 166 /* 167 * we don't know if the node is for and attribute or directory tree, 168 * so only fail if the count is outside both bounds 169 */ 170 if (ichdr.count > mp->m_dir_geo->node_ents && 171 ichdr.count > mp->m_attr_geo->node_ents) 172 return __this_address; 173 174 /* XXX: hash order check? */ 175 176 return NULL; 177 } 178 179 static void 180 xfs_da3_node_write_verify( 181 struct xfs_buf *bp) 182 { 183 struct xfs_mount *mp = bp->b_mount; 184 struct xfs_buf_log_item *bip = bp->b_log_item; 185 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; 186 xfs_failaddr_t fa; 187 188 fa = xfs_da3_node_verify(bp); 189 if (fa) { 190 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 191 return; 192 } 193 194 if (!xfs_sb_version_hascrc(&mp->m_sb)) 195 return; 196 197 if (bip) 198 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn); 199 200 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF); 201 } 202 203 /* 204 * leaf/node format detection on trees is sketchy, so a node read can be done on 205 * leaf level blocks when detection identifies the tree as a node format tree 206 * incorrectly. In this case, we need to swap the verifier to match the correct 207 * format of the block being read. 208 */ 209 static void 210 xfs_da3_node_read_verify( 211 struct xfs_buf *bp) 212 { 213 struct xfs_da_blkinfo *info = bp->b_addr; 214 xfs_failaddr_t fa; 215 216 switch (be16_to_cpu(info->magic)) { 217 case XFS_DA3_NODE_MAGIC: 218 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) { 219 xfs_verifier_error(bp, -EFSBADCRC, 220 __this_address); 221 break; 222 } 223 /* fall through */ 224 case XFS_DA_NODE_MAGIC: 225 fa = xfs_da3_node_verify(bp); 226 if (fa) 227 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 228 return; 229 case XFS_ATTR_LEAF_MAGIC: 230 case XFS_ATTR3_LEAF_MAGIC: 231 bp->b_ops = &xfs_attr3_leaf_buf_ops; 232 bp->b_ops->verify_read(bp); 233 return; 234 case XFS_DIR2_LEAFN_MAGIC: 235 case XFS_DIR3_LEAFN_MAGIC: 236 bp->b_ops = &xfs_dir3_leafn_buf_ops; 237 bp->b_ops->verify_read(bp); 238 return; 239 default: 240 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address); 241 break; 242 } 243 } 244 245 /* Verify the structure of a da3 block. */ 246 static xfs_failaddr_t 247 xfs_da3_node_verify_struct( 248 struct xfs_buf *bp) 249 { 250 struct xfs_da_blkinfo *info = bp->b_addr; 251 252 switch (be16_to_cpu(info->magic)) { 253 case XFS_DA3_NODE_MAGIC: 254 case XFS_DA_NODE_MAGIC: 255 return xfs_da3_node_verify(bp); 256 case XFS_ATTR_LEAF_MAGIC: 257 case XFS_ATTR3_LEAF_MAGIC: 258 bp->b_ops = &xfs_attr3_leaf_buf_ops; 259 return bp->b_ops->verify_struct(bp); 260 case XFS_DIR2_LEAFN_MAGIC: 261 case XFS_DIR3_LEAFN_MAGIC: 262 bp->b_ops = &xfs_dir3_leafn_buf_ops; 263 return bp->b_ops->verify_struct(bp); 264 default: 265 return __this_address; 266 } 267 } 268 269 const struct xfs_buf_ops xfs_da3_node_buf_ops = { 270 .name = "xfs_da3_node", 271 .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC), 272 cpu_to_be16(XFS_DA3_NODE_MAGIC) }, 273 .verify_read = xfs_da3_node_read_verify, 274 .verify_write = xfs_da3_node_write_verify, 275 .verify_struct = xfs_da3_node_verify_struct, 276 }; 277 278 int 279 xfs_da3_node_read( 280 struct xfs_trans *tp, 281 struct xfs_inode *dp, 282 xfs_dablk_t bno, 283 xfs_daddr_t mappedbno, 284 struct xfs_buf **bpp, 285 int which_fork) 286 { 287 int err; 288 289 err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp, 290 which_fork, &xfs_da3_node_buf_ops); 291 if (!err && tp && *bpp) { 292 struct xfs_da_blkinfo *info = (*bpp)->b_addr; 293 int type; 294 295 switch (be16_to_cpu(info->magic)) { 296 case XFS_DA_NODE_MAGIC: 297 case XFS_DA3_NODE_MAGIC: 298 type = XFS_BLFT_DA_NODE_BUF; 299 break; 300 case XFS_ATTR_LEAF_MAGIC: 301 case XFS_ATTR3_LEAF_MAGIC: 302 type = XFS_BLFT_ATTR_LEAF_BUF; 303 break; 304 case XFS_DIR2_LEAFN_MAGIC: 305 case XFS_DIR3_LEAFN_MAGIC: 306 type = XFS_BLFT_DIR_LEAFN_BUF; 307 break; 308 default: 309 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, 310 tp->t_mountp, info, sizeof(*info)); 311 xfs_trans_brelse(tp, *bpp); 312 *bpp = NULL; 313 return -EFSCORRUPTED; 314 } 315 xfs_trans_buf_set_type(tp, *bpp, type); 316 } 317 return err; 318 } 319 320 /*======================================================================== 321 * Routines used for growing the Btree. 322 *========================================================================*/ 323 324 /* 325 * Create the initial contents of an intermediate node. 326 */ 327 int 328 xfs_da3_node_create( 329 struct xfs_da_args *args, 330 xfs_dablk_t blkno, 331 int level, 332 struct xfs_buf **bpp, 333 int whichfork) 334 { 335 struct xfs_da_intnode *node; 336 struct xfs_trans *tp = args->trans; 337 struct xfs_mount *mp = tp->t_mountp; 338 struct xfs_da3_icnode_hdr ichdr = {0}; 339 struct xfs_buf *bp; 340 int error; 341 struct xfs_inode *dp = args->dp; 342 343 trace_xfs_da_node_create(args); 344 ASSERT(level <= XFS_DA_NODE_MAXDEPTH); 345 346 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork); 347 if (error) 348 return error; 349 bp->b_ops = &xfs_da3_node_buf_ops; 350 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); 351 node = bp->b_addr; 352 353 if (xfs_sb_version_hascrc(&mp->m_sb)) { 354 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; 355 356 memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr)); 357 ichdr.magic = XFS_DA3_NODE_MAGIC; 358 hdr3->info.blkno = cpu_to_be64(bp->b_bn); 359 hdr3->info.owner = cpu_to_be64(args->dp->i_ino); 360 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid); 361 } else { 362 ichdr.magic = XFS_DA_NODE_MAGIC; 363 } 364 ichdr.level = level; 365 366 dp->d_ops->node_hdr_to_disk(node, &ichdr); 367 xfs_trans_log_buf(tp, bp, 368 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); 369 370 *bpp = bp; 371 return 0; 372 } 373 374 /* 375 * Split a leaf node, rebalance, then possibly split 376 * intermediate nodes, rebalance, etc. 377 */ 378 int /* error */ 379 xfs_da3_split( 380 struct xfs_da_state *state) 381 { 382 struct xfs_da_state_blk *oldblk; 383 struct xfs_da_state_blk *newblk; 384 struct xfs_da_state_blk *addblk; 385 struct xfs_da_intnode *node; 386 int max; 387 int action = 0; 388 int error; 389 int i; 390 391 trace_xfs_da_split(state->args); 392 393 /* 394 * Walk back up the tree splitting/inserting/adjusting as necessary. 395 * If we need to insert and there isn't room, split the node, then 396 * decide which fragment to insert the new block from below into. 397 * Note that we may split the root this way, but we need more fixup. 398 */ 399 max = state->path.active - 1; 400 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH)); 401 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC || 402 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC); 403 404 addblk = &state->path.blk[max]; /* initial dummy value */ 405 for (i = max; (i >= 0) && addblk; state->path.active--, i--) { 406 oldblk = &state->path.blk[i]; 407 newblk = &state->altpath.blk[i]; 408 409 /* 410 * If a leaf node then 411 * Allocate a new leaf node, then rebalance across them. 412 * else if an intermediate node then 413 * We split on the last layer, must we split the node? 414 */ 415 switch (oldblk->magic) { 416 case XFS_ATTR_LEAF_MAGIC: 417 error = xfs_attr3_leaf_split(state, oldblk, newblk); 418 if ((error != 0) && (error != -ENOSPC)) { 419 return error; /* GROT: attr is inconsistent */ 420 } 421 if (!error) { 422 addblk = newblk; 423 break; 424 } 425 /* 426 * Entry wouldn't fit, split the leaf again. The new 427 * extrablk will be consumed by xfs_da3_node_split if 428 * the node is split. 429 */ 430 state->extravalid = 1; 431 if (state->inleaf) { 432 state->extraafter = 0; /* before newblk */ 433 trace_xfs_attr_leaf_split_before(state->args); 434 error = xfs_attr3_leaf_split(state, oldblk, 435 &state->extrablk); 436 } else { 437 state->extraafter = 1; /* after newblk */ 438 trace_xfs_attr_leaf_split_after(state->args); 439 error = xfs_attr3_leaf_split(state, newblk, 440 &state->extrablk); 441 } 442 if (error) 443 return error; /* GROT: attr inconsistent */ 444 addblk = newblk; 445 break; 446 case XFS_DIR2_LEAFN_MAGIC: 447 error = xfs_dir2_leafn_split(state, oldblk, newblk); 448 if (error) 449 return error; 450 addblk = newblk; 451 break; 452 case XFS_DA_NODE_MAGIC: 453 error = xfs_da3_node_split(state, oldblk, newblk, addblk, 454 max - i, &action); 455 addblk->bp = NULL; 456 if (error) 457 return error; /* GROT: dir is inconsistent */ 458 /* 459 * Record the newly split block for the next time thru? 460 */ 461 if (action) 462 addblk = newblk; 463 else 464 addblk = NULL; 465 break; 466 } 467 468 /* 469 * Update the btree to show the new hashval for this child. 470 */ 471 xfs_da3_fixhashpath(state, &state->path); 472 } 473 if (!addblk) 474 return 0; 475 476 /* 477 * xfs_da3_node_split() should have consumed any extra blocks we added 478 * during a double leaf split in the attr fork. This is guaranteed as 479 * we can't be here if the attr fork only has a single leaf block. 480 */ 481 ASSERT(state->extravalid == 0 || 482 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC); 483 484 /* 485 * Split the root node. 486 */ 487 ASSERT(state->path.active == 0); 488 oldblk = &state->path.blk[0]; 489 error = xfs_da3_root_split(state, oldblk, addblk); 490 if (error) 491 goto out; 492 493 /* 494 * Update pointers to the node which used to be block 0 and just got 495 * bumped because of the addition of a new root node. Note that the 496 * original block 0 could be at any position in the list of blocks in 497 * the tree. 498 * 499 * Note: the magic numbers and sibling pointers are in the same physical 500 * place for both v2 and v3 headers (by design). Hence it doesn't matter 501 * which version of the xfs_da_intnode structure we use here as the 502 * result will be the same using either structure. 503 */ 504 node = oldblk->bp->b_addr; 505 if (node->hdr.info.forw) { 506 if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) { 507 error = -EFSCORRUPTED; 508 goto out; 509 } 510 node = addblk->bp->b_addr; 511 node->hdr.info.back = cpu_to_be32(oldblk->blkno); 512 xfs_trans_log_buf(state->args->trans, addblk->bp, 513 XFS_DA_LOGRANGE(node, &node->hdr.info, 514 sizeof(node->hdr.info))); 515 } 516 node = oldblk->bp->b_addr; 517 if (node->hdr.info.back) { 518 if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) { 519 error = -EFSCORRUPTED; 520 goto out; 521 } 522 node = addblk->bp->b_addr; 523 node->hdr.info.forw = cpu_to_be32(oldblk->blkno); 524 xfs_trans_log_buf(state->args->trans, addblk->bp, 525 XFS_DA_LOGRANGE(node, &node->hdr.info, 526 sizeof(node->hdr.info))); 527 } 528 out: 529 addblk->bp = NULL; 530 return error; 531 } 532 533 /* 534 * Split the root. We have to create a new root and point to the two 535 * parts (the split old root) that we just created. Copy block zero to 536 * the EOF, extending the inode in process. 537 */ 538 STATIC int /* error */ 539 xfs_da3_root_split( 540 struct xfs_da_state *state, 541 struct xfs_da_state_blk *blk1, 542 struct xfs_da_state_blk *blk2) 543 { 544 struct xfs_da_intnode *node; 545 struct xfs_da_intnode *oldroot; 546 struct xfs_da_node_entry *btree; 547 struct xfs_da3_icnode_hdr nodehdr; 548 struct xfs_da_args *args; 549 struct xfs_buf *bp; 550 struct xfs_inode *dp; 551 struct xfs_trans *tp; 552 struct xfs_dir2_leaf *leaf; 553 xfs_dablk_t blkno; 554 int level; 555 int error; 556 int size; 557 558 trace_xfs_da_root_split(state->args); 559 560 /* 561 * Copy the existing (incorrect) block from the root node position 562 * to a free space somewhere. 563 */ 564 args = state->args; 565 error = xfs_da_grow_inode(args, &blkno); 566 if (error) 567 return error; 568 569 dp = args->dp; 570 tp = args->trans; 571 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork); 572 if (error) 573 return error; 574 node = bp->b_addr; 575 oldroot = blk1->bp->b_addr; 576 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 577 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) { 578 struct xfs_da3_icnode_hdr icnodehdr; 579 580 dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot); 581 btree = dp->d_ops->node_tree_p(oldroot); 582 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot); 583 level = icnodehdr.level; 584 585 /* 586 * we are about to copy oldroot to bp, so set up the type 587 * of bp while we know exactly what it will be. 588 */ 589 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); 590 } else { 591 struct xfs_dir3_icleaf_hdr leafhdr; 592 struct xfs_dir2_leaf_entry *ents; 593 594 leaf = (xfs_dir2_leaf_t *)oldroot; 595 dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf); 596 ents = dp->d_ops->leaf_ents_p(leaf); 597 598 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC || 599 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC); 600 size = (int)((char *)&ents[leafhdr.count] - (char *)leaf); 601 level = 0; 602 603 /* 604 * we are about to copy oldroot to bp, so set up the type 605 * of bp while we know exactly what it will be. 606 */ 607 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF); 608 } 609 610 /* 611 * we can copy most of the information in the node from one block to 612 * another, but for CRC enabled headers we have to make sure that the 613 * block specific identifiers are kept intact. We update the buffer 614 * directly for this. 615 */ 616 memcpy(node, oldroot, size); 617 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) || 618 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) { 619 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node; 620 621 node3->hdr.info.blkno = cpu_to_be64(bp->b_bn); 622 } 623 xfs_trans_log_buf(tp, bp, 0, size - 1); 624 625 bp->b_ops = blk1->bp->b_ops; 626 xfs_trans_buf_copy_type(bp, blk1->bp); 627 blk1->bp = bp; 628 blk1->blkno = blkno; 629 630 /* 631 * Set up the new root node. 632 */ 633 error = xfs_da3_node_create(args, 634 (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0, 635 level + 1, &bp, args->whichfork); 636 if (error) 637 return error; 638 639 node = bp->b_addr; 640 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 641 btree = dp->d_ops->node_tree_p(node); 642 btree[0].hashval = cpu_to_be32(blk1->hashval); 643 btree[0].before = cpu_to_be32(blk1->blkno); 644 btree[1].hashval = cpu_to_be32(blk2->hashval); 645 btree[1].before = cpu_to_be32(blk2->blkno); 646 nodehdr.count = 2; 647 dp->d_ops->node_hdr_to_disk(node, &nodehdr); 648 649 #ifdef DEBUG 650 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 651 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) { 652 ASSERT(blk1->blkno >= args->geo->leafblk && 653 blk1->blkno < args->geo->freeblk); 654 ASSERT(blk2->blkno >= args->geo->leafblk && 655 blk2->blkno < args->geo->freeblk); 656 } 657 #endif 658 659 /* Header is already logged by xfs_da_node_create */ 660 xfs_trans_log_buf(tp, bp, 661 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2)); 662 663 return 0; 664 } 665 666 /* 667 * Split the node, rebalance, then add the new entry. 668 */ 669 STATIC int /* error */ 670 xfs_da3_node_split( 671 struct xfs_da_state *state, 672 struct xfs_da_state_blk *oldblk, 673 struct xfs_da_state_blk *newblk, 674 struct xfs_da_state_blk *addblk, 675 int treelevel, 676 int *result) 677 { 678 struct xfs_da_intnode *node; 679 struct xfs_da3_icnode_hdr nodehdr; 680 xfs_dablk_t blkno; 681 int newcount; 682 int error; 683 int useextra; 684 struct xfs_inode *dp = state->args->dp; 685 686 trace_xfs_da_node_split(state->args); 687 688 node = oldblk->bp->b_addr; 689 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 690 691 /* 692 * With V2 dirs the extra block is data or freespace. 693 */ 694 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK; 695 newcount = 1 + useextra; 696 /* 697 * Do we have to split the node? 698 */ 699 if (nodehdr.count + newcount > state->args->geo->node_ents) { 700 /* 701 * Allocate a new node, add to the doubly linked chain of 702 * nodes, then move some of our excess entries into it. 703 */ 704 error = xfs_da_grow_inode(state->args, &blkno); 705 if (error) 706 return error; /* GROT: dir is inconsistent */ 707 708 error = xfs_da3_node_create(state->args, blkno, treelevel, 709 &newblk->bp, state->args->whichfork); 710 if (error) 711 return error; /* GROT: dir is inconsistent */ 712 newblk->blkno = blkno; 713 newblk->magic = XFS_DA_NODE_MAGIC; 714 xfs_da3_node_rebalance(state, oldblk, newblk); 715 error = xfs_da3_blk_link(state, oldblk, newblk); 716 if (error) 717 return error; 718 *result = 1; 719 } else { 720 *result = 0; 721 } 722 723 /* 724 * Insert the new entry(s) into the correct block 725 * (updating last hashval in the process). 726 * 727 * xfs_da3_node_add() inserts BEFORE the given index, 728 * and as a result of using node_lookup_int() we always 729 * point to a valid entry (not after one), but a split 730 * operation always results in a new block whose hashvals 731 * FOLLOW the current block. 732 * 733 * If we had double-split op below us, then add the extra block too. 734 */ 735 node = oldblk->bp->b_addr; 736 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 737 if (oldblk->index <= nodehdr.count) { 738 oldblk->index++; 739 xfs_da3_node_add(state, oldblk, addblk); 740 if (useextra) { 741 if (state->extraafter) 742 oldblk->index++; 743 xfs_da3_node_add(state, oldblk, &state->extrablk); 744 state->extravalid = 0; 745 } 746 } else { 747 newblk->index++; 748 xfs_da3_node_add(state, newblk, addblk); 749 if (useextra) { 750 if (state->extraafter) 751 newblk->index++; 752 xfs_da3_node_add(state, newblk, &state->extrablk); 753 state->extravalid = 0; 754 } 755 } 756 757 return 0; 758 } 759 760 /* 761 * Balance the btree elements between two intermediate nodes, 762 * usually one full and one empty. 763 * 764 * NOTE: if blk2 is empty, then it will get the upper half of blk1. 765 */ 766 STATIC void 767 xfs_da3_node_rebalance( 768 struct xfs_da_state *state, 769 struct xfs_da_state_blk *blk1, 770 struct xfs_da_state_blk *blk2) 771 { 772 struct xfs_da_intnode *node1; 773 struct xfs_da_intnode *node2; 774 struct xfs_da_intnode *tmpnode; 775 struct xfs_da_node_entry *btree1; 776 struct xfs_da_node_entry *btree2; 777 struct xfs_da_node_entry *btree_s; 778 struct xfs_da_node_entry *btree_d; 779 struct xfs_da3_icnode_hdr nodehdr1; 780 struct xfs_da3_icnode_hdr nodehdr2; 781 struct xfs_trans *tp; 782 int count; 783 int tmp; 784 int swap = 0; 785 struct xfs_inode *dp = state->args->dp; 786 787 trace_xfs_da_node_rebalance(state->args); 788 789 node1 = blk1->bp->b_addr; 790 node2 = blk2->bp->b_addr; 791 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1); 792 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2); 793 btree1 = dp->d_ops->node_tree_p(node1); 794 btree2 = dp->d_ops->node_tree_p(node2); 795 796 /* 797 * Figure out how many entries need to move, and in which direction. 798 * Swap the nodes around if that makes it simpler. 799 */ 800 if (nodehdr1.count > 0 && nodehdr2.count > 0 && 801 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) || 802 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) < 803 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) { 804 tmpnode = node1; 805 node1 = node2; 806 node2 = tmpnode; 807 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1); 808 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2); 809 btree1 = dp->d_ops->node_tree_p(node1); 810 btree2 = dp->d_ops->node_tree_p(node2); 811 swap = 1; 812 } 813 814 count = (nodehdr1.count - nodehdr2.count) / 2; 815 if (count == 0) 816 return; 817 tp = state->args->trans; 818 /* 819 * Two cases: high-to-low and low-to-high. 820 */ 821 if (count > 0) { 822 /* 823 * Move elements in node2 up to make a hole. 824 */ 825 tmp = nodehdr2.count; 826 if (tmp > 0) { 827 tmp *= (uint)sizeof(xfs_da_node_entry_t); 828 btree_s = &btree2[0]; 829 btree_d = &btree2[count]; 830 memmove(btree_d, btree_s, tmp); 831 } 832 833 /* 834 * Move the req'd B-tree elements from high in node1 to 835 * low in node2. 836 */ 837 nodehdr2.count += count; 838 tmp = count * (uint)sizeof(xfs_da_node_entry_t); 839 btree_s = &btree1[nodehdr1.count - count]; 840 btree_d = &btree2[0]; 841 memcpy(btree_d, btree_s, tmp); 842 nodehdr1.count -= count; 843 } else { 844 /* 845 * Move the req'd B-tree elements from low in node2 to 846 * high in node1. 847 */ 848 count = -count; 849 tmp = count * (uint)sizeof(xfs_da_node_entry_t); 850 btree_s = &btree2[0]; 851 btree_d = &btree1[nodehdr1.count]; 852 memcpy(btree_d, btree_s, tmp); 853 nodehdr1.count += count; 854 855 xfs_trans_log_buf(tp, blk1->bp, 856 XFS_DA_LOGRANGE(node1, btree_d, tmp)); 857 858 /* 859 * Move elements in node2 down to fill the hole. 860 */ 861 tmp = nodehdr2.count - count; 862 tmp *= (uint)sizeof(xfs_da_node_entry_t); 863 btree_s = &btree2[count]; 864 btree_d = &btree2[0]; 865 memmove(btree_d, btree_s, tmp); 866 nodehdr2.count -= count; 867 } 868 869 /* 870 * Log header of node 1 and all current bits of node 2. 871 */ 872 dp->d_ops->node_hdr_to_disk(node1, &nodehdr1); 873 xfs_trans_log_buf(tp, blk1->bp, 874 XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size)); 875 876 dp->d_ops->node_hdr_to_disk(node2, &nodehdr2); 877 xfs_trans_log_buf(tp, blk2->bp, 878 XFS_DA_LOGRANGE(node2, &node2->hdr, 879 dp->d_ops->node_hdr_size + 880 (sizeof(btree2[0]) * nodehdr2.count))); 881 882 /* 883 * Record the last hashval from each block for upward propagation. 884 * (note: don't use the swapped node pointers) 885 */ 886 if (swap) { 887 node1 = blk1->bp->b_addr; 888 node2 = blk2->bp->b_addr; 889 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1); 890 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2); 891 btree1 = dp->d_ops->node_tree_p(node1); 892 btree2 = dp->d_ops->node_tree_p(node2); 893 } 894 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval); 895 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval); 896 897 /* 898 * Adjust the expected index for insertion. 899 */ 900 if (blk1->index >= nodehdr1.count) { 901 blk2->index = blk1->index - nodehdr1.count; 902 blk1->index = nodehdr1.count + 1; /* make it invalid */ 903 } 904 } 905 906 /* 907 * Add a new entry to an intermediate node. 908 */ 909 STATIC void 910 xfs_da3_node_add( 911 struct xfs_da_state *state, 912 struct xfs_da_state_blk *oldblk, 913 struct xfs_da_state_blk *newblk) 914 { 915 struct xfs_da_intnode *node; 916 struct xfs_da3_icnode_hdr nodehdr; 917 struct xfs_da_node_entry *btree; 918 int tmp; 919 struct xfs_inode *dp = state->args->dp; 920 921 trace_xfs_da_node_add(state->args); 922 923 node = oldblk->bp->b_addr; 924 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 925 btree = dp->d_ops->node_tree_p(node); 926 927 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count); 928 ASSERT(newblk->blkno != 0); 929 if (state->args->whichfork == XFS_DATA_FORK) 930 ASSERT(newblk->blkno >= state->args->geo->leafblk && 931 newblk->blkno < state->args->geo->freeblk); 932 933 /* 934 * We may need to make some room before we insert the new node. 935 */ 936 tmp = 0; 937 if (oldblk->index < nodehdr.count) { 938 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree); 939 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp); 940 } 941 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval); 942 btree[oldblk->index].before = cpu_to_be32(newblk->blkno); 943 xfs_trans_log_buf(state->args->trans, oldblk->bp, 944 XFS_DA_LOGRANGE(node, &btree[oldblk->index], 945 tmp + sizeof(*btree))); 946 947 nodehdr.count += 1; 948 dp->d_ops->node_hdr_to_disk(node, &nodehdr); 949 xfs_trans_log_buf(state->args->trans, oldblk->bp, 950 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); 951 952 /* 953 * Copy the last hash value from the oldblk to propagate upwards. 954 */ 955 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval); 956 } 957 958 /*======================================================================== 959 * Routines used for shrinking the Btree. 960 *========================================================================*/ 961 962 /* 963 * Deallocate an empty leaf node, remove it from its parent, 964 * possibly deallocating that block, etc... 965 */ 966 int 967 xfs_da3_join( 968 struct xfs_da_state *state) 969 { 970 struct xfs_da_state_blk *drop_blk; 971 struct xfs_da_state_blk *save_blk; 972 int action = 0; 973 int error; 974 975 trace_xfs_da_join(state->args); 976 977 drop_blk = &state->path.blk[ state->path.active-1 ]; 978 save_blk = &state->altpath.blk[ state->path.active-1 ]; 979 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC); 980 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC || 981 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC); 982 983 /* 984 * Walk back up the tree joining/deallocating as necessary. 985 * When we stop dropping blocks, break out. 986 */ 987 for ( ; state->path.active >= 2; drop_blk--, save_blk--, 988 state->path.active--) { 989 /* 990 * See if we can combine the block with a neighbor. 991 * (action == 0) => no options, just leave 992 * (action == 1) => coalesce, then unlink 993 * (action == 2) => block empty, unlink it 994 */ 995 switch (drop_blk->magic) { 996 case XFS_ATTR_LEAF_MAGIC: 997 error = xfs_attr3_leaf_toosmall(state, &action); 998 if (error) 999 return error; 1000 if (action == 0) 1001 return 0; 1002 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk); 1003 break; 1004 case XFS_DIR2_LEAFN_MAGIC: 1005 error = xfs_dir2_leafn_toosmall(state, &action); 1006 if (error) 1007 return error; 1008 if (action == 0) 1009 return 0; 1010 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk); 1011 break; 1012 case XFS_DA_NODE_MAGIC: 1013 /* 1014 * Remove the offending node, fixup hashvals, 1015 * check for a toosmall neighbor. 1016 */ 1017 xfs_da3_node_remove(state, drop_blk); 1018 xfs_da3_fixhashpath(state, &state->path); 1019 error = xfs_da3_node_toosmall(state, &action); 1020 if (error) 1021 return error; 1022 if (action == 0) 1023 return 0; 1024 xfs_da3_node_unbalance(state, drop_blk, save_blk); 1025 break; 1026 } 1027 xfs_da3_fixhashpath(state, &state->altpath); 1028 error = xfs_da3_blk_unlink(state, drop_blk, save_blk); 1029 xfs_da_state_kill_altpath(state); 1030 if (error) 1031 return error; 1032 error = xfs_da_shrink_inode(state->args, drop_blk->blkno, 1033 drop_blk->bp); 1034 drop_blk->bp = NULL; 1035 if (error) 1036 return error; 1037 } 1038 /* 1039 * We joined all the way to the top. If it turns out that 1040 * we only have one entry in the root, make the child block 1041 * the new root. 1042 */ 1043 xfs_da3_node_remove(state, drop_blk); 1044 xfs_da3_fixhashpath(state, &state->path); 1045 error = xfs_da3_root_join(state, &state->path.blk[0]); 1046 return error; 1047 } 1048 1049 #ifdef DEBUG 1050 static void 1051 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level) 1052 { 1053 __be16 magic = blkinfo->magic; 1054 1055 if (level == 1) { 1056 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 1057 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) || 1058 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) || 1059 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)); 1060 } else { 1061 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 1062 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)); 1063 } 1064 ASSERT(!blkinfo->forw); 1065 ASSERT(!blkinfo->back); 1066 } 1067 #else /* !DEBUG */ 1068 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level) 1069 #endif /* !DEBUG */ 1070 1071 /* 1072 * We have only one entry in the root. Copy the only remaining child of 1073 * the old root to block 0 as the new root node. 1074 */ 1075 STATIC int 1076 xfs_da3_root_join( 1077 struct xfs_da_state *state, 1078 struct xfs_da_state_blk *root_blk) 1079 { 1080 struct xfs_da_intnode *oldroot; 1081 struct xfs_da_args *args; 1082 xfs_dablk_t child; 1083 struct xfs_buf *bp; 1084 struct xfs_da3_icnode_hdr oldroothdr; 1085 struct xfs_da_node_entry *btree; 1086 int error; 1087 struct xfs_inode *dp = state->args->dp; 1088 1089 trace_xfs_da_root_join(state->args); 1090 1091 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); 1092 1093 args = state->args; 1094 oldroot = root_blk->bp->b_addr; 1095 dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot); 1096 ASSERT(oldroothdr.forw == 0); 1097 ASSERT(oldroothdr.back == 0); 1098 1099 /* 1100 * If the root has more than one child, then don't do anything. 1101 */ 1102 if (oldroothdr.count > 1) 1103 return 0; 1104 1105 /* 1106 * Read in the (only) child block, then copy those bytes into 1107 * the root block's buffer and free the original child block. 1108 */ 1109 btree = dp->d_ops->node_tree_p(oldroot); 1110 child = be32_to_cpu(btree[0].before); 1111 ASSERT(child != 0); 1112 error = xfs_da3_node_read(args->trans, dp, child, -1, &bp, 1113 args->whichfork); 1114 if (error) 1115 return error; 1116 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level); 1117 1118 /* 1119 * This could be copying a leaf back into the root block in the case of 1120 * there only being a single leaf block left in the tree. Hence we have 1121 * to update the b_ops pointer as well to match the buffer type change 1122 * that could occur. For dir3 blocks we also need to update the block 1123 * number in the buffer header. 1124 */ 1125 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize); 1126 root_blk->bp->b_ops = bp->b_ops; 1127 xfs_trans_buf_copy_type(root_blk->bp, bp); 1128 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) { 1129 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr; 1130 da3->blkno = cpu_to_be64(root_blk->bp->b_bn); 1131 } 1132 xfs_trans_log_buf(args->trans, root_blk->bp, 0, 1133 args->geo->blksize - 1); 1134 error = xfs_da_shrink_inode(args, child, bp); 1135 return error; 1136 } 1137 1138 /* 1139 * Check a node block and its neighbors to see if the block should be 1140 * collapsed into one or the other neighbor. Always keep the block 1141 * with the smaller block number. 1142 * If the current block is over 50% full, don't try to join it, return 0. 1143 * If the block is empty, fill in the state structure and return 2. 1144 * If it can be collapsed, fill in the state structure and return 1. 1145 * If nothing can be done, return 0. 1146 */ 1147 STATIC int 1148 xfs_da3_node_toosmall( 1149 struct xfs_da_state *state, 1150 int *action) 1151 { 1152 struct xfs_da_intnode *node; 1153 struct xfs_da_state_blk *blk; 1154 struct xfs_da_blkinfo *info; 1155 xfs_dablk_t blkno; 1156 struct xfs_buf *bp; 1157 struct xfs_da3_icnode_hdr nodehdr; 1158 int count; 1159 int forward; 1160 int error; 1161 int retval; 1162 int i; 1163 struct xfs_inode *dp = state->args->dp; 1164 1165 trace_xfs_da_node_toosmall(state->args); 1166 1167 /* 1168 * Check for the degenerate case of the block being over 50% full. 1169 * If so, it's not worth even looking to see if we might be able 1170 * to coalesce with a sibling. 1171 */ 1172 blk = &state->path.blk[ state->path.active-1 ]; 1173 info = blk->bp->b_addr; 1174 node = (xfs_da_intnode_t *)info; 1175 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1176 if (nodehdr.count > (state->args->geo->node_ents >> 1)) { 1177 *action = 0; /* blk over 50%, don't try to join */ 1178 return 0; /* blk over 50%, don't try to join */ 1179 } 1180 1181 /* 1182 * Check for the degenerate case of the block being empty. 1183 * If the block is empty, we'll simply delete it, no need to 1184 * coalesce it with a sibling block. We choose (arbitrarily) 1185 * to merge with the forward block unless it is NULL. 1186 */ 1187 if (nodehdr.count == 0) { 1188 /* 1189 * Make altpath point to the block we want to keep and 1190 * path point to the block we want to drop (this one). 1191 */ 1192 forward = (info->forw != 0); 1193 memcpy(&state->altpath, &state->path, sizeof(state->path)); 1194 error = xfs_da3_path_shift(state, &state->altpath, forward, 1195 0, &retval); 1196 if (error) 1197 return error; 1198 if (retval) { 1199 *action = 0; 1200 } else { 1201 *action = 2; 1202 } 1203 return 0; 1204 } 1205 1206 /* 1207 * Examine each sibling block to see if we can coalesce with 1208 * at least 25% free space to spare. We need to figure out 1209 * whether to merge with the forward or the backward block. 1210 * We prefer coalescing with the lower numbered sibling so as 1211 * to shrink a directory over time. 1212 */ 1213 count = state->args->geo->node_ents; 1214 count -= state->args->geo->node_ents >> 2; 1215 count -= nodehdr.count; 1216 1217 /* start with smaller blk num */ 1218 forward = nodehdr.forw < nodehdr.back; 1219 for (i = 0; i < 2; forward = !forward, i++) { 1220 struct xfs_da3_icnode_hdr thdr; 1221 if (forward) 1222 blkno = nodehdr.forw; 1223 else 1224 blkno = nodehdr.back; 1225 if (blkno == 0) 1226 continue; 1227 error = xfs_da3_node_read(state->args->trans, dp, 1228 blkno, -1, &bp, state->args->whichfork); 1229 if (error) 1230 return error; 1231 1232 node = bp->b_addr; 1233 dp->d_ops->node_hdr_from_disk(&thdr, node); 1234 xfs_trans_brelse(state->args->trans, bp); 1235 1236 if (count - thdr.count >= 0) 1237 break; /* fits with at least 25% to spare */ 1238 } 1239 if (i >= 2) { 1240 *action = 0; 1241 return 0; 1242 } 1243 1244 /* 1245 * Make altpath point to the block we want to keep (the lower 1246 * numbered block) and path point to the block we want to drop. 1247 */ 1248 memcpy(&state->altpath, &state->path, sizeof(state->path)); 1249 if (blkno < blk->blkno) { 1250 error = xfs_da3_path_shift(state, &state->altpath, forward, 1251 0, &retval); 1252 } else { 1253 error = xfs_da3_path_shift(state, &state->path, forward, 1254 0, &retval); 1255 } 1256 if (error) 1257 return error; 1258 if (retval) { 1259 *action = 0; 1260 return 0; 1261 } 1262 *action = 1; 1263 return 0; 1264 } 1265 1266 /* 1267 * Pick up the last hashvalue from an intermediate node. 1268 */ 1269 STATIC uint 1270 xfs_da3_node_lasthash( 1271 struct xfs_inode *dp, 1272 struct xfs_buf *bp, 1273 int *count) 1274 { 1275 struct xfs_da_intnode *node; 1276 struct xfs_da_node_entry *btree; 1277 struct xfs_da3_icnode_hdr nodehdr; 1278 1279 node = bp->b_addr; 1280 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1281 if (count) 1282 *count = nodehdr.count; 1283 if (!nodehdr.count) 1284 return 0; 1285 btree = dp->d_ops->node_tree_p(node); 1286 return be32_to_cpu(btree[nodehdr.count - 1].hashval); 1287 } 1288 1289 /* 1290 * Walk back up the tree adjusting hash values as necessary, 1291 * when we stop making changes, return. 1292 */ 1293 void 1294 xfs_da3_fixhashpath( 1295 struct xfs_da_state *state, 1296 struct xfs_da_state_path *path) 1297 { 1298 struct xfs_da_state_blk *blk; 1299 struct xfs_da_intnode *node; 1300 struct xfs_da_node_entry *btree; 1301 xfs_dahash_t lasthash=0; 1302 int level; 1303 int count; 1304 struct xfs_inode *dp = state->args->dp; 1305 1306 trace_xfs_da_fixhashpath(state->args); 1307 1308 level = path->active-1; 1309 blk = &path->blk[ level ]; 1310 switch (blk->magic) { 1311 case XFS_ATTR_LEAF_MAGIC: 1312 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count); 1313 if (count == 0) 1314 return; 1315 break; 1316 case XFS_DIR2_LEAFN_MAGIC: 1317 lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count); 1318 if (count == 0) 1319 return; 1320 break; 1321 case XFS_DA_NODE_MAGIC: 1322 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count); 1323 if (count == 0) 1324 return; 1325 break; 1326 } 1327 for (blk--, level--; level >= 0; blk--, level--) { 1328 struct xfs_da3_icnode_hdr nodehdr; 1329 1330 node = blk->bp->b_addr; 1331 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1332 btree = dp->d_ops->node_tree_p(node); 1333 if (be32_to_cpu(btree[blk->index].hashval) == lasthash) 1334 break; 1335 blk->hashval = lasthash; 1336 btree[blk->index].hashval = cpu_to_be32(lasthash); 1337 xfs_trans_log_buf(state->args->trans, blk->bp, 1338 XFS_DA_LOGRANGE(node, &btree[blk->index], 1339 sizeof(*btree))); 1340 1341 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval); 1342 } 1343 } 1344 1345 /* 1346 * Remove an entry from an intermediate node. 1347 */ 1348 STATIC void 1349 xfs_da3_node_remove( 1350 struct xfs_da_state *state, 1351 struct xfs_da_state_blk *drop_blk) 1352 { 1353 struct xfs_da_intnode *node; 1354 struct xfs_da3_icnode_hdr nodehdr; 1355 struct xfs_da_node_entry *btree; 1356 int index; 1357 int tmp; 1358 struct xfs_inode *dp = state->args->dp; 1359 1360 trace_xfs_da_node_remove(state->args); 1361 1362 node = drop_blk->bp->b_addr; 1363 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1364 ASSERT(drop_blk->index < nodehdr.count); 1365 ASSERT(drop_blk->index >= 0); 1366 1367 /* 1368 * Copy over the offending entry, or just zero it out. 1369 */ 1370 index = drop_blk->index; 1371 btree = dp->d_ops->node_tree_p(node); 1372 if (index < nodehdr.count - 1) { 1373 tmp = nodehdr.count - index - 1; 1374 tmp *= (uint)sizeof(xfs_da_node_entry_t); 1375 memmove(&btree[index], &btree[index + 1], tmp); 1376 xfs_trans_log_buf(state->args->trans, drop_blk->bp, 1377 XFS_DA_LOGRANGE(node, &btree[index], tmp)); 1378 index = nodehdr.count - 1; 1379 } 1380 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t)); 1381 xfs_trans_log_buf(state->args->trans, drop_blk->bp, 1382 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index]))); 1383 nodehdr.count -= 1; 1384 dp->d_ops->node_hdr_to_disk(node, &nodehdr); 1385 xfs_trans_log_buf(state->args->trans, drop_blk->bp, 1386 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); 1387 1388 /* 1389 * Copy the last hash value from the block to propagate upwards. 1390 */ 1391 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval); 1392 } 1393 1394 /* 1395 * Unbalance the elements between two intermediate nodes, 1396 * move all Btree elements from one node into another. 1397 */ 1398 STATIC void 1399 xfs_da3_node_unbalance( 1400 struct xfs_da_state *state, 1401 struct xfs_da_state_blk *drop_blk, 1402 struct xfs_da_state_blk *save_blk) 1403 { 1404 struct xfs_da_intnode *drop_node; 1405 struct xfs_da_intnode *save_node; 1406 struct xfs_da_node_entry *drop_btree; 1407 struct xfs_da_node_entry *save_btree; 1408 struct xfs_da3_icnode_hdr drop_hdr; 1409 struct xfs_da3_icnode_hdr save_hdr; 1410 struct xfs_trans *tp; 1411 int sindex; 1412 int tmp; 1413 struct xfs_inode *dp = state->args->dp; 1414 1415 trace_xfs_da_node_unbalance(state->args); 1416 1417 drop_node = drop_blk->bp->b_addr; 1418 save_node = save_blk->bp->b_addr; 1419 dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node); 1420 dp->d_ops->node_hdr_from_disk(&save_hdr, save_node); 1421 drop_btree = dp->d_ops->node_tree_p(drop_node); 1422 save_btree = dp->d_ops->node_tree_p(save_node); 1423 tp = state->args->trans; 1424 1425 /* 1426 * If the dying block has lower hashvals, then move all the 1427 * elements in the remaining block up to make a hole. 1428 */ 1429 if ((be32_to_cpu(drop_btree[0].hashval) < 1430 be32_to_cpu(save_btree[0].hashval)) || 1431 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) < 1432 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) { 1433 /* XXX: check this - is memmove dst correct? */ 1434 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t); 1435 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp); 1436 1437 sindex = 0; 1438 xfs_trans_log_buf(tp, save_blk->bp, 1439 XFS_DA_LOGRANGE(save_node, &save_btree[0], 1440 (save_hdr.count + drop_hdr.count) * 1441 sizeof(xfs_da_node_entry_t))); 1442 } else { 1443 sindex = save_hdr.count; 1444 xfs_trans_log_buf(tp, save_blk->bp, 1445 XFS_DA_LOGRANGE(save_node, &save_btree[sindex], 1446 drop_hdr.count * sizeof(xfs_da_node_entry_t))); 1447 } 1448 1449 /* 1450 * Move all the B-tree elements from drop_blk to save_blk. 1451 */ 1452 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t); 1453 memcpy(&save_btree[sindex], &drop_btree[0], tmp); 1454 save_hdr.count += drop_hdr.count; 1455 1456 dp->d_ops->node_hdr_to_disk(save_node, &save_hdr); 1457 xfs_trans_log_buf(tp, save_blk->bp, 1458 XFS_DA_LOGRANGE(save_node, &save_node->hdr, 1459 dp->d_ops->node_hdr_size)); 1460 1461 /* 1462 * Save the last hashval in the remaining block for upward propagation. 1463 */ 1464 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval); 1465 } 1466 1467 /*======================================================================== 1468 * Routines used for finding things in the Btree. 1469 *========================================================================*/ 1470 1471 /* 1472 * Walk down the Btree looking for a particular filename, filling 1473 * in the state structure as we go. 1474 * 1475 * We will set the state structure to point to each of the elements 1476 * in each of the nodes where either the hashval is or should be. 1477 * 1478 * We support duplicate hashval's so for each entry in the current 1479 * node that could contain the desired hashval, descend. This is a 1480 * pruned depth-first tree search. 1481 */ 1482 int /* error */ 1483 xfs_da3_node_lookup_int( 1484 struct xfs_da_state *state, 1485 int *result) 1486 { 1487 struct xfs_da_state_blk *blk; 1488 struct xfs_da_blkinfo *curr; 1489 struct xfs_da_intnode *node; 1490 struct xfs_da_node_entry *btree; 1491 struct xfs_da3_icnode_hdr nodehdr; 1492 struct xfs_da_args *args; 1493 xfs_dablk_t blkno; 1494 xfs_dahash_t hashval; 1495 xfs_dahash_t btreehashval; 1496 int probe; 1497 int span; 1498 int max; 1499 int error; 1500 int retval; 1501 unsigned int expected_level = 0; 1502 uint16_t magic; 1503 struct xfs_inode *dp = state->args->dp; 1504 1505 args = state->args; 1506 1507 /* 1508 * Descend thru the B-tree searching each level for the right 1509 * node to use, until the right hashval is found. 1510 */ 1511 blkno = args->geo->leafblk; 1512 for (blk = &state->path.blk[0], state->path.active = 1; 1513 state->path.active <= XFS_DA_NODE_MAXDEPTH; 1514 blk++, state->path.active++) { 1515 /* 1516 * Read the next node down in the tree. 1517 */ 1518 blk->blkno = blkno; 1519 error = xfs_da3_node_read(args->trans, args->dp, blkno, 1520 -1, &blk->bp, args->whichfork); 1521 if (error) { 1522 blk->blkno = 0; 1523 state->path.active--; 1524 return error; 1525 } 1526 curr = blk->bp->b_addr; 1527 magic = be16_to_cpu(curr->magic); 1528 1529 if (magic == XFS_ATTR_LEAF_MAGIC || 1530 magic == XFS_ATTR3_LEAF_MAGIC) { 1531 blk->magic = XFS_ATTR_LEAF_MAGIC; 1532 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); 1533 break; 1534 } 1535 1536 if (magic == XFS_DIR2_LEAFN_MAGIC || 1537 magic == XFS_DIR3_LEAFN_MAGIC) { 1538 blk->magic = XFS_DIR2_LEAFN_MAGIC; 1539 blk->hashval = xfs_dir2_leaf_lasthash(args->dp, 1540 blk->bp, NULL); 1541 break; 1542 } 1543 1544 if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) 1545 return -EFSCORRUPTED; 1546 1547 blk->magic = XFS_DA_NODE_MAGIC; 1548 1549 /* 1550 * Search an intermediate node for a match. 1551 */ 1552 node = blk->bp->b_addr; 1553 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1554 btree = dp->d_ops->node_tree_p(node); 1555 1556 /* Tree taller than we can handle; bail out! */ 1557 if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) 1558 return -EFSCORRUPTED; 1559 1560 /* Check the level from the root. */ 1561 if (blkno == args->geo->leafblk) 1562 expected_level = nodehdr.level - 1; 1563 else if (expected_level != nodehdr.level) 1564 return -EFSCORRUPTED; 1565 else 1566 expected_level--; 1567 1568 max = nodehdr.count; 1569 blk->hashval = be32_to_cpu(btree[max - 1].hashval); 1570 1571 /* 1572 * Binary search. (note: small blocks will skip loop) 1573 */ 1574 probe = span = max / 2; 1575 hashval = args->hashval; 1576 while (span > 4) { 1577 span /= 2; 1578 btreehashval = be32_to_cpu(btree[probe].hashval); 1579 if (btreehashval < hashval) 1580 probe += span; 1581 else if (btreehashval > hashval) 1582 probe -= span; 1583 else 1584 break; 1585 } 1586 ASSERT((probe >= 0) && (probe < max)); 1587 ASSERT((span <= 4) || 1588 (be32_to_cpu(btree[probe].hashval) == hashval)); 1589 1590 /* 1591 * Since we may have duplicate hashval's, find the first 1592 * matching hashval in the node. 1593 */ 1594 while (probe > 0 && 1595 be32_to_cpu(btree[probe].hashval) >= hashval) { 1596 probe--; 1597 } 1598 while (probe < max && 1599 be32_to_cpu(btree[probe].hashval) < hashval) { 1600 probe++; 1601 } 1602 1603 /* 1604 * Pick the right block to descend on. 1605 */ 1606 if (probe == max) { 1607 blk->index = max - 1; 1608 blkno = be32_to_cpu(btree[max - 1].before); 1609 } else { 1610 blk->index = probe; 1611 blkno = be32_to_cpu(btree[probe].before); 1612 } 1613 1614 /* We can't point back to the root. */ 1615 if (blkno == args->geo->leafblk) 1616 return -EFSCORRUPTED; 1617 } 1618 1619 if (expected_level != 0) 1620 return -EFSCORRUPTED; 1621 1622 /* 1623 * A leaf block that ends in the hashval that we are interested in 1624 * (final hashval == search hashval) means that the next block may 1625 * contain more entries with the same hashval, shift upward to the 1626 * next leaf and keep searching. 1627 */ 1628 for (;;) { 1629 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) { 1630 retval = xfs_dir2_leafn_lookup_int(blk->bp, args, 1631 &blk->index, state); 1632 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { 1633 retval = xfs_attr3_leaf_lookup_int(blk->bp, args); 1634 blk->index = args->index; 1635 args->blkno = blk->blkno; 1636 } else { 1637 ASSERT(0); 1638 return -EFSCORRUPTED; 1639 } 1640 if (((retval == -ENOENT) || (retval == -ENOATTR)) && 1641 (blk->hashval == args->hashval)) { 1642 error = xfs_da3_path_shift(state, &state->path, 1, 1, 1643 &retval); 1644 if (error) 1645 return error; 1646 if (retval == 0) { 1647 continue; 1648 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { 1649 /* path_shift() gives ENOENT */ 1650 retval = -ENOATTR; 1651 } 1652 } 1653 break; 1654 } 1655 *result = retval; 1656 return 0; 1657 } 1658 1659 /*======================================================================== 1660 * Utility routines. 1661 *========================================================================*/ 1662 1663 /* 1664 * Compare two intermediate nodes for "order". 1665 */ 1666 STATIC int 1667 xfs_da3_node_order( 1668 struct xfs_inode *dp, 1669 struct xfs_buf *node1_bp, 1670 struct xfs_buf *node2_bp) 1671 { 1672 struct xfs_da_intnode *node1; 1673 struct xfs_da_intnode *node2; 1674 struct xfs_da_node_entry *btree1; 1675 struct xfs_da_node_entry *btree2; 1676 struct xfs_da3_icnode_hdr node1hdr; 1677 struct xfs_da3_icnode_hdr node2hdr; 1678 1679 node1 = node1_bp->b_addr; 1680 node2 = node2_bp->b_addr; 1681 dp->d_ops->node_hdr_from_disk(&node1hdr, node1); 1682 dp->d_ops->node_hdr_from_disk(&node2hdr, node2); 1683 btree1 = dp->d_ops->node_tree_p(node1); 1684 btree2 = dp->d_ops->node_tree_p(node2); 1685 1686 if (node1hdr.count > 0 && node2hdr.count > 0 && 1687 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) || 1688 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) < 1689 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) { 1690 return 1; 1691 } 1692 return 0; 1693 } 1694 1695 /* 1696 * Link a new block into a doubly linked list of blocks (of whatever type). 1697 */ 1698 int /* error */ 1699 xfs_da3_blk_link( 1700 struct xfs_da_state *state, 1701 struct xfs_da_state_blk *old_blk, 1702 struct xfs_da_state_blk *new_blk) 1703 { 1704 struct xfs_da_blkinfo *old_info; 1705 struct xfs_da_blkinfo *new_info; 1706 struct xfs_da_blkinfo *tmp_info; 1707 struct xfs_da_args *args; 1708 struct xfs_buf *bp; 1709 int before = 0; 1710 int error; 1711 struct xfs_inode *dp = state->args->dp; 1712 1713 /* 1714 * Set up environment. 1715 */ 1716 args = state->args; 1717 ASSERT(args != NULL); 1718 old_info = old_blk->bp->b_addr; 1719 new_info = new_blk->bp->b_addr; 1720 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || 1721 old_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1722 old_blk->magic == XFS_ATTR_LEAF_MAGIC); 1723 1724 switch (old_blk->magic) { 1725 case XFS_ATTR_LEAF_MAGIC: 1726 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp); 1727 break; 1728 case XFS_DIR2_LEAFN_MAGIC: 1729 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp); 1730 break; 1731 case XFS_DA_NODE_MAGIC: 1732 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp); 1733 break; 1734 } 1735 1736 /* 1737 * Link blocks in appropriate order. 1738 */ 1739 if (before) { 1740 /* 1741 * Link new block in before existing block. 1742 */ 1743 trace_xfs_da_link_before(args); 1744 new_info->forw = cpu_to_be32(old_blk->blkno); 1745 new_info->back = old_info->back; 1746 if (old_info->back) { 1747 error = xfs_da3_node_read(args->trans, dp, 1748 be32_to_cpu(old_info->back), 1749 -1, &bp, args->whichfork); 1750 if (error) 1751 return error; 1752 ASSERT(bp != NULL); 1753 tmp_info = bp->b_addr; 1754 ASSERT(tmp_info->magic == old_info->magic); 1755 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno); 1756 tmp_info->forw = cpu_to_be32(new_blk->blkno); 1757 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1758 } 1759 old_info->back = cpu_to_be32(new_blk->blkno); 1760 } else { 1761 /* 1762 * Link new block in after existing block. 1763 */ 1764 trace_xfs_da_link_after(args); 1765 new_info->forw = old_info->forw; 1766 new_info->back = cpu_to_be32(old_blk->blkno); 1767 if (old_info->forw) { 1768 error = xfs_da3_node_read(args->trans, dp, 1769 be32_to_cpu(old_info->forw), 1770 -1, &bp, args->whichfork); 1771 if (error) 1772 return error; 1773 ASSERT(bp != NULL); 1774 tmp_info = bp->b_addr; 1775 ASSERT(tmp_info->magic == old_info->magic); 1776 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno); 1777 tmp_info->back = cpu_to_be32(new_blk->blkno); 1778 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1779 } 1780 old_info->forw = cpu_to_be32(new_blk->blkno); 1781 } 1782 1783 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); 1784 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); 1785 return 0; 1786 } 1787 1788 /* 1789 * Unlink a block from a doubly linked list of blocks. 1790 */ 1791 STATIC int /* error */ 1792 xfs_da3_blk_unlink( 1793 struct xfs_da_state *state, 1794 struct xfs_da_state_blk *drop_blk, 1795 struct xfs_da_state_blk *save_blk) 1796 { 1797 struct xfs_da_blkinfo *drop_info; 1798 struct xfs_da_blkinfo *save_info; 1799 struct xfs_da_blkinfo *tmp_info; 1800 struct xfs_da_args *args; 1801 struct xfs_buf *bp; 1802 int error; 1803 1804 /* 1805 * Set up environment. 1806 */ 1807 args = state->args; 1808 ASSERT(args != NULL); 1809 save_info = save_blk->bp->b_addr; 1810 drop_info = drop_blk->bp->b_addr; 1811 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || 1812 save_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1813 save_blk->magic == XFS_ATTR_LEAF_MAGIC); 1814 ASSERT(save_blk->magic == drop_blk->magic); 1815 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) || 1816 (be32_to_cpu(save_info->back) == drop_blk->blkno)); 1817 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) || 1818 (be32_to_cpu(drop_info->back) == save_blk->blkno)); 1819 1820 /* 1821 * Unlink the leaf block from the doubly linked chain of leaves. 1822 */ 1823 if (be32_to_cpu(save_info->back) == drop_blk->blkno) { 1824 trace_xfs_da_unlink_back(args); 1825 save_info->back = drop_info->back; 1826 if (drop_info->back) { 1827 error = xfs_da3_node_read(args->trans, args->dp, 1828 be32_to_cpu(drop_info->back), 1829 -1, &bp, args->whichfork); 1830 if (error) 1831 return error; 1832 ASSERT(bp != NULL); 1833 tmp_info = bp->b_addr; 1834 ASSERT(tmp_info->magic == save_info->magic); 1835 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno); 1836 tmp_info->forw = cpu_to_be32(save_blk->blkno); 1837 xfs_trans_log_buf(args->trans, bp, 0, 1838 sizeof(*tmp_info) - 1); 1839 } 1840 } else { 1841 trace_xfs_da_unlink_forward(args); 1842 save_info->forw = drop_info->forw; 1843 if (drop_info->forw) { 1844 error = xfs_da3_node_read(args->trans, args->dp, 1845 be32_to_cpu(drop_info->forw), 1846 -1, &bp, args->whichfork); 1847 if (error) 1848 return error; 1849 ASSERT(bp != NULL); 1850 tmp_info = bp->b_addr; 1851 ASSERT(tmp_info->magic == save_info->magic); 1852 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno); 1853 tmp_info->back = cpu_to_be32(save_blk->blkno); 1854 xfs_trans_log_buf(args->trans, bp, 0, 1855 sizeof(*tmp_info) - 1); 1856 } 1857 } 1858 1859 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); 1860 return 0; 1861 } 1862 1863 /* 1864 * Move a path "forward" or "!forward" one block at the current level. 1865 * 1866 * This routine will adjust a "path" to point to the next block 1867 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the 1868 * Btree, including updating pointers to the intermediate nodes between 1869 * the new bottom and the root. 1870 */ 1871 int /* error */ 1872 xfs_da3_path_shift( 1873 struct xfs_da_state *state, 1874 struct xfs_da_state_path *path, 1875 int forward, 1876 int release, 1877 int *result) 1878 { 1879 struct xfs_da_state_blk *blk; 1880 struct xfs_da_blkinfo *info; 1881 struct xfs_da_intnode *node; 1882 struct xfs_da_args *args; 1883 struct xfs_da_node_entry *btree; 1884 struct xfs_da3_icnode_hdr nodehdr; 1885 struct xfs_buf *bp; 1886 xfs_dablk_t blkno = 0; 1887 int level; 1888 int error; 1889 struct xfs_inode *dp = state->args->dp; 1890 1891 trace_xfs_da_path_shift(state->args); 1892 1893 /* 1894 * Roll up the Btree looking for the first block where our 1895 * current index is not at the edge of the block. Note that 1896 * we skip the bottom layer because we want the sibling block. 1897 */ 1898 args = state->args; 1899 ASSERT(args != NULL); 1900 ASSERT(path != NULL); 1901 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); 1902 level = (path->active-1) - 1; /* skip bottom layer in path */ 1903 for (blk = &path->blk[level]; level >= 0; blk--, level--) { 1904 node = blk->bp->b_addr; 1905 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1906 btree = dp->d_ops->node_tree_p(node); 1907 1908 if (forward && (blk->index < nodehdr.count - 1)) { 1909 blk->index++; 1910 blkno = be32_to_cpu(btree[blk->index].before); 1911 break; 1912 } else if (!forward && (blk->index > 0)) { 1913 blk->index--; 1914 blkno = be32_to_cpu(btree[blk->index].before); 1915 break; 1916 } 1917 } 1918 if (level < 0) { 1919 *result = -ENOENT; /* we're out of our tree */ 1920 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 1921 return 0; 1922 } 1923 1924 /* 1925 * Roll down the edge of the subtree until we reach the 1926 * same depth we were at originally. 1927 */ 1928 for (blk++, level++; level < path->active; blk++, level++) { 1929 /* 1930 * Read the next child block into a local buffer. 1931 */ 1932 error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp, 1933 args->whichfork); 1934 if (error) 1935 return error; 1936 1937 /* 1938 * Release the old block (if it's dirty, the trans doesn't 1939 * actually let go) and swap the local buffer into the path 1940 * structure. This ensures failure of the above read doesn't set 1941 * a NULL buffer in an active slot in the path. 1942 */ 1943 if (release) 1944 xfs_trans_brelse(args->trans, blk->bp); 1945 blk->blkno = blkno; 1946 blk->bp = bp; 1947 1948 info = blk->bp->b_addr; 1949 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 1950 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) || 1951 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 1952 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) || 1953 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) || 1954 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)); 1955 1956 1957 /* 1958 * Note: we flatten the magic number to a single type so we 1959 * don't have to compare against crc/non-crc types elsewhere. 1960 */ 1961 switch (be16_to_cpu(info->magic)) { 1962 case XFS_DA_NODE_MAGIC: 1963 case XFS_DA3_NODE_MAGIC: 1964 blk->magic = XFS_DA_NODE_MAGIC; 1965 node = (xfs_da_intnode_t *)info; 1966 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1967 btree = dp->d_ops->node_tree_p(node); 1968 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval); 1969 if (forward) 1970 blk->index = 0; 1971 else 1972 blk->index = nodehdr.count - 1; 1973 blkno = be32_to_cpu(btree[blk->index].before); 1974 break; 1975 case XFS_ATTR_LEAF_MAGIC: 1976 case XFS_ATTR3_LEAF_MAGIC: 1977 blk->magic = XFS_ATTR_LEAF_MAGIC; 1978 ASSERT(level == path->active-1); 1979 blk->index = 0; 1980 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); 1981 break; 1982 case XFS_DIR2_LEAFN_MAGIC: 1983 case XFS_DIR3_LEAFN_MAGIC: 1984 blk->magic = XFS_DIR2_LEAFN_MAGIC; 1985 ASSERT(level == path->active-1); 1986 blk->index = 0; 1987 blk->hashval = xfs_dir2_leaf_lasthash(args->dp, 1988 blk->bp, NULL); 1989 break; 1990 default: 1991 ASSERT(0); 1992 break; 1993 } 1994 } 1995 *result = 0; 1996 return 0; 1997 } 1998 1999 2000 /*======================================================================== 2001 * Utility routines. 2002 *========================================================================*/ 2003 2004 /* 2005 * Implement a simple hash on a character string. 2006 * Rotate the hash value by 7 bits, then XOR each character in. 2007 * This is implemented with some source-level loop unrolling. 2008 */ 2009 xfs_dahash_t 2010 xfs_da_hashname(const uint8_t *name, int namelen) 2011 { 2012 xfs_dahash_t hash; 2013 2014 /* 2015 * Do four characters at a time as long as we can. 2016 */ 2017 for (hash = 0; namelen >= 4; namelen -= 4, name += 4) 2018 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^ 2019 (name[3] << 0) ^ rol32(hash, 7 * 4); 2020 2021 /* 2022 * Now do the rest of the characters. 2023 */ 2024 switch (namelen) { 2025 case 3: 2026 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^ 2027 rol32(hash, 7 * 3); 2028 case 2: 2029 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2); 2030 case 1: 2031 return (name[0] << 0) ^ rol32(hash, 7 * 1); 2032 default: /* case 0: */ 2033 return hash; 2034 } 2035 } 2036 2037 enum xfs_dacmp 2038 xfs_da_compname( 2039 struct xfs_da_args *args, 2040 const unsigned char *name, 2041 int len) 2042 { 2043 return (args->namelen == len && memcmp(args->name, name, len) == 0) ? 2044 XFS_CMP_EXACT : XFS_CMP_DIFFERENT; 2045 } 2046 2047 static xfs_dahash_t 2048 xfs_default_hashname( 2049 struct xfs_name *name) 2050 { 2051 return xfs_da_hashname(name->name, name->len); 2052 } 2053 2054 const struct xfs_nameops xfs_default_nameops = { 2055 .hashname = xfs_default_hashname, 2056 .compname = xfs_da_compname 2057 }; 2058 2059 int 2060 xfs_da_grow_inode_int( 2061 struct xfs_da_args *args, 2062 xfs_fileoff_t *bno, 2063 int count) 2064 { 2065 struct xfs_trans *tp = args->trans; 2066 struct xfs_inode *dp = args->dp; 2067 int w = args->whichfork; 2068 xfs_rfsblock_t nblks = dp->i_d.di_nblocks; 2069 struct xfs_bmbt_irec map, *mapp; 2070 int nmap, error, got, i, mapi; 2071 2072 /* 2073 * Find a spot in the file space to put the new block. 2074 */ 2075 error = xfs_bmap_first_unused(tp, dp, count, bno, w); 2076 if (error) 2077 return error; 2078 2079 /* 2080 * Try mapping it in one filesystem block. 2081 */ 2082 nmap = 1; 2083 error = xfs_bmapi_write(tp, dp, *bno, count, 2084 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG, 2085 args->total, &map, &nmap); 2086 if (error) 2087 return error; 2088 2089 ASSERT(nmap <= 1); 2090 if (nmap == 1) { 2091 mapp = ↦ 2092 mapi = 1; 2093 } else if (nmap == 0 && count > 1) { 2094 xfs_fileoff_t b; 2095 int c; 2096 2097 /* 2098 * If we didn't get it and the block might work if fragmented, 2099 * try without the CONTIG flag. Loop until we get it all. 2100 */ 2101 mapp = kmem_alloc(sizeof(*mapp) * count, 0); 2102 for (b = *bno, mapi = 0; b < *bno + count; ) { 2103 nmap = min(XFS_BMAP_MAX_NMAP, count); 2104 c = (int)(*bno + count - b); 2105 error = xfs_bmapi_write(tp, dp, b, c, 2106 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, 2107 args->total, &mapp[mapi], &nmap); 2108 if (error) 2109 goto out_free_map; 2110 if (nmap < 1) 2111 break; 2112 mapi += nmap; 2113 b = mapp[mapi - 1].br_startoff + 2114 mapp[mapi - 1].br_blockcount; 2115 } 2116 } else { 2117 mapi = 0; 2118 mapp = NULL; 2119 } 2120 2121 /* 2122 * Count the blocks we got, make sure it matches the total. 2123 */ 2124 for (i = 0, got = 0; i < mapi; i++) 2125 got += mapp[i].br_blockcount; 2126 if (got != count || mapp[0].br_startoff != *bno || 2127 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != 2128 *bno + count) { 2129 error = -ENOSPC; 2130 goto out_free_map; 2131 } 2132 2133 /* account for newly allocated blocks in reserved blocks total */ 2134 args->total -= dp->i_d.di_nblocks - nblks; 2135 2136 out_free_map: 2137 if (mapp != &map) 2138 kmem_free(mapp); 2139 return error; 2140 } 2141 2142 /* 2143 * Add a block to the btree ahead of the file. 2144 * Return the new block number to the caller. 2145 */ 2146 int 2147 xfs_da_grow_inode( 2148 struct xfs_da_args *args, 2149 xfs_dablk_t *new_blkno) 2150 { 2151 xfs_fileoff_t bno; 2152 int error; 2153 2154 trace_xfs_da_grow_inode(args); 2155 2156 bno = args->geo->leafblk; 2157 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount); 2158 if (!error) 2159 *new_blkno = (xfs_dablk_t)bno; 2160 return error; 2161 } 2162 2163 /* 2164 * Ick. We need to always be able to remove a btree block, even 2165 * if there's no space reservation because the filesystem is full. 2166 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC. 2167 * It swaps the target block with the last block in the file. The 2168 * last block in the file can always be removed since it can't cause 2169 * a bmap btree split to do that. 2170 */ 2171 STATIC int 2172 xfs_da3_swap_lastblock( 2173 struct xfs_da_args *args, 2174 xfs_dablk_t *dead_blknop, 2175 struct xfs_buf **dead_bufp) 2176 { 2177 struct xfs_da_blkinfo *dead_info; 2178 struct xfs_da_blkinfo *sib_info; 2179 struct xfs_da_intnode *par_node; 2180 struct xfs_da_intnode *dead_node; 2181 struct xfs_dir2_leaf *dead_leaf2; 2182 struct xfs_da_node_entry *btree; 2183 struct xfs_da3_icnode_hdr par_hdr; 2184 struct xfs_inode *dp; 2185 struct xfs_trans *tp; 2186 struct xfs_mount *mp; 2187 struct xfs_buf *dead_buf; 2188 struct xfs_buf *last_buf; 2189 struct xfs_buf *sib_buf; 2190 struct xfs_buf *par_buf; 2191 xfs_dahash_t dead_hash; 2192 xfs_fileoff_t lastoff; 2193 xfs_dablk_t dead_blkno; 2194 xfs_dablk_t last_blkno; 2195 xfs_dablk_t sib_blkno; 2196 xfs_dablk_t par_blkno; 2197 int error; 2198 int w; 2199 int entno; 2200 int level; 2201 int dead_level; 2202 2203 trace_xfs_da_swap_lastblock(args); 2204 2205 dead_buf = *dead_bufp; 2206 dead_blkno = *dead_blknop; 2207 tp = args->trans; 2208 dp = args->dp; 2209 w = args->whichfork; 2210 ASSERT(w == XFS_DATA_FORK); 2211 mp = dp->i_mount; 2212 lastoff = args->geo->freeblk; 2213 error = xfs_bmap_last_before(tp, dp, &lastoff, w); 2214 if (error) 2215 return error; 2216 if (unlikely(lastoff == 0)) { 2217 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW, 2218 mp); 2219 return -EFSCORRUPTED; 2220 } 2221 /* 2222 * Read the last block in the btree space. 2223 */ 2224 last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount; 2225 error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w); 2226 if (error) 2227 return error; 2228 /* 2229 * Copy the last block into the dead buffer and log it. 2230 */ 2231 memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize); 2232 xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1); 2233 dead_info = dead_buf->b_addr; 2234 /* 2235 * Get values from the moved block. 2236 */ 2237 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 2238 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) { 2239 struct xfs_dir3_icleaf_hdr leafhdr; 2240 struct xfs_dir2_leaf_entry *ents; 2241 2242 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; 2243 dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2); 2244 ents = dp->d_ops->leaf_ents_p(dead_leaf2); 2245 dead_level = 0; 2246 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval); 2247 } else { 2248 struct xfs_da3_icnode_hdr deadhdr; 2249 2250 dead_node = (xfs_da_intnode_t *)dead_info; 2251 dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node); 2252 btree = dp->d_ops->node_tree_p(dead_node); 2253 dead_level = deadhdr.level; 2254 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval); 2255 } 2256 sib_buf = par_buf = NULL; 2257 /* 2258 * If the moved block has a left sibling, fix up the pointers. 2259 */ 2260 if ((sib_blkno = be32_to_cpu(dead_info->back))) { 2261 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w); 2262 if (error) 2263 goto done; 2264 sib_info = sib_buf->b_addr; 2265 if (unlikely( 2266 be32_to_cpu(sib_info->forw) != last_blkno || 2267 sib_info->magic != dead_info->magic)) { 2268 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", 2269 XFS_ERRLEVEL_LOW, mp); 2270 error = -EFSCORRUPTED; 2271 goto done; 2272 } 2273 sib_info->forw = cpu_to_be32(dead_blkno); 2274 xfs_trans_log_buf(tp, sib_buf, 2275 XFS_DA_LOGRANGE(sib_info, &sib_info->forw, 2276 sizeof(sib_info->forw))); 2277 sib_buf = NULL; 2278 } 2279 /* 2280 * If the moved block has a right sibling, fix up the pointers. 2281 */ 2282 if ((sib_blkno = be32_to_cpu(dead_info->forw))) { 2283 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w); 2284 if (error) 2285 goto done; 2286 sib_info = sib_buf->b_addr; 2287 if (unlikely( 2288 be32_to_cpu(sib_info->back) != last_blkno || 2289 sib_info->magic != dead_info->magic)) { 2290 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", 2291 XFS_ERRLEVEL_LOW, mp); 2292 error = -EFSCORRUPTED; 2293 goto done; 2294 } 2295 sib_info->back = cpu_to_be32(dead_blkno); 2296 xfs_trans_log_buf(tp, sib_buf, 2297 XFS_DA_LOGRANGE(sib_info, &sib_info->back, 2298 sizeof(sib_info->back))); 2299 sib_buf = NULL; 2300 } 2301 par_blkno = args->geo->leafblk; 2302 level = -1; 2303 /* 2304 * Walk down the tree looking for the parent of the moved block. 2305 */ 2306 for (;;) { 2307 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w); 2308 if (error) 2309 goto done; 2310 par_node = par_buf->b_addr; 2311 dp->d_ops->node_hdr_from_disk(&par_hdr, par_node); 2312 if (level >= 0 && level != par_hdr.level + 1) { 2313 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", 2314 XFS_ERRLEVEL_LOW, mp); 2315 error = -EFSCORRUPTED; 2316 goto done; 2317 } 2318 level = par_hdr.level; 2319 btree = dp->d_ops->node_tree_p(par_node); 2320 for (entno = 0; 2321 entno < par_hdr.count && 2322 be32_to_cpu(btree[entno].hashval) < dead_hash; 2323 entno++) 2324 continue; 2325 if (entno == par_hdr.count) { 2326 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", 2327 XFS_ERRLEVEL_LOW, mp); 2328 error = -EFSCORRUPTED; 2329 goto done; 2330 } 2331 par_blkno = be32_to_cpu(btree[entno].before); 2332 if (level == dead_level + 1) 2333 break; 2334 xfs_trans_brelse(tp, par_buf); 2335 par_buf = NULL; 2336 } 2337 /* 2338 * We're in the right parent block. 2339 * Look for the right entry. 2340 */ 2341 for (;;) { 2342 for (; 2343 entno < par_hdr.count && 2344 be32_to_cpu(btree[entno].before) != last_blkno; 2345 entno++) 2346 continue; 2347 if (entno < par_hdr.count) 2348 break; 2349 par_blkno = par_hdr.forw; 2350 xfs_trans_brelse(tp, par_buf); 2351 par_buf = NULL; 2352 if (unlikely(par_blkno == 0)) { 2353 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)", 2354 XFS_ERRLEVEL_LOW, mp); 2355 error = -EFSCORRUPTED; 2356 goto done; 2357 } 2358 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w); 2359 if (error) 2360 goto done; 2361 par_node = par_buf->b_addr; 2362 dp->d_ops->node_hdr_from_disk(&par_hdr, par_node); 2363 if (par_hdr.level != level) { 2364 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", 2365 XFS_ERRLEVEL_LOW, mp); 2366 error = -EFSCORRUPTED; 2367 goto done; 2368 } 2369 btree = dp->d_ops->node_tree_p(par_node); 2370 entno = 0; 2371 } 2372 /* 2373 * Update the parent entry pointing to the moved block. 2374 */ 2375 btree[entno].before = cpu_to_be32(dead_blkno); 2376 xfs_trans_log_buf(tp, par_buf, 2377 XFS_DA_LOGRANGE(par_node, &btree[entno].before, 2378 sizeof(btree[entno].before))); 2379 *dead_blknop = last_blkno; 2380 *dead_bufp = last_buf; 2381 return 0; 2382 done: 2383 if (par_buf) 2384 xfs_trans_brelse(tp, par_buf); 2385 if (sib_buf) 2386 xfs_trans_brelse(tp, sib_buf); 2387 xfs_trans_brelse(tp, last_buf); 2388 return error; 2389 } 2390 2391 /* 2392 * Remove a btree block from a directory or attribute. 2393 */ 2394 int 2395 xfs_da_shrink_inode( 2396 struct xfs_da_args *args, 2397 xfs_dablk_t dead_blkno, 2398 struct xfs_buf *dead_buf) 2399 { 2400 struct xfs_inode *dp; 2401 int done, error, w, count; 2402 struct xfs_trans *tp; 2403 2404 trace_xfs_da_shrink_inode(args); 2405 2406 dp = args->dp; 2407 w = args->whichfork; 2408 tp = args->trans; 2409 count = args->geo->fsbcount; 2410 for (;;) { 2411 /* 2412 * Remove extents. If we get ENOSPC for a dir we have to move 2413 * the last block to the place we want to kill. 2414 */ 2415 error = xfs_bunmapi(tp, dp, dead_blkno, count, 2416 xfs_bmapi_aflag(w), 0, &done); 2417 if (error == -ENOSPC) { 2418 if (w != XFS_DATA_FORK) 2419 break; 2420 error = xfs_da3_swap_lastblock(args, &dead_blkno, 2421 &dead_buf); 2422 if (error) 2423 break; 2424 } else { 2425 break; 2426 } 2427 } 2428 xfs_trans_binval(tp, dead_buf); 2429 return error; 2430 } 2431 2432 /* 2433 * See if the mapping(s) for this btree block are valid, i.e. 2434 * don't contain holes, are logically contiguous, and cover the whole range. 2435 */ 2436 STATIC int 2437 xfs_da_map_covers_blocks( 2438 int nmap, 2439 xfs_bmbt_irec_t *mapp, 2440 xfs_dablk_t bno, 2441 int count) 2442 { 2443 int i; 2444 xfs_fileoff_t off; 2445 2446 for (i = 0, off = bno; i < nmap; i++) { 2447 if (mapp[i].br_startblock == HOLESTARTBLOCK || 2448 mapp[i].br_startblock == DELAYSTARTBLOCK) { 2449 return 0; 2450 } 2451 if (off != mapp[i].br_startoff) { 2452 return 0; 2453 } 2454 off += mapp[i].br_blockcount; 2455 } 2456 return off == bno + count; 2457 } 2458 2459 /* 2460 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map. 2461 * 2462 * For the single map case, it is assumed that the caller has provided a pointer 2463 * to a valid xfs_buf_map. For the multiple map case, this function will 2464 * allocate the xfs_buf_map to hold all the maps and replace the caller's single 2465 * map pointer with the allocated map. 2466 */ 2467 static int 2468 xfs_buf_map_from_irec( 2469 struct xfs_mount *mp, 2470 struct xfs_buf_map **mapp, 2471 int *nmaps, 2472 struct xfs_bmbt_irec *irecs, 2473 int nirecs) 2474 { 2475 struct xfs_buf_map *map; 2476 int i; 2477 2478 ASSERT(*nmaps == 1); 2479 ASSERT(nirecs >= 1); 2480 2481 if (nirecs > 1) { 2482 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), 2483 KM_NOFS); 2484 if (!map) 2485 return -ENOMEM; 2486 *mapp = map; 2487 } 2488 2489 *nmaps = nirecs; 2490 map = *mapp; 2491 for (i = 0; i < *nmaps; i++) { 2492 ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK && 2493 irecs[i].br_startblock != HOLESTARTBLOCK); 2494 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock); 2495 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount); 2496 } 2497 return 0; 2498 } 2499 2500 /* 2501 * Map the block we are given ready for reading. There are three possible return 2502 * values: 2503 * -1 - will be returned if we land in a hole and mappedbno == -2 so the 2504 * caller knows not to execute a subsequent read. 2505 * 0 - if we mapped the block successfully 2506 * >0 - positive error number if there was an error. 2507 */ 2508 static int 2509 xfs_dabuf_map( 2510 struct xfs_inode *dp, 2511 xfs_dablk_t bno, 2512 xfs_daddr_t mappedbno, 2513 int whichfork, 2514 struct xfs_buf_map **map, 2515 int *nmaps) 2516 { 2517 struct xfs_mount *mp = dp->i_mount; 2518 int nfsb; 2519 int error = 0; 2520 struct xfs_bmbt_irec irec; 2521 struct xfs_bmbt_irec *irecs = &irec; 2522 int nirecs; 2523 2524 ASSERT(map && *map); 2525 ASSERT(*nmaps == 1); 2526 2527 if (whichfork == XFS_DATA_FORK) 2528 nfsb = mp->m_dir_geo->fsbcount; 2529 else 2530 nfsb = mp->m_attr_geo->fsbcount; 2531 2532 /* 2533 * Caller doesn't have a mapping. -2 means don't complain 2534 * if we land in a hole. 2535 */ 2536 if (mappedbno == -1 || mappedbno == -2) { 2537 /* 2538 * Optimize the one-block case. 2539 */ 2540 if (nfsb != 1) 2541 irecs = kmem_zalloc(sizeof(irec) * nfsb, 2542 KM_NOFS); 2543 2544 nirecs = nfsb; 2545 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs, 2546 &nirecs, xfs_bmapi_aflag(whichfork)); 2547 if (error) 2548 goto out; 2549 } else { 2550 irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno); 2551 irecs->br_startoff = (xfs_fileoff_t)bno; 2552 irecs->br_blockcount = nfsb; 2553 irecs->br_state = 0; 2554 nirecs = 1; 2555 } 2556 2557 if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) { 2558 error = mappedbno == -2 ? -1 : -EFSCORRUPTED; 2559 if (unlikely(error == -EFSCORRUPTED)) { 2560 if (xfs_error_level >= XFS_ERRLEVEL_LOW) { 2561 int i; 2562 xfs_alert(mp, "%s: bno %lld dir: inode %lld", 2563 __func__, (long long)bno, 2564 (long long)dp->i_ino); 2565 for (i = 0; i < *nmaps; i++) { 2566 xfs_alert(mp, 2567 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d", 2568 i, 2569 (long long)irecs[i].br_startoff, 2570 (long long)irecs[i].br_startblock, 2571 (long long)irecs[i].br_blockcount, 2572 irecs[i].br_state); 2573 } 2574 } 2575 XFS_ERROR_REPORT("xfs_da_do_buf(1)", 2576 XFS_ERRLEVEL_LOW, mp); 2577 } 2578 goto out; 2579 } 2580 error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs); 2581 out: 2582 if (irecs != &irec) 2583 kmem_free(irecs); 2584 return error; 2585 } 2586 2587 /* 2588 * Get a buffer for the dir/attr block. 2589 */ 2590 int 2591 xfs_da_get_buf( 2592 struct xfs_trans *trans, 2593 struct xfs_inode *dp, 2594 xfs_dablk_t bno, 2595 xfs_daddr_t mappedbno, 2596 struct xfs_buf **bpp, 2597 int whichfork) 2598 { 2599 struct xfs_buf *bp; 2600 struct xfs_buf_map map; 2601 struct xfs_buf_map *mapp; 2602 int nmap; 2603 int error; 2604 2605 *bpp = NULL; 2606 mapp = ↦ 2607 nmap = 1; 2608 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork, 2609 &mapp, &nmap); 2610 if (error) { 2611 /* mapping a hole is not an error, but we don't continue */ 2612 if (error == -1) 2613 error = 0; 2614 goto out_free; 2615 } 2616 2617 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp, 2618 mapp, nmap, 0); 2619 error = bp ? bp->b_error : -EIO; 2620 if (error) { 2621 if (bp) 2622 xfs_trans_brelse(trans, bp); 2623 goto out_free; 2624 } 2625 2626 *bpp = bp; 2627 2628 out_free: 2629 if (mapp != &map) 2630 kmem_free(mapp); 2631 2632 return error; 2633 } 2634 2635 /* 2636 * Get a buffer for the dir/attr block, fill in the contents. 2637 */ 2638 int 2639 xfs_da_read_buf( 2640 struct xfs_trans *trans, 2641 struct xfs_inode *dp, 2642 xfs_dablk_t bno, 2643 xfs_daddr_t mappedbno, 2644 struct xfs_buf **bpp, 2645 int whichfork, 2646 const struct xfs_buf_ops *ops) 2647 { 2648 struct xfs_buf *bp; 2649 struct xfs_buf_map map; 2650 struct xfs_buf_map *mapp; 2651 int nmap; 2652 int error; 2653 2654 *bpp = NULL; 2655 mapp = ↦ 2656 nmap = 1; 2657 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork, 2658 &mapp, &nmap); 2659 if (error) { 2660 /* mapping a hole is not an error, but we don't continue */ 2661 if (error == -1) 2662 error = 0; 2663 goto out_free; 2664 } 2665 2666 error = xfs_trans_read_buf_map(dp->i_mount, trans, 2667 dp->i_mount->m_ddev_targp, 2668 mapp, nmap, 0, &bp, ops); 2669 if (error) 2670 goto out_free; 2671 2672 if (whichfork == XFS_ATTR_FORK) 2673 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF); 2674 else 2675 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF); 2676 *bpp = bp; 2677 out_free: 2678 if (mapp != &map) 2679 kmem_free(mapp); 2680 2681 return error; 2682 } 2683 2684 /* 2685 * Readahead the dir/attr block. 2686 */ 2687 int 2688 xfs_da_reada_buf( 2689 struct xfs_inode *dp, 2690 xfs_dablk_t bno, 2691 xfs_daddr_t mappedbno, 2692 int whichfork, 2693 const struct xfs_buf_ops *ops) 2694 { 2695 struct xfs_buf_map map; 2696 struct xfs_buf_map *mapp; 2697 int nmap; 2698 int error; 2699 2700 mapp = ↦ 2701 nmap = 1; 2702 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork, 2703 &mapp, &nmap); 2704 if (error) { 2705 /* mapping a hole is not an error, but we don't continue */ 2706 if (error == -1) 2707 error = 0; 2708 goto out_free; 2709 } 2710 2711 mappedbno = mapp[0].bm_bn; 2712 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops); 2713 2714 out_free: 2715 if (mapp != &map) 2716 kmem_free(mapp); 2717 2718 return error; 2719 } 2720