// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_root,
			      xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_blk,
			      xfs_da_state_blk_t *split_blk,
			      xfs_da_state_blk_t *blk_to_add,
			      int treelevel,
			      int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *node_blk_1,
				   xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
			     xfs_da_state_blk_t *old_node_blk,
			     xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
			     xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *src_node_blk,
				   xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
			      xfs_da_state_blk_t *drop_blk,
			      xfs_da_state_blk_t *save_blk);


kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}
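/*
 * Illustrative lifecycle (a sketch, not code from this file): callers
 * typically pair the allocation above with the free routine below around
 * a single lookup or tree-modification pass, e.g.:
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;
 *	state->mp = args->dp->i_mount;
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	...
 *	xfs_da_state_free(state);
 */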
/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_cache_free(xfs_da_state_zone, state);
}

static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
{
	if (whichfork == XFS_DATA_FORK)
		return mp->m_dir_geo->fsbcount;
	return mp->m_attr_geo->fsbcount;
}

void
xfs_da3_node_hdr_from_disk(
	struct xfs_mount	*mp,
	struct xfs_da3_icnode_hdr *to,
	struct xfs_da_intnode	*from)
{
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from;

		to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
		to->back = be32_to_cpu(from3->hdr.info.hdr.back);
		to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
		to->count = be16_to_cpu(from3->hdr.__count);
		to->level = be16_to_cpu(from3->hdr.__level);
		to->btree = from3->__btree;
		ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
	} else {
		to->forw = be32_to_cpu(from->hdr.info.forw);
		to->back = be32_to_cpu(from->hdr.info.back);
		to->magic = be16_to_cpu(from->hdr.info.magic);
		to->count = be16_to_cpu(from->hdr.__count);
		to->level = be16_to_cpu(from->hdr.__level);
		to->btree = from->__btree;
		ASSERT(to->magic == XFS_DA_NODE_MAGIC);
	}
}

void
xfs_da3_node_hdr_to_disk(
	struct xfs_mount	*mp,
	struct xfs_da_intnode	*to,
	struct xfs_da3_icnode_hdr *from)
{
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to;

		ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
		to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
		to3->hdr.info.hdr.back = cpu_to_be32(from->back);
		to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
		to3->hdr.__count = cpu_to_be16(from->count);
		to3->hdr.__level = cpu_to_be16(from->level);
	} else {
		ASSERT(from->magic == XFS_DA_NODE_MAGIC);
		to->hdr.info.forw = cpu_to_be32(from->forw);
		to->hdr.info.back = cpu_to_be32(from->back);
		to->hdr.info.magic = cpu_to_be16(from->magic);
		to->hdr.__count = cpu_to_be16(from->count);
		to->hdr.__level = cpu_to_be16(from->level);
	}
}
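/*
 * For reference, a sketch of the on-disk header nesting the verifier below
 * relies on (declared in xfs_da_format.h; reproduced here from memory, so
 * treat exact field order as an assumption):
 *
 *	struct xfs_da3_blkinfo {
 *		struct xfs_da_blkinfo	hdr;	// v4 forw/back/magic
 *		__be32			crc;
 *		__be64			blkno;
 *		__be64			lsn;
 *		uuid_t			uuid;
 *		__be64			owner;
 *	};
 *
 * The v5 structure embeds the v4 one at offset zero, which is why
 * &hdr3->hdr can be used to read the magic on both formats.
 */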
/*
 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
 * accessible on v5 filesystems. This header format is common across da node,
 * attr leaf and dir leaf blocks.
 */
xfs_failaddr_t
xfs_da3_blkinfo_verify(
	struct xfs_buf		*bp,
	struct xfs_da3_blkinfo	*hdr3)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_da_blkinfo	*hdr = &hdr3->hdr;

	if (!xfs_verify_magic16(bp, hdr->magic))
		return __this_address;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
			return __this_address;
	}

	return NULL;
}

static xfs_failaddr_t
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	xfs_failaddr_t		fa;

	xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);

	fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
	if (fa)
		return fa;

	if (ichdr.level == 0)
		return __this_address;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return __this_address;
	if (ichdr.count == 0)
		return __this_address;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return __this_address;

	/* XXX: hash order check? */

	return NULL;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_da3_node_hdr	*hdr3 = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_da3_node_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf level blocks when detection identifies the tree as a node format
 * tree incorrectly. In this case, we need to swap the verifier to match the
 * correct format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	xfs_failaddr_t		fa;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_verifier_error(bp, -EFSBADCRC,
					__this_address);
			break;
		}
		/* fall through */
	case XFS_DA_NODE_MAGIC:
		fa = xfs_da3_node_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
		break;
	}
}
/* Verify the structure of a da3 block. */
static xfs_failaddr_t
xfs_da3_node_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
	case XFS_DA_NODE_MAGIC:
		return xfs_da3_node_verify(bp);
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		return bp->b_ops->verify_struct(bp);
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		return bp->b_ops->verify_struct(bp);
	default:
		return __this_address;
	}
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.name = "xfs_da3_node",
	.magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
		     cpu_to_be16(XFS_DA3_NODE_MAGIC) },
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
	.verify_struct = xfs_da3_node_verify_struct,
};

static int
xfs_da3_node_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA_NODE_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
		return 0;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_ATTR_LEAF_BUF);
		return 0;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
		return 0;
	default:
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp,
				info, sizeof(*info));
		xfs_trans_brelse(tp, bp);
		return -EFSCORRUPTED;
	}
}

int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	int			error;

	error = xfs_da_read_buf(tp, dp, bno, 0, bpp, whichfork,
			&xfs_da3_node_buf_ops);
	if (error || !*bpp || !tp)
		return error;
	return xfs_da3_node_set_type(tp, *bpp);
}

int
xfs_da3_node_read_mapped(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			error;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno,
			XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0,
			bpp, &xfs_da3_node_buf_ops);
	if (error || !*bpp)
		return error;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(*bpp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(*bpp, XFS_DIR_BTREE_REF);

	if (!tp)
		return 0;
	return xfs_da3_node_set_type(tp, *bpp);
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/
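/*
 * Overview (a summary of the routines below, not new behavior): growth is
 * bottom-up. xfs_da3_split() splits a full leaf, which may in turn require
 * xfs_da3_node_split() at each interior level, and finally
 * xfs_da3_root_split() if the root itself overflows.
 */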
/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));

	*bpp = bp;
	return 0;
}

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again. The new
			 * extrablk will be consumed by xfs_da3_node_split if
			 * the node is split.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk,
						   addblk, max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * xfs_da3_node_split() should have consumed any extra blocks we added
	 * during a double leaf split in the attr fork. This is guaranteed as
	 * we can't be here if the attr fork only has a single leaf block.
	 */
	ASSERT(state->extravalid == 0 ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error)
		goto out;

	/*
	 * Update pointers to the node which used to be block 0 and just got
	 * bumped because of the addition of a new root node. Note that the
	 * original block 0 could be at any position in the list of blocks in
	 * the tree.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
			xfs_buf_corruption_error(oldblk->bp);
			error = -EFSCORRUPTED;
			goto out;
		}
		node = addblk->bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
			xfs_buf_corruption_error(oldblk->bp);
			error = -EFSCORRUPTED;
			goto out;
		}
		node = addblk->bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
out:
	addblk->bp = NULL;
	return error;
}

/*
 * Split the root. We have to create a new root and point to the two
 * parts (the split old root) that we just created. Copy block zero to
 * the EOF, extending the inode in the process.
 */
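/*
 * Why copy rather than relink (a summary, inferred from the code below):
 * the root of a da btree lives at a fixed offset (block 0 for the attr
 * fork, args->geo->leafblk for directories), so the root block itself can
 * never move. The old root's contents are therefore copied out to a
 * freshly allocated block, and block 0 is rebuilt as the new, taller root.
 */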
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
		btree = icnodehdr.btree;
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&leafhdr.ents[leafhdr.count] -
			(char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	btree = nodehdr.btree;
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->args->geo->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return error;	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					    &newblk->bp, state->args->whichfork);
		if (error)
			return error;	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return error;
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return 0;
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
	btree1 = nodehdr1.btree;
	btree2 = nodehdr2.btree;

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
	      be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
		btree1 = nodehdr1.btree;
		btree2 = nodehdr2.btree;
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr,
				state->args->geo->node_hdr_size));

	xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				state->args->geo->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
		btree1 = nodehdr1.btree;
		btree2 = nodehdr2.btree;
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	btree = nodehdr.btree;

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr,
				state->args->geo->node_hdr_size));
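	/*
	 * Invariant worth noting (a summary, not new behavior): each entry
	 * in an interior node carries the largest hashval of the subtree it
	 * points to, so any change to a block's last entry must be pushed
	 * upward, which is what xfs_da3_fixhashpath() does with the value
	 * recorded below.
	 */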
	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return error;
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
					    drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return error;
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return error;
}

#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroothdr.btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;

		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
			  args->geo->blksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return error;
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
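/*
 * Worked example (illustrative numbers): with node_ents = 64, the merge
 * test in the routine below allows coalescing only when this node's count
 * plus a sibling's count is at most 64 - (64 >> 2) = 48 entries, i.e. when
 * the combined block would still have at least 25% free space.
 */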
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
		if (error)
			return error;
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count  = state->args->geo->node_ents;
	count -= state->args->geo->node_ents >> 2;
	count -= nodehdr.count;

	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;
		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp, blkno, &bp,
				state->args->whichfork);
		if (error)
			return error;

		node = bp->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
					   0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}

/*
 * Pick up the last hash value from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da3_icnode_hdr nodehdr;

	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
		btree = nodehdr.btree;
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = nodehdr.btree;
	if (index < nodehdr.count - 1) {
		tmp  = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
	xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
	drop_btree = drop_hdr.btree;
	save_btree = save_hdr.btree;
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				state->args->geo->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashvals so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	unsigned int		expected_level = 0;
	uint16_t		magic;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = args->geo->leafblk;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					  &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		magic = be16_to_cpu(curr->magic);

		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (magic == XFS_DIR2_LEAFN_MAGIC ||
		    magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		}

		if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
			xfs_buf_corruption_error(blk->bp);
			return -EFSCORRUPTED;
		}

		blk->magic = XFS_DA_NODE_MAGIC;

		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
		btree = nodehdr.btree;

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
			xfs_buf_corruption_error(blk->bp);
			return -EFSCORRUPTED;
		}

		/* Check the level from the root. */
		if (blkno == args->geo->leafblk)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level) {
			xfs_buf_corruption_error(blk->bp);
			return -EFSCORRUPTED;
		} else
			expected_level--;

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

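		/*
		 * Example (illustrative): with hashvals {10, 20, 20, 20, 30}
		 * and a search value of 20, the binary search below may stop
		 * on any of the three 20s; the linear scan that follows then
		 * backs up to the first entry >= 20, so duplicates are
		 * always walked from the left.
		 */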
		/*
		 * Binary search.  (note: small blocks will skip this loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashvals, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}

		/* We can't point back to the root. */
		if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk))
			return -EFSCORRUPTED;
	}

	if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
		return -EFSCORRUPTED;

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return -EFSCORRUPTED;
		}
		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
						   &retval);
			if (error)
				return error;
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = -ENOATTR;
			}
		}
		break;
	}
	*result = retval;
	return 0;
}

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_inode	*dp,
	struct xfs_buf		*node1_bp,
	struct xfs_buf		*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1);
	xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2);
	btree1 = node1hdr.btree;
	btree2 = node2hdr.btree;

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->back),
						&bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->forw),
						&bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						&bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						&bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}
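/*
 * Note on the routine below (a summary of its return convention): when the
 * path is already at the edge of the tree in the requested direction,
 * *result is set to -ENOENT and 0 is returned; callers that probe for
 * siblings (e.g. the toosmall checks above) rely on that distinction
 * between "no sibling exists" and a real error.
 */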

/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hash values) or "!forward" (lower hash values) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active - 1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
					   blk->bp->b_addr);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = -ENOENT;	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Read the next child block into a local buffer.
		 */
		error = xfs_da3_node_read(args->trans, dp, blkno, &bp,
					  args->whichfork);
		if (error)
			return error;

		/*
		 * Release the old block (if it's dirty, the trans doesn't
		 * actually let go) and swap the local buffer into the path
		 * structure.  This ensures failure of the above read doesn't
		 * set a NULL buffer in an active slot in the path.
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);
		blk->blkno = blkno;
		blk->bp = bp;

		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
						   bp->b_addr);
			btree = nodehdr.btree;
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}
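
/*
 * Illustrative sketch (not part of the original source): a caller that has
 * walked to a leaf and needs its right sibling would shift the whole path
 * sideways roughly like this, assuming "state" was filled in by a prior
 * lookup:
 */
#if 0	/* example only, never compiled */
	int	retval;
	int	error;

	/* step to the next leaf (higher hash values), releasing old buffers */
	error = xfs_da3_path_shift(state, &state->path, 1, 1, &retval);
	if (error)
		return error;
	if (retval == -ENOENT)
		return 0;	/* already at the rightmost block */
#endif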

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const uint8_t *name, int namelen)
{
	xfs_dahash_t	hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
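
/*
 * Illustrative sketch (not part of the original source): a userspace
 * rendering of the same hash, handy for checking values by hand.  For
 * example, hashing the 3-byte name "abc" gives
 * ('a' << 14) ^ ('b' << 7) ^ 'c' == 0x187163, since the initial hash is
 * zero and rotating zero yields zero.
 */
#if 0	/* example only, never compiled */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_rol32(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));	/* n is always 7..28 here */
}

static uint32_t example_da_hashname(const uint8_t *name, int namelen)
{
	uint32_t hash = 0;

	for (; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       name[3] ^ example_rol32(hash, 7 * 4);
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ name[2] ^
		       example_rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ name[1] ^ example_rol32(hash, 7 * 2);
	case 1:
		return name[0] ^ example_rol32(hash, 7 * 1);
	default:
		return hash;
	}
}

int main(void)
{
	/* prints 0x187163 */
	printf("0x%x\n", example_da_hashname((const uint8_t *)"abc", 3));
	return 0;
}
#endif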

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args	*args,
	const unsigned char	*name,
	int			len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_rfsblock_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->total, &map, &nmap);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t	b;
		int		c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, 0);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = min(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->total, &mapp[mapi], &nmap);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = -ENOSPC;
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}

/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
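
/*
 * Illustrative sketch (not part of the original source): most callers only
 * need the single-block wrapper, e.g. when a node split needs a fresh
 * block.  A hedged sketch, assuming "args" describes an active dir/attr
 * operation:
 */
#if 0	/* example only, never compiled */
	xfs_dablk_t	blkno;
	int		error;

	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;
	/* blkno is now the da-space offset of the freshly mapped block */
#endif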

/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file, which
 * can always be removed since removing it can't cause a bmap btree
 * split.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, lastoff == 0))
		return -EFSCORRUPTED;
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr,
					    dead_leaf2);
		ents = leafhdr.ents;
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
		btree = deadhdr.btree;
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (XFS_IS_CORRUPT(mp,
				   be32_to_cpu(sib_info->forw) != last_blkno ||
				   sib_info->magic != dead_info->magic)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (XFS_IS_CORRUPT(mp,
				   be32_to_cpu(sib_info->back) != last_blkno ||
				   sib_info->magic != dead_info->magic)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
		if (XFS_IS_CORRUPT(mp,
				   level >= 0 && level != par_hdr.level + 1)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = par_hdr.btree;
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.  Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (XFS_IS_CORRUPT(mp, par_blkno == 0)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
		if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = par_hdr.btree;
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
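
/*
 * Illustrative sketch (not part of the original source): the contract of
 * the swap is that the caller's (blkno, buf) pair is redirected from the
 * block it wanted to free to the old last block, whose contents now live
 * at the victim's location.  With hypothetical victim_* variables:
 */
#if 0	/* example only, never compiled */
	xfs_dablk_t	dead_blkno = victim_blkno;
	struct xfs_buf	*dead_buf = victim_buf;

	error = xfs_da3_swap_lastblock(args, &dead_blkno, &dead_buf);
	/*
	 * On success dead_blkno/dead_buf now name the old last block, which
	 * can be unmapped without ever splitting the bmap btree.
	 */
#endif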

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		dead_blkno,
	struct xfs_buf		*dead_buf)
{
	struct xfs_inode	*dp;
	int			done, error, w, count;
	struct xfs_trans	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
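
/*
 * Illustrative sketch (not part of the original source): a caller freeing
 * an empty block pairs a prior read with the shrink call; afterwards the
 * buffer has been invalidated in the transaction and must not be touched.
 * With hypothetical blkno/bp from an earlier xfs_da3_node_read():
 */
#if 0	/* example only, never compiled */
	error = xfs_da_shrink_inode(args, blkno, bp);
	if (error)
		return error;
	/* bp is gone from the caller's point of view here */
#endif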

static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	int			whichfork,
	struct xfs_buf_map	**mapp,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb = xfs_dabuf_nfsb(mp, whichfork);
	struct xfs_bmbt_irec	irec, *irecs = &irec;
	struct xfs_buf_map	*map = *mapp;
	xfs_fileoff_t		off = bno;
	int			error = 0, nirecs, i;

	if (nfsb > 1)
		irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);

	nirecs = nfsb;
	error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
			       xfs_bmapi_aflag(whichfork));
	if (error)
		goto out_free_irecs;

	/*
	 * Use the caller provided map for the single map case, else allocate
	 * a larger one that must be freed by the caller.
	 */
	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_NOFS);
		if (!map) {
			error = -ENOMEM;
			goto out_free_irecs;
		}
		*mapp = map;
	}

	for (i = 0; i < nirecs; i++) {
		if (irecs[i].br_startblock == HOLESTARTBLOCK ||
		    irecs[i].br_startblock == DELAYSTARTBLOCK)
			goto invalid_mapping;
		if (off != irecs[i].br_startoff)
			goto invalid_mapping;

		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
		off += irecs[i].br_blockcount;
	}

	if (off != bno + nfsb)
		goto invalid_mapping;

	*nmaps = nirecs;
out_free_irecs:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;

invalid_mapping:
	/* Caller ok with no mapping. */
	if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) {
		error = -EFSCORRUPTED;
		if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
			xfs_alert(mp, "%s: bno %u inode %llu",
				  __func__, bno, dp->i_ino);

			for (i = 0; i < nirecs; i++) {
				xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
					  i, irecs[i].br_startoff,
					  irecs[i].br_startblock,
					  irecs[i].br_blockcount,
					  irecs[i].br_state);
			}
		}
	} else {
		*nmaps = 0;
	}
	goto out_free_irecs;
}

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	struct xfs_buf_map	map, *mapp = &map;
	int			nmap = 1;
	int			error;

	*bpp = NULL;
	error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, &nmap);
	if (error || nmap == 0)
		goto out_free;

	error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp);
	if (error)
		goto out_free;

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	struct xfs_buf_map	map, *mapp = &map;
	int			nmap = 1;
	int			error;

	*bpp = NULL;
	error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
	if (error || !nmap)
		goto out_free;

	error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0,
				       &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
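
/*
 * Illustrative sketch (not part of the original source): reading a da node
 * block with the node verifier this file exports (xfs_da3_node_buf_ops).
 * Hedged usage, assuming tp/dp/bno describe a mapped block:
 */
#if 0	/* example only, never compiled */
	struct xfs_buf	*bp;
	int		error;

	error = xfs_da_read_buf(tp, dp, bno, 0, &bp, XFS_DATA_FORK,
				&xfs_da3_node_buf_ops);
	if (error)
		return error;
	/* bp holds verified contents and carries a dir btree reclaim hint */
#endif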

/*
 * Readahead the dir/attr block.
 */
int
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
	if (error || !nmap)
		goto out_free;

	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
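
/*
 * Illustrative sketch (not part of the original source): readahead is fire
 * and forget, so callers typically ignore the return value unless they
 * care about mapping errors.  Hedged usage with the hole-tolerant flag
 * used above:
 */
#if 0	/* example only, never compiled */
	(void)xfs_da_reada_buf(dp, bno, XFS_DABUF_MAP_HOLE_OK,
			       XFS_DATA_FORK, &xfs_da3_node_buf_ops);
#endif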