// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_errortag.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_root,
			      xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_blk,
			      xfs_da_state_blk_t *split_blk,
			      xfs_da_state_blk_t *blk_to_add,
			      int treelevel,
			      int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *node_blk_1,
				   xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
			     xfs_da_state_blk_t *old_node_blk,
			     xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
			     xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *src_node_blk,
				   xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
			      xfs_da_state_blk_t *drop_blk,
			      xfs_da_state_blk_t *save_blk);


struct kmem_cache	*xfs_da_state_cache;	/* anchor for dir/attr state */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
struct xfs_da_state *
xfs_da_state_alloc(
	struct xfs_da_args	*args)
{
	struct xfs_da_state	*state;

	state = kmem_cache_zalloc(xfs_da_state_cache, GFP_NOFS | __GFP_NOFAIL);
	state->args = args;
	state->mp = args->dp->i_mount;
	return state;
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_cache_free(xfs_da_state_cache, state);
}

static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
{
	if (whichfork == XFS_DATA_FORK)
		return mp->m_dir_geo->fsbcount;
	return mp->m_attr_geo->fsbcount;
}

void
xfs_da3_node_hdr_from_disk(
	struct xfs_mount		*mp,
	struct xfs_da3_icnode_hdr	*to,
	struct xfs_da_intnode		*from)
{
	if (xfs_has_crc(mp)) {
		struct xfs_da3_intnode	*from3 = (struct xfs_da3_intnode *)from;

		to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
		to->back = be32_to_cpu(from3->hdr.info.hdr.back);
		to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
		to->count = be16_to_cpu(from3->hdr.__count);
		to->level = be16_to_cpu(from3->hdr.__level);
		to->btree = from3->__btree;
		ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
	} else {
		to->forw = be32_to_cpu(from->hdr.info.forw);
		to->back = be32_to_cpu(from->hdr.info.back);
		to->magic = be16_to_cpu(from->hdr.info.magic);
		to->count = be16_to_cpu(from->hdr.__count);
		to->level = be16_to_cpu(from->hdr.__level);
		to->btree = from->__btree;
		ASSERT(to->magic == XFS_DA_NODE_MAGIC);
	}
}

void
xfs_da3_node_hdr_to_disk(
	struct xfs_mount		*mp,
	struct xfs_da_intnode		*to,
	struct xfs_da3_icnode_hdr	*from)
{
	if (xfs_has_crc(mp)) {
		struct xfs_da3_intnode	*to3 = (struct xfs_da3_intnode *)to;

		ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
		to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
		to3->hdr.info.hdr.back = cpu_to_be32(from->back);
		to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
		to3->hdr.__count = cpu_to_be16(from->count);
		to3->hdr.__level = cpu_to_be16(from->level);
	} else {
		ASSERT(from->magic == XFS_DA_NODE_MAGIC);
		to->hdr.info.forw = cpu_to_be32(from->forw);
		to->hdr.info.back = cpu_to_be32(from->back);
		to->hdr.info.magic = cpu_to_be16(from->magic);
		to->hdr.__count = cpu_to_be16(from->count);
		to->hdr.__level = cpu_to_be16(from->level);
	}
}

/*
 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
 * accessible on v5 filesystems. This header format is common across da node,
 * attr leaf and dir leaf blocks.
 */
xfs_failaddr_t
xfs_da3_blkinfo_verify(
	struct xfs_buf		*bp,
	struct xfs_da3_blkinfo	*hdr3)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_da_blkinfo	*hdr = &hdr3->hdr;

	if (!xfs_verify_magic16(bp, hdr->magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (be64_to_cpu(hdr3->blkno) != xfs_buf_daddr(bp))
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
			return __this_address;
	}

	return NULL;
}

static xfs_failaddr_t
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	xfs_failaddr_t		fa;

	xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);

	fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
	if (fa)
		return fa;

	if (ichdr.level == 0)
		return __this_address;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return __this_address;
	if (ichdr.count == 0)
		return __this_address;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return __this_address;

	/* XXX: hash order check? */

	return NULL;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_da3_node_hdr	*hdr3 = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_da3_node_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_has_crc(mp))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf level blocks when detection identifies the tree as a node format
 * tree incorrectly. In this case, we need to swap the verifier to match the
 * correct format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	xfs_failaddr_t		fa;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_verifier_error(bp, -EFSBADCRC,
					__this_address);
			break;
		}
		fallthrough;
	case XFS_DA_NODE_MAGIC:
		fa = xfs_da3_node_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
		break;
	}
}

/* Verify the structure of a da3 block. */
static xfs_failaddr_t
xfs_da3_node_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
	case XFS_DA_NODE_MAGIC:
		return xfs_da3_node_verify(bp);
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		return bp->b_ops->verify_struct(bp);
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		return bp->b_ops->verify_struct(bp);
	default:
		return __this_address;
	}
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.name = "xfs_da3_node",
	.magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
		     cpu_to_be16(XFS_DA3_NODE_MAGIC) },
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
	.verify_struct = xfs_da3_node_verify_struct,
};

static int
xfs_da3_node_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA_NODE_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
		return 0;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_ATTR_LEAF_BUF);
		return 0;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
		return 0;
	default:
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp,
				info, sizeof(*info));
		xfs_trans_brelse(tp, bp);
		return -EFSCORRUPTED;
	}
}

int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	int			error;

	error = xfs_da_read_buf(tp, dp, bno, 0, bpp, whichfork,
			&xfs_da3_node_buf_ops);
	if (error || !*bpp || !tp)
		return error;
	return xfs_da3_node_set_type(tp, *bpp);
}

int
xfs_da3_node_read_mapped(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			error;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno,
			XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0,
			bpp, &xfs_da3_node_buf_ops);
	if (error || !*bpp)
		return error;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(*bpp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(*bpp, XFS_DIR_BTREE_REF);

	if (!tp)
		return 0;
	return xfs_da3_node_set_type(tp, *bpp);
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_has_crc(mp)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));

	*bpp = bp;
	return 0;
}

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	if (XFS_TEST_ERROR(false, state->mp, XFS_ERRTAG_DA_LEAF_SPLIT))
		return -EIO;

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again. The new
			 * extrablk will be consumed by xfs_da3_node_split if
			 * the node is split.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							     &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							     &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
						   max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * xfs_da3_node_split() should have consumed any extra blocks we added
	 * during a double leaf split in the attr fork. This is guaranteed as
	 * we can't be here if the attr fork only has a single leaf block.
	 */
	ASSERT(state->extravalid == 0 ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error)
		goto out;

	/*
	 * Update pointers to the node which used to be block 0 and just got
	 * bumped because of the addition of a new root node. Note that the
	 * original block 0 could be at any position in the list of blocks in
	 * the tree.
	 *
	 * Note: the magic numbers and sibling pointers are in the same physical
	 * place for both v2 and v3 headers (by design). Hence it doesn't matter
	 * which version of the xfs_da_intnode structure we use here as the
	 * result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
			xfs_buf_mark_corrupt(oldblk->bp);
			error = -EFSCORRUPTED;
			goto out;
		}
		node = addblk->bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
			xfs_buf_mark_corrupt(oldblk->bp);
			error = -EFSCORRUPTED;
			goto out;
		}
		node = addblk->bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
out:
	addblk->bp = NULL;
	return error;
}

/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
		btree = icnodehdr.btree;
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&leafhdr.ents[leafhdr.count] -
			(char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ?
			args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	btree = nodehdr.btree;
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->args->geo->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return error;	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					    &newblk->bp, state->args->whichfork);
		if (error)
			return error;	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return error;
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return 0;
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
	btree1 = nodehdr1.btree;
	btree2 = nodehdr2.btree;

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		swap(node1, node2);
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
		btree1 = nodehdr1.btree;
		btree2 = nodehdr2.btree;
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr,
				state->args->geo->node_hdr_size));

	xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				state->args->geo->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
		btree1 = nodehdr1.btree;
		btree2 = nodehdr2.btree;
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	btree = nodehdr.btree;

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr,
				state->args->geo->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return error;
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return error;
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return error;
}

#ifdef DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroothdr.btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;

		da3->blkno = cpu_to_be64(xfs_buf_daddr(root_blk->bp));
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
			  args->geo->blksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return error;
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return error;
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count  = state->args->geo->node_ents;
	count -= state->args->geo->node_ents >> 2;
	count -= nodehdr.count;

	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;
		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp, blkno, &bp,
				state->args->whichfork);
		if (error)
			return error;

		node = bp->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da3_icnode_hdr nodehdr;

	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
		btree = nodehdr.btree;
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = nodehdr.btree;
	if (index < nodehdr.count - 1) {
		tmp = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
	xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
	drop_btree = drop_hdr.btree;
	save_btree = save_hdr.btree;
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				state->args->geo->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	unsigned int		expected_level = 0;
	uint16_t		magic;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = args->geo->leafblk;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					&blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		magic = be16_to_cpu(curr->magic);

		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (magic == XFS_DIR2_LEAFN_MAGIC ||
		    magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		}

		if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
			xfs_buf_mark_corrupt(blk->bp);
			return -EFSCORRUPTED;
		}

		blk->magic = XFS_DA_NODE_MAGIC;

		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
		btree = nodehdr.btree;

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
			xfs_buf_mark_corrupt(blk->bp);
			return -EFSCORRUPTED;
		}

		/* Check the level from the root. */
		if (blkno == args->geo->leafblk)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level) {
			xfs_buf_mark_corrupt(blk->bp);
			return -EFSCORRUPTED;
		} else
			expected_level--;

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search.  (note: small blocks will skip loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashval's, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
1699 */ 1700 if (probe == max) { 1701 blk->index = max - 1; 1702 blkno = be32_to_cpu(btree[max - 1].before); 1703 } else { 1704 blk->index = probe; 1705 blkno = be32_to_cpu(btree[probe].before); 1706 } 1707 1708 /* We can't point back to the root. */ 1709 if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk)) 1710 return -EFSCORRUPTED; 1711 } 1712 1713 if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0)) 1714 return -EFSCORRUPTED; 1715 1716 /* 1717 * A leaf block that ends in the hashval that we are interested in 1718 * (final hashval == search hashval) means that the next block may 1719 * contain more entries with the same hashval, shift upward to the 1720 * next leaf and keep searching. 1721 */ 1722 for (;;) { 1723 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) { 1724 retval = xfs_dir2_leafn_lookup_int(blk->bp, args, 1725 &blk->index, state); 1726 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { 1727 retval = xfs_attr3_leaf_lookup_int(blk->bp, args); 1728 blk->index = args->index; 1729 args->blkno = blk->blkno; 1730 } else { 1731 ASSERT(0); 1732 return -EFSCORRUPTED; 1733 } 1734 if (((retval == -ENOENT) || (retval == -ENOATTR)) && 1735 (blk->hashval == args->hashval)) { 1736 error = xfs_da3_path_shift(state, &state->path, 1, 1, 1737 &retval); 1738 if (error) 1739 return error; 1740 if (retval == 0) { 1741 continue; 1742 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { 1743 /* path_shift() gives ENOENT */ 1744 retval = -ENOATTR; 1745 } 1746 } 1747 break; 1748 } 1749 *result = retval; 1750 return 0; 1751 } 1752 1753 /*======================================================================== 1754 * Utility routines. 1755 *========================================================================*/ 1756 1757 /* 1758 * Compare two intermediate nodes for "order". 1759 */ 1760 STATIC int 1761 xfs_da3_node_order( 1762 struct xfs_inode *dp, 1763 struct xfs_buf *node1_bp, 1764 struct xfs_buf *node2_bp) 1765 { 1766 struct xfs_da_intnode *node1; 1767 struct xfs_da_intnode *node2; 1768 struct xfs_da_node_entry *btree1; 1769 struct xfs_da_node_entry *btree2; 1770 struct xfs_da3_icnode_hdr node1hdr; 1771 struct xfs_da3_icnode_hdr node2hdr; 1772 1773 node1 = node1_bp->b_addr; 1774 node2 = node2_bp->b_addr; 1775 xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1); 1776 xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2); 1777 btree1 = node1hdr.btree; 1778 btree2 = node2hdr.btree; 1779 1780 if (node1hdr.count > 0 && node2hdr.count > 0 && 1781 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) || 1782 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) < 1783 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) { 1784 return 1; 1785 } 1786 return 0; 1787 } 1788 1789 /* 1790 * Link a new block into a doubly linked list of blocks (of whatever type). 1791 */ 1792 int /* error */ 1793 xfs_da3_blk_link( 1794 struct xfs_da_state *state, 1795 struct xfs_da_state_blk *old_blk, 1796 struct xfs_da_state_blk *new_blk) 1797 { 1798 struct xfs_da_blkinfo *old_info; 1799 struct xfs_da_blkinfo *new_info; 1800 struct xfs_da_blkinfo *tmp_info; 1801 struct xfs_da_args *args; 1802 struct xfs_buf *bp; 1803 int before = 0; 1804 int error; 1805 struct xfs_inode *dp = state->args->dp; 1806 1807 /* 1808 * Set up environment. 
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->back),
						&bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->forw),
						&bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
1916 */ 1917 if (be32_to_cpu(save_info->back) == drop_blk->blkno) { 1918 trace_xfs_da_unlink_back(args); 1919 save_info->back = drop_info->back; 1920 if (drop_info->back) { 1921 error = xfs_da3_node_read(args->trans, args->dp, 1922 be32_to_cpu(drop_info->back), 1923 &bp, args->whichfork); 1924 if (error) 1925 return error; 1926 ASSERT(bp != NULL); 1927 tmp_info = bp->b_addr; 1928 ASSERT(tmp_info->magic == save_info->magic); 1929 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno); 1930 tmp_info->forw = cpu_to_be32(save_blk->blkno); 1931 xfs_trans_log_buf(args->trans, bp, 0, 1932 sizeof(*tmp_info) - 1); 1933 } 1934 } else { 1935 trace_xfs_da_unlink_forward(args); 1936 save_info->forw = drop_info->forw; 1937 if (drop_info->forw) { 1938 error = xfs_da3_node_read(args->trans, args->dp, 1939 be32_to_cpu(drop_info->forw), 1940 &bp, args->whichfork); 1941 if (error) 1942 return error; 1943 ASSERT(bp != NULL); 1944 tmp_info = bp->b_addr; 1945 ASSERT(tmp_info->magic == save_info->magic); 1946 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno); 1947 tmp_info->back = cpu_to_be32(save_blk->blkno); 1948 xfs_trans_log_buf(args->trans, bp, 0, 1949 sizeof(*tmp_info) - 1); 1950 } 1951 } 1952 1953 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); 1954 return 0; 1955 } 1956 1957 /* 1958 * Move a path "forward" or "!forward" one block at the current level. 1959 * 1960 * This routine will adjust a "path" to point to the next block 1961 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the 1962 * Btree, including updating pointers to the intermediate nodes between 1963 * the new bottom and the root. 1964 */ 1965 int /* error */ 1966 xfs_da3_path_shift( 1967 struct xfs_da_state *state, 1968 struct xfs_da_state_path *path, 1969 int forward, 1970 int release, 1971 int *result) 1972 { 1973 struct xfs_da_state_blk *blk; 1974 struct xfs_da_blkinfo *info; 1975 struct xfs_da_args *args; 1976 struct xfs_da_node_entry *btree; 1977 struct xfs_da3_icnode_hdr nodehdr; 1978 struct xfs_buf *bp; 1979 xfs_dablk_t blkno = 0; 1980 int level; 1981 int error; 1982 struct xfs_inode *dp = state->args->dp; 1983 1984 trace_xfs_da_path_shift(state->args); 1985 1986 /* 1987 * Roll up the Btree looking for the first block where our 1988 * current index is not at the edge of the block. Note that 1989 * we skip the bottom layer because we want the sibling block. 1990 */ 1991 args = state->args; 1992 ASSERT(args != NULL); 1993 ASSERT(path != NULL); 1994 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); 1995 level = (path->active-1) - 1; /* skip bottom layer in path */ 1996 for (; level >= 0; level--) { 1997 blk = &path->blk[level]; 1998 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, 1999 blk->bp->b_addr); 2000 2001 if (forward && (blk->index < nodehdr.count - 1)) { 2002 blk->index++; 2003 blkno = be32_to_cpu(nodehdr.btree[blk->index].before); 2004 break; 2005 } else if (!forward && (blk->index > 0)) { 2006 blk->index--; 2007 blkno = be32_to_cpu(nodehdr.btree[blk->index].before); 2008 break; 2009 } 2010 } 2011 if (level < 0) { 2012 *result = -ENOENT; /* we're out of our tree */ 2013 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 2014 return 0; 2015 } 2016 2017 /* 2018 * Roll down the edge of the subtree until we reach the 2019 * same depth we were at originally. 2020 */ 2021 for (blk++, level++; level < path->active; blk++, level++) { 2022 /* 2023 * Read the next child block into a local buffer. 
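 * blkno was taken from the btree entry chosen one level up, so this
 * walks down the near edge of the adjacent subtree.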
2024 */ 2025 error = xfs_da3_node_read(args->trans, dp, blkno, &bp, 2026 args->whichfork); 2027 if (error) 2028 return error; 2029 2030 /* 2031 * Release the old block (if it's dirty, the trans doesn't 2032 * actually let go) and swap the local buffer into the path 2033 * structure. This ensures failure of the above read doesn't set 2034 * a NULL buffer in an active slot in the path. 2035 */ 2036 if (release) 2037 xfs_trans_brelse(args->trans, blk->bp); 2038 blk->blkno = blkno; 2039 blk->bp = bp; 2040 2041 info = blk->bp->b_addr; 2042 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 2043 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) || 2044 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 2045 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) || 2046 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) || 2047 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)); 2048 2049 2050 /* 2051 * Note: we flatten the magic number to a single type so we 2052 * don't have to compare against crc/non-crc types elsewhere. 2053 */ 2054 switch (be16_to_cpu(info->magic)) { 2055 case XFS_DA_NODE_MAGIC: 2056 case XFS_DA3_NODE_MAGIC: 2057 blk->magic = XFS_DA_NODE_MAGIC; 2058 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, 2059 bp->b_addr); 2060 btree = nodehdr.btree; 2061 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval); 2062 if (forward) 2063 blk->index = 0; 2064 else 2065 blk->index = nodehdr.count - 1; 2066 blkno = be32_to_cpu(btree[blk->index].before); 2067 break; 2068 case XFS_ATTR_LEAF_MAGIC: 2069 case XFS_ATTR3_LEAF_MAGIC: 2070 blk->magic = XFS_ATTR_LEAF_MAGIC; 2071 ASSERT(level == path->active-1); 2072 blk->index = 0; 2073 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); 2074 break; 2075 case XFS_DIR2_LEAFN_MAGIC: 2076 case XFS_DIR3_LEAFN_MAGIC: 2077 blk->magic = XFS_DIR2_LEAFN_MAGIC; 2078 ASSERT(level == path->active-1); 2079 blk->index = 0; 2080 blk->hashval = xfs_dir2_leaf_lasthash(args->dp, 2081 blk->bp, NULL); 2082 break; 2083 default: 2084 ASSERT(0); 2085 break; 2086 } 2087 } 2088 *result = 0; 2089 return 0; 2090 } 2091 2092 2093 /*======================================================================== 2094 * Utility routines. 2095 *========================================================================*/ 2096 2097 /* 2098 * Implement a simple hash on a character string. 2099 * Rotate the hash value by 7 bits, then XOR each character in. 2100 * This is implemented with some source-level loop unrolling. 2101 */ 2102 xfs_dahash_t 2103 xfs_da_hashname(const uint8_t *name, int namelen) 2104 { 2105 xfs_dahash_t hash; 2106 2107 /* 2108 * Do four characters at a time as long as we can. 2109 */ 2110 for (hash = 0; namelen >= 4; namelen -= 4, name += 4) 2111 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^ 2112 (name[3] << 0) ^ rol32(hash, 7 * 4); 2113 2114 /* 2115 * Now do the rest of the characters. 2116 */ 2117 switch (namelen) { 2118 case 3: 2119 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^ 2120 rol32(hash, 7 * 3); 2121 case 2: 2122 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2); 2123 case 1: 2124 return (name[0] << 0) ^ rol32(hash, 7 * 1); 2125 default: /* case 0: */ 2126 return hash; 2127 } 2128 } 2129 2130 enum xfs_dacmp 2131 xfs_da_compname( 2132 struct xfs_da_args *args, 2133 const unsigned char *name, 2134 int len) 2135 { 2136 return (args->namelen == len && memcmp(args->name, name, len) == 0) ? 
2137 XFS_CMP_EXACT : XFS_CMP_DIFFERENT; 2138 } 2139 2140 int 2141 xfs_da_grow_inode_int( 2142 struct xfs_da_args *args, 2143 xfs_fileoff_t *bno, 2144 int count) 2145 { 2146 struct xfs_trans *tp = args->trans; 2147 struct xfs_inode *dp = args->dp; 2148 int w = args->whichfork; 2149 xfs_rfsblock_t nblks = dp->i_nblocks; 2150 struct xfs_bmbt_irec map, *mapp; 2151 int nmap, error, got, i, mapi; 2152 2153 /* 2154 * Find a spot in the file space to put the new block. 2155 */ 2156 error = xfs_bmap_first_unused(tp, dp, count, bno, w); 2157 if (error) 2158 return error; 2159 2160 /* 2161 * Try mapping it in one filesystem block. 2162 */ 2163 nmap = 1; 2164 error = xfs_bmapi_write(tp, dp, *bno, count, 2165 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG, 2166 args->total, &map, &nmap); 2167 if (error) 2168 return error; 2169 2170 ASSERT(nmap <= 1); 2171 if (nmap == 1) { 2172 mapp = ↦ 2173 mapi = 1; 2174 } else if (nmap == 0 && count > 1) { 2175 xfs_fileoff_t b; 2176 int c; 2177 2178 /* 2179 * If we didn't get it and the block might work if fragmented, 2180 * try without the CONTIG flag. Loop until we get it all. 2181 */ 2182 mapp = kmem_alloc(sizeof(*mapp) * count, 0); 2183 for (b = *bno, mapi = 0; b < *bno + count; ) { 2184 nmap = min(XFS_BMAP_MAX_NMAP, count); 2185 c = (int)(*bno + count - b); 2186 error = xfs_bmapi_write(tp, dp, b, c, 2187 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, 2188 args->total, &mapp[mapi], &nmap); 2189 if (error) 2190 goto out_free_map; 2191 if (nmap < 1) 2192 break; 2193 mapi += nmap; 2194 b = mapp[mapi - 1].br_startoff + 2195 mapp[mapi - 1].br_blockcount; 2196 } 2197 } else { 2198 mapi = 0; 2199 mapp = NULL; 2200 } 2201 2202 /* 2203 * Count the blocks we got, make sure it matches the total. 2204 */ 2205 for (i = 0, got = 0; i < mapi; i++) 2206 got += mapp[i].br_blockcount; 2207 if (got != count || mapp[0].br_startoff != *bno || 2208 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != 2209 *bno + count) { 2210 error = -ENOSPC; 2211 goto out_free_map; 2212 } 2213 2214 /* account for newly allocated blocks in reserved blocks total */ 2215 args->total -= dp->i_nblocks - nblks; 2216 2217 out_free_map: 2218 if (mapp != &map) 2219 kmem_free(mapp); 2220 return error; 2221 } 2222 2223 /* 2224 * Add a block to the btree ahead of the file. 2225 * Return the new block number to the caller. 2226 */ 2227 int 2228 xfs_da_grow_inode( 2229 struct xfs_da_args *args, 2230 xfs_dablk_t *new_blkno) 2231 { 2232 xfs_fileoff_t bno; 2233 int error; 2234 2235 trace_xfs_da_grow_inode(args); 2236 2237 bno = args->geo->leafblk; 2238 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount); 2239 if (!error) 2240 *new_blkno = (xfs_dablk_t)bno; 2241 return error; 2242 } 2243 2244 /* 2245 * Ick. We need to always be able to remove a btree block, even 2246 * if there's no space reservation because the filesystem is full. 2247 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC. 2248 * It swaps the target block with the last block in the file. The 2249 * last block in the file can always be removed since it can't cause 2250 * a bmap btree split to do that. 
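 * After copying the last block's contents over the dead block, the
 * sibling and parent pointers that referenced the old last block are
 * redirected to the dead block's location, and the now-unused last
 * block is handed back to the caller to be unmapped instead.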
2251 */ 2252 STATIC int 2253 xfs_da3_swap_lastblock( 2254 struct xfs_da_args *args, 2255 xfs_dablk_t *dead_blknop, 2256 struct xfs_buf **dead_bufp) 2257 { 2258 struct xfs_da_blkinfo *dead_info; 2259 struct xfs_da_blkinfo *sib_info; 2260 struct xfs_da_intnode *par_node; 2261 struct xfs_da_intnode *dead_node; 2262 struct xfs_dir2_leaf *dead_leaf2; 2263 struct xfs_da_node_entry *btree; 2264 struct xfs_da3_icnode_hdr par_hdr; 2265 struct xfs_inode *dp; 2266 struct xfs_trans *tp; 2267 struct xfs_mount *mp; 2268 struct xfs_buf *dead_buf; 2269 struct xfs_buf *last_buf; 2270 struct xfs_buf *sib_buf; 2271 struct xfs_buf *par_buf; 2272 xfs_dahash_t dead_hash; 2273 xfs_fileoff_t lastoff; 2274 xfs_dablk_t dead_blkno; 2275 xfs_dablk_t last_blkno; 2276 xfs_dablk_t sib_blkno; 2277 xfs_dablk_t par_blkno; 2278 int error; 2279 int w; 2280 int entno; 2281 int level; 2282 int dead_level; 2283 2284 trace_xfs_da_swap_lastblock(args); 2285 2286 dead_buf = *dead_bufp; 2287 dead_blkno = *dead_blknop; 2288 tp = args->trans; 2289 dp = args->dp; 2290 w = args->whichfork; 2291 ASSERT(w == XFS_DATA_FORK); 2292 mp = dp->i_mount; 2293 lastoff = args->geo->freeblk; 2294 error = xfs_bmap_last_before(tp, dp, &lastoff, w); 2295 if (error) 2296 return error; 2297 if (XFS_IS_CORRUPT(mp, lastoff == 0)) 2298 return -EFSCORRUPTED; 2299 /* 2300 * Read the last block in the btree space. 2301 */ 2302 last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount; 2303 error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w); 2304 if (error) 2305 return error; 2306 /* 2307 * Copy the last block into the dead buffer and log it. 2308 */ 2309 memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize); 2310 xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1); 2311 dead_info = dead_buf->b_addr; 2312 /* 2313 * Get values from the moved block. 2314 */ 2315 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 2316 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) { 2317 struct xfs_dir3_icleaf_hdr leafhdr; 2318 struct xfs_dir2_leaf_entry *ents; 2319 2320 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; 2321 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, 2322 dead_leaf2); 2323 ents = leafhdr.ents; 2324 dead_level = 0; 2325 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval); 2326 } else { 2327 struct xfs_da3_icnode_hdr deadhdr; 2328 2329 dead_node = (xfs_da_intnode_t *)dead_info; 2330 xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node); 2331 btree = deadhdr.btree; 2332 dead_level = deadhdr.level; 2333 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval); 2334 } 2335 sib_buf = par_buf = NULL; 2336 /* 2337 * If the moved block has a left sibling, fix up the pointers. 2338 */ 2339 if ((sib_blkno = be32_to_cpu(dead_info->back))) { 2340 error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w); 2341 if (error) 2342 goto done; 2343 sib_info = sib_buf->b_addr; 2344 if (XFS_IS_CORRUPT(mp, 2345 be32_to_cpu(sib_info->forw) != last_blkno || 2346 sib_info->magic != dead_info->magic)) { 2347 error = -EFSCORRUPTED; 2348 goto done; 2349 } 2350 sib_info->forw = cpu_to_be32(dead_blkno); 2351 xfs_trans_log_buf(tp, sib_buf, 2352 XFS_DA_LOGRANGE(sib_info, &sib_info->forw, 2353 sizeof(sib_info->forw))); 2354 sib_buf = NULL; 2355 } 2356 /* 2357 * If the moved block has a right sibling, fix up the pointers. 
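 * Its back pointer still names the old location (last_blkno) and is
 * redirected to dead_blkno, mirroring the left-sibling fixup above.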
2358 */ 2359 if ((sib_blkno = be32_to_cpu(dead_info->forw))) { 2360 error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w); 2361 if (error) 2362 goto done; 2363 sib_info = sib_buf->b_addr; 2364 if (XFS_IS_CORRUPT(mp, 2365 be32_to_cpu(sib_info->back) != last_blkno || 2366 sib_info->magic != dead_info->magic)) { 2367 error = -EFSCORRUPTED; 2368 goto done; 2369 } 2370 sib_info->back = cpu_to_be32(dead_blkno); 2371 xfs_trans_log_buf(tp, sib_buf, 2372 XFS_DA_LOGRANGE(sib_info, &sib_info->back, 2373 sizeof(sib_info->back))); 2374 sib_buf = NULL; 2375 } 2376 par_blkno = args->geo->leafblk; 2377 level = -1; 2378 /* 2379 * Walk down the tree looking for the parent of the moved block. 2380 */ 2381 for (;;) { 2382 error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w); 2383 if (error) 2384 goto done; 2385 par_node = par_buf->b_addr; 2386 xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node); 2387 if (XFS_IS_CORRUPT(mp, 2388 level >= 0 && level != par_hdr.level + 1)) { 2389 error = -EFSCORRUPTED; 2390 goto done; 2391 } 2392 level = par_hdr.level; 2393 btree = par_hdr.btree; 2394 for (entno = 0; 2395 entno < par_hdr.count && 2396 be32_to_cpu(btree[entno].hashval) < dead_hash; 2397 entno++) 2398 continue; 2399 if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) { 2400 error = -EFSCORRUPTED; 2401 goto done; 2402 } 2403 par_blkno = be32_to_cpu(btree[entno].before); 2404 if (level == dead_level + 1) 2405 break; 2406 xfs_trans_brelse(tp, par_buf); 2407 par_buf = NULL; 2408 } 2409 /* 2410 * We're in the right parent block. 2411 * Look for the right entry. 2412 */ 2413 for (;;) { 2414 for (; 2415 entno < par_hdr.count && 2416 be32_to_cpu(btree[entno].before) != last_blkno; 2417 entno++) 2418 continue; 2419 if (entno < par_hdr.count) 2420 break; 2421 par_blkno = par_hdr.forw; 2422 xfs_trans_brelse(tp, par_buf); 2423 par_buf = NULL; 2424 if (XFS_IS_CORRUPT(mp, par_blkno == 0)) { 2425 error = -EFSCORRUPTED; 2426 goto done; 2427 } 2428 error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w); 2429 if (error) 2430 goto done; 2431 par_node = par_buf->b_addr; 2432 xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node); 2433 if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) { 2434 error = -EFSCORRUPTED; 2435 goto done; 2436 } 2437 btree = par_hdr.btree; 2438 entno = 0; 2439 } 2440 /* 2441 * Update the parent entry pointing to the moved block. 2442 */ 2443 btree[entno].before = cpu_to_be32(dead_blkno); 2444 xfs_trans_log_buf(tp, par_buf, 2445 XFS_DA_LOGRANGE(par_node, &btree[entno].before, 2446 sizeof(btree[entno].before))); 2447 *dead_blknop = last_blkno; 2448 *dead_bufp = last_buf; 2449 return 0; 2450 done: 2451 if (par_buf) 2452 xfs_trans_brelse(tp, par_buf); 2453 if (sib_buf) 2454 xfs_trans_brelse(tp, sib_buf); 2455 xfs_trans_brelse(tp, last_buf); 2456 return error; 2457 } 2458 2459 /* 2460 * Remove a btree block from a directory or attribute. 2461 */ 2462 int 2463 xfs_da_shrink_inode( 2464 struct xfs_da_args *args, 2465 xfs_dablk_t dead_blkno, 2466 struct xfs_buf *dead_buf) 2467 { 2468 struct xfs_inode *dp; 2469 int done, error, w, count; 2470 struct xfs_trans *tp; 2471 2472 trace_xfs_da_shrink_inode(args); 2473 2474 dp = args->dp; 2475 w = args->whichfork; 2476 tp = args->trans; 2477 count = args->geo->fsbcount; 2478 for (;;) { 2479 /* 2480 * Remove extents. If we get ENOSPC for a dir we have to move 2481 * the last block to the place we want to kill. 
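 * Only the data fork (a directory) can fall back to
 * xfs_da3_swap_lastblock(); for an attribute fork we give up and
 * return the ENOSPC error.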
2482 */ 2483 error = xfs_bunmapi(tp, dp, dead_blkno, count, 2484 xfs_bmapi_aflag(w), 0, &done); 2485 if (error == -ENOSPC) { 2486 if (w != XFS_DATA_FORK) 2487 break; 2488 error = xfs_da3_swap_lastblock(args, &dead_blkno, 2489 &dead_buf); 2490 if (error) 2491 break; 2492 } else { 2493 break; 2494 } 2495 } 2496 xfs_trans_binval(tp, dead_buf); 2497 return error; 2498 } 2499 2500 static int 2501 xfs_dabuf_map( 2502 struct xfs_inode *dp, 2503 xfs_dablk_t bno, 2504 unsigned int flags, 2505 int whichfork, 2506 struct xfs_buf_map **mapp, 2507 int *nmaps) 2508 { 2509 struct xfs_mount *mp = dp->i_mount; 2510 int nfsb = xfs_dabuf_nfsb(mp, whichfork); 2511 struct xfs_bmbt_irec irec, *irecs = &irec; 2512 struct xfs_buf_map *map = *mapp; 2513 xfs_fileoff_t off = bno; 2514 int error = 0, nirecs, i; 2515 2516 if (nfsb > 1) 2517 irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS); 2518 2519 nirecs = nfsb; 2520 error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs, 2521 xfs_bmapi_aflag(whichfork)); 2522 if (error) 2523 goto out_free_irecs; 2524 2525 /* 2526 * Use the caller provided map for the single map case, else allocate a 2527 * larger one that needs to be free by the caller. 2528 */ 2529 if (nirecs > 1) { 2530 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS); 2531 if (!map) { 2532 error = -ENOMEM; 2533 goto out_free_irecs; 2534 } 2535 *mapp = map; 2536 } 2537 2538 for (i = 0; i < nirecs; i++) { 2539 if (irecs[i].br_startblock == HOLESTARTBLOCK || 2540 irecs[i].br_startblock == DELAYSTARTBLOCK) 2541 goto invalid_mapping; 2542 if (off != irecs[i].br_startoff) 2543 goto invalid_mapping; 2544 2545 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock); 2546 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount); 2547 off += irecs[i].br_blockcount; 2548 } 2549 2550 if (off != bno + nfsb) 2551 goto invalid_mapping; 2552 2553 *nmaps = nirecs; 2554 out_free_irecs: 2555 if (irecs != &irec) 2556 kmem_free(irecs); 2557 return error; 2558 2559 invalid_mapping: 2560 /* Caller ok with no mapping. */ 2561 if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) { 2562 error = -EFSCORRUPTED; 2563 if (xfs_error_level >= XFS_ERRLEVEL_LOW) { 2564 xfs_alert(mp, "%s: bno %u inode %llu", 2565 __func__, bno, dp->i_ino); 2566 2567 for (i = 0; i < nirecs; i++) { 2568 xfs_alert(mp, 2569 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d", 2570 i, irecs[i].br_startoff, 2571 irecs[i].br_startblock, 2572 irecs[i].br_blockcount, 2573 irecs[i].br_state); 2574 } 2575 } 2576 } else { 2577 *nmaps = 0; 2578 } 2579 goto out_free_irecs; 2580 } 2581 2582 /* 2583 * Get a buffer for the dir/attr block. 2584 */ 2585 int 2586 xfs_da_get_buf( 2587 struct xfs_trans *tp, 2588 struct xfs_inode *dp, 2589 xfs_dablk_t bno, 2590 struct xfs_buf **bpp, 2591 int whichfork) 2592 { 2593 struct xfs_mount *mp = dp->i_mount; 2594 struct xfs_buf *bp; 2595 struct xfs_buf_map map, *mapp = ↦ 2596 int nmap = 1; 2597 int error; 2598 2599 *bpp = NULL; 2600 error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, &nmap); 2601 if (error || nmap == 0) 2602 goto out_free; 2603 2604 error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp); 2605 if (error) 2606 goto out_free; 2607 2608 *bpp = bp; 2609 2610 out_free: 2611 if (mapp != &map) 2612 kmem_free(mapp); 2613 2614 return error; 2615 } 2616 2617 /* 2618 * Get a buffer for the dir/attr block, fill in the contents. 
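 * Unlike xfs_da_get_buf() above, this reads the block through the
 * supplied buffer ops for verification and sets the dir or attr btree
 * buffer reference type before returning it.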
2619 */ 2620 int 2621 xfs_da_read_buf( 2622 struct xfs_trans *tp, 2623 struct xfs_inode *dp, 2624 xfs_dablk_t bno, 2625 unsigned int flags, 2626 struct xfs_buf **bpp, 2627 int whichfork, 2628 const struct xfs_buf_ops *ops) 2629 { 2630 struct xfs_mount *mp = dp->i_mount; 2631 struct xfs_buf *bp; 2632 struct xfs_buf_map map, *mapp = ↦ 2633 int nmap = 1; 2634 int error; 2635 2636 *bpp = NULL; 2637 error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap); 2638 if (error || !nmap) 2639 goto out_free; 2640 2641 error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0, 2642 &bp, ops); 2643 if (error) 2644 goto out_free; 2645 2646 if (whichfork == XFS_ATTR_FORK) 2647 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF); 2648 else 2649 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF); 2650 *bpp = bp; 2651 out_free: 2652 if (mapp != &map) 2653 kmem_free(mapp); 2654 2655 return error; 2656 } 2657 2658 /* 2659 * Readahead the dir/attr block. 2660 */ 2661 int 2662 xfs_da_reada_buf( 2663 struct xfs_inode *dp, 2664 xfs_dablk_t bno, 2665 unsigned int flags, 2666 int whichfork, 2667 const struct xfs_buf_ops *ops) 2668 { 2669 struct xfs_buf_map map; 2670 struct xfs_buf_map *mapp; 2671 int nmap; 2672 int error; 2673 2674 mapp = ↦ 2675 nmap = 1; 2676 error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap); 2677 if (error || !nmap) 2678 goto out_free; 2679 2680 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops); 2681 2682 out_free: 2683 if (mapp != &map) 2684 kmem_free(mapp); 2685 2686 return error; 2687 } 2688