// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_root,
			      xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_blk,
			      xfs_da_state_blk_t *split_blk,
			      xfs_da_state_blk_t *blk_to_add,
			      int treelevel,
			      int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *node_blk_1,
				   xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
			     xfs_da_state_blk_t *old_node_blk,
			     xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
			     xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *src_node_blk,
				   xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
			      xfs_da_state_blk_t *drop_blk,
			      xfs_da_state_blk_t *save_blk);


struct kmem_cache	*xfs_da_state_cache;	/* anchor for dir/attr state */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
struct xfs_da_state *
xfs_da_state_alloc(
	struct xfs_da_args	*args)
{
	struct xfs_da_state	*state;

	state = kmem_cache_zalloc(xfs_da_state_cache, GFP_NOFS | __GFP_NOFAIL);
	state->args = args;
	state->mp = args->dp->i_mount;
	return state;
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
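 *
 * A typical lifecycle, sketched from the dir/attr callers (illustrative
 * only, not a verbatim caller):
 *
 *	state = xfs_da_state_alloc(args);
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	... insert/remove against state->path ...
 *	xfs_da_state_free(state);
 *
 * The state must not be referenced once it has been freed.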
108 */ 109 void 110 xfs_da_state_free(xfs_da_state_t *state) 111 { 112 xfs_da_state_kill_altpath(state); 113 #ifdef DEBUG 114 memset((char *)state, 0, sizeof(*state)); 115 #endif /* DEBUG */ 116 kmem_cache_free(xfs_da_state_cache, state); 117 } 118 119 static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork) 120 { 121 if (whichfork == XFS_DATA_FORK) 122 return mp->m_dir_geo->fsbcount; 123 return mp->m_attr_geo->fsbcount; 124 } 125 126 void 127 xfs_da3_node_hdr_from_disk( 128 struct xfs_mount *mp, 129 struct xfs_da3_icnode_hdr *to, 130 struct xfs_da_intnode *from) 131 { 132 if (xfs_has_crc(mp)) { 133 struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from; 134 135 to->forw = be32_to_cpu(from3->hdr.info.hdr.forw); 136 to->back = be32_to_cpu(from3->hdr.info.hdr.back); 137 to->magic = be16_to_cpu(from3->hdr.info.hdr.magic); 138 to->count = be16_to_cpu(from3->hdr.__count); 139 to->level = be16_to_cpu(from3->hdr.__level); 140 to->btree = from3->__btree; 141 ASSERT(to->magic == XFS_DA3_NODE_MAGIC); 142 } else { 143 to->forw = be32_to_cpu(from->hdr.info.forw); 144 to->back = be32_to_cpu(from->hdr.info.back); 145 to->magic = be16_to_cpu(from->hdr.info.magic); 146 to->count = be16_to_cpu(from->hdr.__count); 147 to->level = be16_to_cpu(from->hdr.__level); 148 to->btree = from->__btree; 149 ASSERT(to->magic == XFS_DA_NODE_MAGIC); 150 } 151 } 152 153 void 154 xfs_da3_node_hdr_to_disk( 155 struct xfs_mount *mp, 156 struct xfs_da_intnode *to, 157 struct xfs_da3_icnode_hdr *from) 158 { 159 if (xfs_has_crc(mp)) { 160 struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to; 161 162 ASSERT(from->magic == XFS_DA3_NODE_MAGIC); 163 to3->hdr.info.hdr.forw = cpu_to_be32(from->forw); 164 to3->hdr.info.hdr.back = cpu_to_be32(from->back); 165 to3->hdr.info.hdr.magic = cpu_to_be16(from->magic); 166 to3->hdr.__count = cpu_to_be16(from->count); 167 to3->hdr.__level = cpu_to_be16(from->level); 168 } else { 169 ASSERT(from->magic == XFS_DA_NODE_MAGIC); 170 to->hdr.info.forw = cpu_to_be32(from->forw); 171 to->hdr.info.back = cpu_to_be32(from->back); 172 to->hdr.info.magic = cpu_to_be16(from->magic); 173 to->hdr.__count = cpu_to_be16(from->count); 174 to->hdr.__level = cpu_to_be16(from->level); 175 } 176 } 177 178 /* 179 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only 180 * accessible on v5 filesystems. This header format is common across da node, 181 * attr leaf and dir leaf blocks. 
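 *
 * On v5 (CRC-enabled) filesystems the extra checks below also cover the
 * metadata UUID, the block's own disk address and the LSN stamped at the
 * last write; on older filesystems only the magic number can be verified
 * from this header.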
 */
xfs_failaddr_t
xfs_da3_blkinfo_verify(
	struct xfs_buf		*bp,
	struct xfs_da3_blkinfo	*hdr3)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_da_blkinfo	*hdr = &hdr3->hdr;

	if (!xfs_verify_magic16(bp, hdr->magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (be64_to_cpu(hdr3->blkno) != xfs_buf_daddr(bp))
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
			return __this_address;
	}

	return NULL;
}

static xfs_failaddr_t
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	xfs_failaddr_t		fa;

	xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);

	fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
	if (fa)
		return fa;

	if (ichdr.level == 0)
		return __this_address;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return __this_address;
	if (ichdr.count == 0)
		return __this_address;

	/*
	 * We don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds.
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return __this_address;

	/* XXX: hash order check? */

	return NULL;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_da3_node_hdr	*hdr3 = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_da3_node_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_has_crc(mp))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * Leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf-level blocks when detection incorrectly identifies the tree as node
 * format. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	xfs_failaddr_t		fa;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_verifier_error(bp, -EFSBADCRC,
					__this_address);
			break;
		}
		fallthrough;
	case XFS_DA_NODE_MAGIC:
		fa = xfs_da3_node_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
		break;
	}
}

/* Verify the structure of a da3 block.
*/ 308 static xfs_failaddr_t 309 xfs_da3_node_verify_struct( 310 struct xfs_buf *bp) 311 { 312 struct xfs_da_blkinfo *info = bp->b_addr; 313 314 switch (be16_to_cpu(info->magic)) { 315 case XFS_DA3_NODE_MAGIC: 316 case XFS_DA_NODE_MAGIC: 317 return xfs_da3_node_verify(bp); 318 case XFS_ATTR_LEAF_MAGIC: 319 case XFS_ATTR3_LEAF_MAGIC: 320 bp->b_ops = &xfs_attr3_leaf_buf_ops; 321 return bp->b_ops->verify_struct(bp); 322 case XFS_DIR2_LEAFN_MAGIC: 323 case XFS_DIR3_LEAFN_MAGIC: 324 bp->b_ops = &xfs_dir3_leafn_buf_ops; 325 return bp->b_ops->verify_struct(bp); 326 default: 327 return __this_address; 328 } 329 } 330 331 const struct xfs_buf_ops xfs_da3_node_buf_ops = { 332 .name = "xfs_da3_node", 333 .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC), 334 cpu_to_be16(XFS_DA3_NODE_MAGIC) }, 335 .verify_read = xfs_da3_node_read_verify, 336 .verify_write = xfs_da3_node_write_verify, 337 .verify_struct = xfs_da3_node_verify_struct, 338 }; 339 340 static int 341 xfs_da3_node_set_type( 342 struct xfs_trans *tp, 343 struct xfs_buf *bp) 344 { 345 struct xfs_da_blkinfo *info = bp->b_addr; 346 347 switch (be16_to_cpu(info->magic)) { 348 case XFS_DA_NODE_MAGIC: 349 case XFS_DA3_NODE_MAGIC: 350 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); 351 return 0; 352 case XFS_ATTR_LEAF_MAGIC: 353 case XFS_ATTR3_LEAF_MAGIC: 354 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_ATTR_LEAF_BUF); 355 return 0; 356 case XFS_DIR2_LEAFN_MAGIC: 357 case XFS_DIR3_LEAFN_MAGIC: 358 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF); 359 return 0; 360 default: 361 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp, 362 info, sizeof(*info)); 363 xfs_trans_brelse(tp, bp); 364 return -EFSCORRUPTED; 365 } 366 } 367 368 int 369 xfs_da3_node_read( 370 struct xfs_trans *tp, 371 struct xfs_inode *dp, 372 xfs_dablk_t bno, 373 struct xfs_buf **bpp, 374 int whichfork) 375 { 376 int error; 377 378 error = xfs_da_read_buf(tp, dp, bno, 0, bpp, whichfork, 379 &xfs_da3_node_buf_ops); 380 if (error || !*bpp || !tp) 381 return error; 382 return xfs_da3_node_set_type(tp, *bpp); 383 } 384 385 int 386 xfs_da3_node_read_mapped( 387 struct xfs_trans *tp, 388 struct xfs_inode *dp, 389 xfs_daddr_t mappedbno, 390 struct xfs_buf **bpp, 391 int whichfork) 392 { 393 struct xfs_mount *mp = dp->i_mount; 394 int error; 395 396 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno, 397 XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0, 398 bpp, &xfs_da3_node_buf_ops); 399 if (error || !*bpp) 400 return error; 401 402 if (whichfork == XFS_ATTR_FORK) 403 xfs_buf_set_ref(*bpp, XFS_ATTR_BTREE_REF); 404 else 405 xfs_buf_set_ref(*bpp, XFS_DIR_BTREE_REF); 406 407 if (!tp) 408 return 0; 409 return xfs_da3_node_set_type(tp, *bpp); 410 } 411 412 /*======================================================================== 413 * Routines used for growing the Btree. 414 *========================================================================*/ 415 416 /* 417 * Create the initial contents of an intermediate node. 
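 *
 * The new block gets a zeroed header of the right format for the
 * filesystem: XFS_DA3_NODE_MAGIC plus blkno/owner/uuid on CRC-enabled
 * filesystems, XFS_DA_NODE_MAGIC otherwise. Only the level is filled in;
 * the entry count starts at zero and the caller adds btree entries
 * afterwards, roughly (illustrative sketch only):
 *
 *	error = xfs_da3_node_create(args, blkno, level, &bp, whichfork);
 *	if (error)
 *		return error;
 *	... populate the node via xfs_da3_node_add() and friends ...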
418 */ 419 int 420 xfs_da3_node_create( 421 struct xfs_da_args *args, 422 xfs_dablk_t blkno, 423 int level, 424 struct xfs_buf **bpp, 425 int whichfork) 426 { 427 struct xfs_da_intnode *node; 428 struct xfs_trans *tp = args->trans; 429 struct xfs_mount *mp = tp->t_mountp; 430 struct xfs_da3_icnode_hdr ichdr = {0}; 431 struct xfs_buf *bp; 432 int error; 433 struct xfs_inode *dp = args->dp; 434 435 trace_xfs_da_node_create(args); 436 ASSERT(level <= XFS_DA_NODE_MAXDEPTH); 437 438 error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork); 439 if (error) 440 return error; 441 bp->b_ops = &xfs_da3_node_buf_ops; 442 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); 443 node = bp->b_addr; 444 445 if (xfs_has_crc(mp)) { 446 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; 447 448 memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr)); 449 ichdr.magic = XFS_DA3_NODE_MAGIC; 450 hdr3->info.blkno = cpu_to_be64(xfs_buf_daddr(bp)); 451 hdr3->info.owner = cpu_to_be64(args->dp->i_ino); 452 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid); 453 } else { 454 ichdr.magic = XFS_DA_NODE_MAGIC; 455 } 456 ichdr.level = level; 457 458 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr); 459 xfs_trans_log_buf(tp, bp, 460 XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size)); 461 462 *bpp = bp; 463 return 0; 464 } 465 466 /* 467 * Split a leaf node, rebalance, then possibly split 468 * intermediate nodes, rebalance, etc. 469 */ 470 int /* error */ 471 xfs_da3_split( 472 struct xfs_da_state *state) 473 { 474 struct xfs_da_state_blk *oldblk; 475 struct xfs_da_state_blk *newblk; 476 struct xfs_da_state_blk *addblk; 477 struct xfs_da_intnode *node; 478 int max; 479 int action = 0; 480 int error; 481 int i; 482 483 trace_xfs_da_split(state->args); 484 485 /* 486 * Walk back up the tree splitting/inserting/adjusting as necessary. 487 * If we need to insert and there isn't room, split the node, then 488 * decide which fragment to insert the new block from below into. 489 * Note that we may split the root this way, but we need more fixup. 490 */ 491 max = state->path.active - 1; 492 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH)); 493 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC || 494 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC); 495 496 addblk = &state->path.blk[max]; /* initial dummy value */ 497 for (i = max; (i >= 0) && addblk; state->path.active--, i--) { 498 oldblk = &state->path.blk[i]; 499 newblk = &state->altpath.blk[i]; 500 501 /* 502 * If a leaf node then 503 * Allocate a new leaf node, then rebalance across them. 504 * else if an intermediate node then 505 * We split on the last layer, must we split the node? 506 */ 507 switch (oldblk->magic) { 508 case XFS_ATTR_LEAF_MAGIC: 509 error = xfs_attr3_leaf_split(state, oldblk, newblk); 510 if ((error != 0) && (error != -ENOSPC)) { 511 return error; /* GROT: attr is inconsistent */ 512 } 513 if (!error) { 514 addblk = newblk; 515 break; 516 } 517 /* 518 * Entry wouldn't fit, split the leaf again. The new 519 * extrablk will be consumed by xfs_da3_node_split if 520 * the node is split. 
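 *
 * state->extravalid flags that extra block as pending, and
 * state->extraafter records whether it sorts before or after newblk so
 * that the parent node insert done later can place both new entries in
 * the right order.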
521 */ 522 state->extravalid = 1; 523 if (state->inleaf) { 524 state->extraafter = 0; /* before newblk */ 525 trace_xfs_attr_leaf_split_before(state->args); 526 error = xfs_attr3_leaf_split(state, oldblk, 527 &state->extrablk); 528 } else { 529 state->extraafter = 1; /* after newblk */ 530 trace_xfs_attr_leaf_split_after(state->args); 531 error = xfs_attr3_leaf_split(state, newblk, 532 &state->extrablk); 533 } 534 if (error) 535 return error; /* GROT: attr inconsistent */ 536 addblk = newblk; 537 break; 538 case XFS_DIR2_LEAFN_MAGIC: 539 error = xfs_dir2_leafn_split(state, oldblk, newblk); 540 if (error) 541 return error; 542 addblk = newblk; 543 break; 544 case XFS_DA_NODE_MAGIC: 545 error = xfs_da3_node_split(state, oldblk, newblk, addblk, 546 max - i, &action); 547 addblk->bp = NULL; 548 if (error) 549 return error; /* GROT: dir is inconsistent */ 550 /* 551 * Record the newly split block for the next time thru? 552 */ 553 if (action) 554 addblk = newblk; 555 else 556 addblk = NULL; 557 break; 558 } 559 560 /* 561 * Update the btree to show the new hashval for this child. 562 */ 563 xfs_da3_fixhashpath(state, &state->path); 564 } 565 if (!addblk) 566 return 0; 567 568 /* 569 * xfs_da3_node_split() should have consumed any extra blocks we added 570 * during a double leaf split in the attr fork. This is guaranteed as 571 * we can't be here if the attr fork only has a single leaf block. 572 */ 573 ASSERT(state->extravalid == 0 || 574 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC); 575 576 /* 577 * Split the root node. 578 */ 579 ASSERT(state->path.active == 0); 580 oldblk = &state->path.blk[0]; 581 error = xfs_da3_root_split(state, oldblk, addblk); 582 if (error) 583 goto out; 584 585 /* 586 * Update pointers to the node which used to be block 0 and just got 587 * bumped because of the addition of a new root node. Note that the 588 * original block 0 could be at any position in the list of blocks in 589 * the tree. 590 * 591 * Note: the magic numbers and sibling pointers are in the same physical 592 * place for both v2 and v3 headers (by design). Hence it doesn't matter 593 * which version of the xfs_da_intnode structure we use here as the 594 * result will be the same using either structure. 595 */ 596 node = oldblk->bp->b_addr; 597 if (node->hdr.info.forw) { 598 if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) { 599 xfs_buf_mark_corrupt(oldblk->bp); 600 error = -EFSCORRUPTED; 601 goto out; 602 } 603 node = addblk->bp->b_addr; 604 node->hdr.info.back = cpu_to_be32(oldblk->blkno); 605 xfs_trans_log_buf(state->args->trans, addblk->bp, 606 XFS_DA_LOGRANGE(node, &node->hdr.info, 607 sizeof(node->hdr.info))); 608 } 609 node = oldblk->bp->b_addr; 610 if (node->hdr.info.back) { 611 if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) { 612 xfs_buf_mark_corrupt(oldblk->bp); 613 error = -EFSCORRUPTED; 614 goto out; 615 } 616 node = addblk->bp->b_addr; 617 node->hdr.info.forw = cpu_to_be32(oldblk->blkno); 618 xfs_trans_log_buf(state->args->trans, addblk->bp, 619 XFS_DA_LOGRANGE(node, &node->hdr.info, 620 sizeof(node->hdr.info))); 621 } 622 out: 623 addblk->bp = NULL; 624 return error; 625 } 626 627 /* 628 * Split the root. We have to create a new root and point to the two 629 * parts (the split old root) that we just created. Copy block zero to 630 * the EOF, extending the inode in process. 
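 *
 * Roughly (illustrative diagram only, leaf/freespace blocks omitted):
 *
 *	before:	[root: N entries]
 *	after:	[root: 2 entries] -> [copy of old root] <-> [new sibling]
 *
 * Block 0 (the leafblk offset) must remain the root of the tree, which is
 * why the old root's contents are copied out to a newly allocated block
 * rather than allocating the new root elsewhere.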
631 */ 632 STATIC int /* error */ 633 xfs_da3_root_split( 634 struct xfs_da_state *state, 635 struct xfs_da_state_blk *blk1, 636 struct xfs_da_state_blk *blk2) 637 { 638 struct xfs_da_intnode *node; 639 struct xfs_da_intnode *oldroot; 640 struct xfs_da_node_entry *btree; 641 struct xfs_da3_icnode_hdr nodehdr; 642 struct xfs_da_args *args; 643 struct xfs_buf *bp; 644 struct xfs_inode *dp; 645 struct xfs_trans *tp; 646 struct xfs_dir2_leaf *leaf; 647 xfs_dablk_t blkno; 648 int level; 649 int error; 650 int size; 651 652 trace_xfs_da_root_split(state->args); 653 654 /* 655 * Copy the existing (incorrect) block from the root node position 656 * to a free space somewhere. 657 */ 658 args = state->args; 659 error = xfs_da_grow_inode(args, &blkno); 660 if (error) 661 return error; 662 663 dp = args->dp; 664 tp = args->trans; 665 error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork); 666 if (error) 667 return error; 668 node = bp->b_addr; 669 oldroot = blk1->bp->b_addr; 670 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 671 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) { 672 struct xfs_da3_icnode_hdr icnodehdr; 673 674 xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot); 675 btree = icnodehdr.btree; 676 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot); 677 level = icnodehdr.level; 678 679 /* 680 * we are about to copy oldroot to bp, so set up the type 681 * of bp while we know exactly what it will be. 682 */ 683 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); 684 } else { 685 struct xfs_dir3_icleaf_hdr leafhdr; 686 687 leaf = (xfs_dir2_leaf_t *)oldroot; 688 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf); 689 690 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC || 691 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC); 692 size = (int)((char *)&leafhdr.ents[leafhdr.count] - 693 (char *)leaf); 694 level = 0; 695 696 /* 697 * we are about to copy oldroot to bp, so set up the type 698 * of bp while we know exactly what it will be. 699 */ 700 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF); 701 } 702 703 /* 704 * we can copy most of the information in the node from one block to 705 * another, but for CRC enabled headers we have to make sure that the 706 * block specific identifiers are kept intact. We update the buffer 707 * directly for this. 708 */ 709 memcpy(node, oldroot, size); 710 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) || 711 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) { 712 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node; 713 714 node3->hdr.info.blkno = cpu_to_be64(xfs_buf_daddr(bp)); 715 } 716 xfs_trans_log_buf(tp, bp, 0, size - 1); 717 718 bp->b_ops = blk1->bp->b_ops; 719 xfs_trans_buf_copy_type(bp, blk1->bp); 720 blk1->bp = bp; 721 blk1->blkno = blkno; 722 723 /* 724 * Set up the new root node. 725 */ 726 error = xfs_da3_node_create(args, 727 (args->whichfork == XFS_DATA_FORK) ? 
args->geo->leafblk : 0, 728 level + 1, &bp, args->whichfork); 729 if (error) 730 return error; 731 732 node = bp->b_addr; 733 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node); 734 btree = nodehdr.btree; 735 btree[0].hashval = cpu_to_be32(blk1->hashval); 736 btree[0].before = cpu_to_be32(blk1->blkno); 737 btree[1].hashval = cpu_to_be32(blk2->hashval); 738 btree[1].before = cpu_to_be32(blk2->blkno); 739 nodehdr.count = 2; 740 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr); 741 742 #ifdef DEBUG 743 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 744 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) { 745 ASSERT(blk1->blkno >= args->geo->leafblk && 746 blk1->blkno < args->geo->freeblk); 747 ASSERT(blk2->blkno >= args->geo->leafblk && 748 blk2->blkno < args->geo->freeblk); 749 } 750 #endif 751 752 /* Header is already logged by xfs_da_node_create */ 753 xfs_trans_log_buf(tp, bp, 754 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2)); 755 756 return 0; 757 } 758 759 /* 760 * Split the node, rebalance, then add the new entry. 761 */ 762 STATIC int /* error */ 763 xfs_da3_node_split( 764 struct xfs_da_state *state, 765 struct xfs_da_state_blk *oldblk, 766 struct xfs_da_state_blk *newblk, 767 struct xfs_da_state_blk *addblk, 768 int treelevel, 769 int *result) 770 { 771 struct xfs_da_intnode *node; 772 struct xfs_da3_icnode_hdr nodehdr; 773 xfs_dablk_t blkno; 774 int newcount; 775 int error; 776 int useextra; 777 struct xfs_inode *dp = state->args->dp; 778 779 trace_xfs_da_node_split(state->args); 780 781 node = oldblk->bp->b_addr; 782 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node); 783 784 /* 785 * With V2 dirs the extra block is data or freespace. 786 */ 787 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK; 788 newcount = 1 + useextra; 789 /* 790 * Do we have to split the node? 791 */ 792 if (nodehdr.count + newcount > state->args->geo->node_ents) { 793 /* 794 * Allocate a new node, add to the doubly linked chain of 795 * nodes, then move some of our excess entries into it. 796 */ 797 error = xfs_da_grow_inode(state->args, &blkno); 798 if (error) 799 return error; /* GROT: dir is inconsistent */ 800 801 error = xfs_da3_node_create(state->args, blkno, treelevel, 802 &newblk->bp, state->args->whichfork); 803 if (error) 804 return error; /* GROT: dir is inconsistent */ 805 newblk->blkno = blkno; 806 newblk->magic = XFS_DA_NODE_MAGIC; 807 xfs_da3_node_rebalance(state, oldblk, newblk); 808 error = xfs_da3_blk_link(state, oldblk, newblk); 809 if (error) 810 return error; 811 *result = 1; 812 } else { 813 *result = 0; 814 } 815 816 /* 817 * Insert the new entry(s) into the correct block 818 * (updating last hashval in the process). 819 * 820 * xfs_da3_node_add() inserts BEFORE the given index, 821 * and as a result of using node_lookup_int() we always 822 * point to a valid entry (not after one), but a split 823 * operation always results in a new block whose hashvals 824 * FOLLOW the current block. 825 * 826 * If we had double-split op below us, then add the extra block too. 
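 *
 * That is why the index is bumped before xfs_da3_node_add() below: the
 * entry for the new (higher hashval) child must land one slot past the
 * entry we looked up, in whichever of oldblk/newblk now owns that index.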
827 */ 828 node = oldblk->bp->b_addr; 829 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node); 830 if (oldblk->index <= nodehdr.count) { 831 oldblk->index++; 832 xfs_da3_node_add(state, oldblk, addblk); 833 if (useextra) { 834 if (state->extraafter) 835 oldblk->index++; 836 xfs_da3_node_add(state, oldblk, &state->extrablk); 837 state->extravalid = 0; 838 } 839 } else { 840 newblk->index++; 841 xfs_da3_node_add(state, newblk, addblk); 842 if (useextra) { 843 if (state->extraafter) 844 newblk->index++; 845 xfs_da3_node_add(state, newblk, &state->extrablk); 846 state->extravalid = 0; 847 } 848 } 849 850 return 0; 851 } 852 853 /* 854 * Balance the btree elements between two intermediate nodes, 855 * usually one full and one empty. 856 * 857 * NOTE: if blk2 is empty, then it will get the upper half of blk1. 858 */ 859 STATIC void 860 xfs_da3_node_rebalance( 861 struct xfs_da_state *state, 862 struct xfs_da_state_blk *blk1, 863 struct xfs_da_state_blk *blk2) 864 { 865 struct xfs_da_intnode *node1; 866 struct xfs_da_intnode *node2; 867 struct xfs_da_node_entry *btree1; 868 struct xfs_da_node_entry *btree2; 869 struct xfs_da_node_entry *btree_s; 870 struct xfs_da_node_entry *btree_d; 871 struct xfs_da3_icnode_hdr nodehdr1; 872 struct xfs_da3_icnode_hdr nodehdr2; 873 struct xfs_trans *tp; 874 int count; 875 int tmp; 876 int swap = 0; 877 struct xfs_inode *dp = state->args->dp; 878 879 trace_xfs_da_node_rebalance(state->args); 880 881 node1 = blk1->bp->b_addr; 882 node2 = blk2->bp->b_addr; 883 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1); 884 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2); 885 btree1 = nodehdr1.btree; 886 btree2 = nodehdr2.btree; 887 888 /* 889 * Figure out how many entries need to move, and in which direction. 890 * Swap the nodes around if that makes it simpler. 891 */ 892 if (nodehdr1.count > 0 && nodehdr2.count > 0 && 893 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) || 894 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) < 895 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) { 896 swap(node1, node2); 897 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1); 898 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2); 899 btree1 = nodehdr1.btree; 900 btree2 = nodehdr2.btree; 901 swap = 1; 902 } 903 904 count = (nodehdr1.count - nodehdr2.count) / 2; 905 if (count == 0) 906 return; 907 tp = state->args->trans; 908 /* 909 * Two cases: high-to-low and low-to-high. 910 */ 911 if (count > 0) { 912 /* 913 * Move elements in node2 up to make a hole. 914 */ 915 tmp = nodehdr2.count; 916 if (tmp > 0) { 917 tmp *= (uint)sizeof(xfs_da_node_entry_t); 918 btree_s = &btree2[0]; 919 btree_d = &btree2[count]; 920 memmove(btree_d, btree_s, tmp); 921 } 922 923 /* 924 * Move the req'd B-tree elements from high in node1 to 925 * low in node2. 926 */ 927 nodehdr2.count += count; 928 tmp = count * (uint)sizeof(xfs_da_node_entry_t); 929 btree_s = &btree1[nodehdr1.count - count]; 930 btree_d = &btree2[0]; 931 memcpy(btree_d, btree_s, tmp); 932 nodehdr1.count -= count; 933 } else { 934 /* 935 * Move the req'd B-tree elements from low in node2 to 936 * high in node1. 937 */ 938 count = -count; 939 tmp = count * (uint)sizeof(xfs_da_node_entry_t); 940 btree_s = &btree2[0]; 941 btree_d = &btree1[nodehdr1.count]; 942 memcpy(btree_d, btree_s, tmp); 943 nodehdr1.count += count; 944 945 xfs_trans_log_buf(tp, blk1->bp, 946 XFS_DA_LOGRANGE(node1, btree_d, tmp)); 947 948 /* 949 * Move elements in node2 down to fill the hole. 
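 * count entries were copied out of the front of node2 above, so the
 * remaining (nodehdr2.count - count) entries slide down to the start of
 * the array before the header counts are rewritten.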
950 */ 951 tmp = nodehdr2.count - count; 952 tmp *= (uint)sizeof(xfs_da_node_entry_t); 953 btree_s = &btree2[count]; 954 btree_d = &btree2[0]; 955 memmove(btree_d, btree_s, tmp); 956 nodehdr2.count -= count; 957 } 958 959 /* 960 * Log header of node 1 and all current bits of node 2. 961 */ 962 xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1); 963 xfs_trans_log_buf(tp, blk1->bp, 964 XFS_DA_LOGRANGE(node1, &node1->hdr, 965 state->args->geo->node_hdr_size)); 966 967 xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2); 968 xfs_trans_log_buf(tp, blk2->bp, 969 XFS_DA_LOGRANGE(node2, &node2->hdr, 970 state->args->geo->node_hdr_size + 971 (sizeof(btree2[0]) * nodehdr2.count))); 972 973 /* 974 * Record the last hashval from each block for upward propagation. 975 * (note: don't use the swapped node pointers) 976 */ 977 if (swap) { 978 node1 = blk1->bp->b_addr; 979 node2 = blk2->bp->b_addr; 980 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1); 981 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2); 982 btree1 = nodehdr1.btree; 983 btree2 = nodehdr2.btree; 984 } 985 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval); 986 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval); 987 988 /* 989 * Adjust the expected index for insertion. 990 */ 991 if (blk1->index >= nodehdr1.count) { 992 blk2->index = blk1->index - nodehdr1.count; 993 blk1->index = nodehdr1.count + 1; /* make it invalid */ 994 } 995 } 996 997 /* 998 * Add a new entry to an intermediate node. 999 */ 1000 STATIC void 1001 xfs_da3_node_add( 1002 struct xfs_da_state *state, 1003 struct xfs_da_state_blk *oldblk, 1004 struct xfs_da_state_blk *newblk) 1005 { 1006 struct xfs_da_intnode *node; 1007 struct xfs_da3_icnode_hdr nodehdr; 1008 struct xfs_da_node_entry *btree; 1009 int tmp; 1010 struct xfs_inode *dp = state->args->dp; 1011 1012 trace_xfs_da_node_add(state->args); 1013 1014 node = oldblk->bp->b_addr; 1015 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node); 1016 btree = nodehdr.btree; 1017 1018 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count); 1019 ASSERT(newblk->blkno != 0); 1020 if (state->args->whichfork == XFS_DATA_FORK) 1021 ASSERT(newblk->blkno >= state->args->geo->leafblk && 1022 newblk->blkno < state->args->geo->freeblk); 1023 1024 /* 1025 * We may need to make some room before we insert the new node. 1026 */ 1027 tmp = 0; 1028 if (oldblk->index < nodehdr.count) { 1029 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree); 1030 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp); 1031 } 1032 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval); 1033 btree[oldblk->index].before = cpu_to_be32(newblk->blkno); 1034 xfs_trans_log_buf(state->args->trans, oldblk->bp, 1035 XFS_DA_LOGRANGE(node, &btree[oldblk->index], 1036 tmp + sizeof(*btree))); 1037 1038 nodehdr.count += 1; 1039 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr); 1040 xfs_trans_log_buf(state->args->trans, oldblk->bp, 1041 XFS_DA_LOGRANGE(node, &node->hdr, 1042 state->args->geo->node_hdr_size)); 1043 1044 /* 1045 * Copy the last hash value from the oldblk to propagate upwards. 1046 */ 1047 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval); 1048 } 1049 1050 /*======================================================================== 1051 * Routines used for shrinking the Btree. 
1052 *========================================================================*/ 1053 1054 /* 1055 * Deallocate an empty leaf node, remove it from its parent, 1056 * possibly deallocating that block, etc... 1057 */ 1058 int 1059 xfs_da3_join( 1060 struct xfs_da_state *state) 1061 { 1062 struct xfs_da_state_blk *drop_blk; 1063 struct xfs_da_state_blk *save_blk; 1064 int action = 0; 1065 int error; 1066 1067 trace_xfs_da_join(state->args); 1068 1069 drop_blk = &state->path.blk[ state->path.active-1 ]; 1070 save_blk = &state->altpath.blk[ state->path.active-1 ]; 1071 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC); 1072 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC || 1073 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC); 1074 1075 /* 1076 * Walk back up the tree joining/deallocating as necessary. 1077 * When we stop dropping blocks, break out. 1078 */ 1079 for ( ; state->path.active >= 2; drop_blk--, save_blk--, 1080 state->path.active--) { 1081 /* 1082 * See if we can combine the block with a neighbor. 1083 * (action == 0) => no options, just leave 1084 * (action == 1) => coalesce, then unlink 1085 * (action == 2) => block empty, unlink it 1086 */ 1087 switch (drop_blk->magic) { 1088 case XFS_ATTR_LEAF_MAGIC: 1089 error = xfs_attr3_leaf_toosmall(state, &action); 1090 if (error) 1091 return error; 1092 if (action == 0) 1093 return 0; 1094 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk); 1095 break; 1096 case XFS_DIR2_LEAFN_MAGIC: 1097 error = xfs_dir2_leafn_toosmall(state, &action); 1098 if (error) 1099 return error; 1100 if (action == 0) 1101 return 0; 1102 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk); 1103 break; 1104 case XFS_DA_NODE_MAGIC: 1105 /* 1106 * Remove the offending node, fixup hashvals, 1107 * check for a toosmall neighbor. 1108 */ 1109 xfs_da3_node_remove(state, drop_blk); 1110 xfs_da3_fixhashpath(state, &state->path); 1111 error = xfs_da3_node_toosmall(state, &action); 1112 if (error) 1113 return error; 1114 if (action == 0) 1115 return 0; 1116 xfs_da3_node_unbalance(state, drop_blk, save_blk); 1117 break; 1118 } 1119 xfs_da3_fixhashpath(state, &state->altpath); 1120 error = xfs_da3_blk_unlink(state, drop_blk, save_blk); 1121 xfs_da_state_kill_altpath(state); 1122 if (error) 1123 return error; 1124 error = xfs_da_shrink_inode(state->args, drop_blk->blkno, 1125 drop_blk->bp); 1126 drop_blk->bp = NULL; 1127 if (error) 1128 return error; 1129 } 1130 /* 1131 * We joined all the way to the top. If it turns out that 1132 * we only have one entry in the root, make the child block 1133 * the new root. 1134 */ 1135 xfs_da3_node_remove(state, drop_blk); 1136 xfs_da3_fixhashpath(state, &state->path); 1137 error = xfs_da3_root_join(state, &state->path.blk[0]); 1138 return error; 1139 } 1140 1141 #ifdef DEBUG 1142 static void 1143 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level) 1144 { 1145 __be16 magic = blkinfo->magic; 1146 1147 if (level == 1) { 1148 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 1149 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) || 1150 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) || 1151 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)); 1152 } else { 1153 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 1154 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)); 1155 } 1156 ASSERT(!blkinfo->forw); 1157 ASSERT(!blkinfo->back); 1158 } 1159 #else /* !DEBUG */ 1160 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level) 1161 #endif /* !DEBUG */ 1162 1163 /* 1164 * We have only one entry in the root. 
Copy the only remaining child of 1165 * the old root to block 0 as the new root node. 1166 */ 1167 STATIC int 1168 xfs_da3_root_join( 1169 struct xfs_da_state *state, 1170 struct xfs_da_state_blk *root_blk) 1171 { 1172 struct xfs_da_intnode *oldroot; 1173 struct xfs_da_args *args; 1174 xfs_dablk_t child; 1175 struct xfs_buf *bp; 1176 struct xfs_da3_icnode_hdr oldroothdr; 1177 int error; 1178 struct xfs_inode *dp = state->args->dp; 1179 1180 trace_xfs_da_root_join(state->args); 1181 1182 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); 1183 1184 args = state->args; 1185 oldroot = root_blk->bp->b_addr; 1186 xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot); 1187 ASSERT(oldroothdr.forw == 0); 1188 ASSERT(oldroothdr.back == 0); 1189 1190 /* 1191 * If the root has more than one child, then don't do anything. 1192 */ 1193 if (oldroothdr.count > 1) 1194 return 0; 1195 1196 /* 1197 * Read in the (only) child block, then copy those bytes into 1198 * the root block's buffer and free the original child block. 1199 */ 1200 child = be32_to_cpu(oldroothdr.btree[0].before); 1201 ASSERT(child != 0); 1202 error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork); 1203 if (error) 1204 return error; 1205 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level); 1206 1207 /* 1208 * This could be copying a leaf back into the root block in the case of 1209 * there only being a single leaf block left in the tree. Hence we have 1210 * to update the b_ops pointer as well to match the buffer type change 1211 * that could occur. For dir3 blocks we also need to update the block 1212 * number in the buffer header. 1213 */ 1214 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize); 1215 root_blk->bp->b_ops = bp->b_ops; 1216 xfs_trans_buf_copy_type(root_blk->bp, bp); 1217 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) { 1218 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr; 1219 da3->blkno = cpu_to_be64(xfs_buf_daddr(root_blk->bp)); 1220 } 1221 xfs_trans_log_buf(args->trans, root_blk->bp, 0, 1222 args->geo->blksize - 1); 1223 error = xfs_da_shrink_inode(args, child, bp); 1224 return error; 1225 } 1226 1227 /* 1228 * Check a node block and its neighbors to see if the block should be 1229 * collapsed into one or the other neighbor. Always keep the block 1230 * with the smaller block number. 1231 * If the current block is over 50% full, don't try to join it, return 0. 1232 * If the block is empty, fill in the state structure and return 2. 1233 * If it can be collapsed, fill in the state structure and return 1. 1234 * If nothing can be done, return 0. 1235 */ 1236 STATIC int 1237 xfs_da3_node_toosmall( 1238 struct xfs_da_state *state, 1239 int *action) 1240 { 1241 struct xfs_da_intnode *node; 1242 struct xfs_da_state_blk *blk; 1243 struct xfs_da_blkinfo *info; 1244 xfs_dablk_t blkno; 1245 struct xfs_buf *bp; 1246 struct xfs_da3_icnode_hdr nodehdr; 1247 int count; 1248 int forward; 1249 int error; 1250 int retval; 1251 int i; 1252 struct xfs_inode *dp = state->args->dp; 1253 1254 trace_xfs_da_node_toosmall(state->args); 1255 1256 /* 1257 * Check for the degenerate case of the block being over 50% full. 1258 * If so, it's not worth even looking to see if we might be able 1259 * to coalesce with a sibling. 
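 *
 * The merge test further down is the mirror image of this check: a
 * sibling is only chosen if the combined entry count still leaves at
 * least 25% of node_ents free. For example (sketch), with node_ents of
 * 128 a block holding more than 64 entries is left alone here, and two
 * blocks are only merged when their counts sum to 96 or fewer.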
1260 */ 1261 blk = &state->path.blk[ state->path.active-1 ]; 1262 info = blk->bp->b_addr; 1263 node = (xfs_da_intnode_t *)info; 1264 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node); 1265 if (nodehdr.count > (state->args->geo->node_ents >> 1)) { 1266 *action = 0; /* blk over 50%, don't try to join */ 1267 return 0; /* blk over 50%, don't try to join */ 1268 } 1269 1270 /* 1271 * Check for the degenerate case of the block being empty. 1272 * If the block is empty, we'll simply delete it, no need to 1273 * coalesce it with a sibling block. We choose (arbitrarily) 1274 * to merge with the forward block unless it is NULL. 1275 */ 1276 if (nodehdr.count == 0) { 1277 /* 1278 * Make altpath point to the block we want to keep and 1279 * path point to the block we want to drop (this one). 1280 */ 1281 forward = (info->forw != 0); 1282 memcpy(&state->altpath, &state->path, sizeof(state->path)); 1283 error = xfs_da3_path_shift(state, &state->altpath, forward, 1284 0, &retval); 1285 if (error) 1286 return error; 1287 if (retval) { 1288 *action = 0; 1289 } else { 1290 *action = 2; 1291 } 1292 return 0; 1293 } 1294 1295 /* 1296 * Examine each sibling block to see if we can coalesce with 1297 * at least 25% free space to spare. We need to figure out 1298 * whether to merge with the forward or the backward block. 1299 * We prefer coalescing with the lower numbered sibling so as 1300 * to shrink a directory over time. 1301 */ 1302 count = state->args->geo->node_ents; 1303 count -= state->args->geo->node_ents >> 2; 1304 count -= nodehdr.count; 1305 1306 /* start with smaller blk num */ 1307 forward = nodehdr.forw < nodehdr.back; 1308 for (i = 0; i < 2; forward = !forward, i++) { 1309 struct xfs_da3_icnode_hdr thdr; 1310 if (forward) 1311 blkno = nodehdr.forw; 1312 else 1313 blkno = nodehdr.back; 1314 if (blkno == 0) 1315 continue; 1316 error = xfs_da3_node_read(state->args->trans, dp, blkno, &bp, 1317 state->args->whichfork); 1318 if (error) 1319 return error; 1320 1321 node = bp->b_addr; 1322 xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node); 1323 xfs_trans_brelse(state->args->trans, bp); 1324 1325 if (count - thdr.count >= 0) 1326 break; /* fits with at least 25% to spare */ 1327 } 1328 if (i >= 2) { 1329 *action = 0; 1330 return 0; 1331 } 1332 1333 /* 1334 * Make altpath point to the block we want to keep (the lower 1335 * numbered block) and path point to the block we want to drop. 1336 */ 1337 memcpy(&state->altpath, &state->path, sizeof(state->path)); 1338 if (blkno < blk->blkno) { 1339 error = xfs_da3_path_shift(state, &state->altpath, forward, 1340 0, &retval); 1341 } else { 1342 error = xfs_da3_path_shift(state, &state->path, forward, 1343 0, &retval); 1344 } 1345 if (error) 1346 return error; 1347 if (retval) { 1348 *action = 0; 1349 return 0; 1350 } 1351 *action = 1; 1352 return 0; 1353 } 1354 1355 /* 1356 * Pick up the last hashvalue from an intermediate node. 1357 */ 1358 STATIC uint 1359 xfs_da3_node_lasthash( 1360 struct xfs_inode *dp, 1361 struct xfs_buf *bp, 1362 int *count) 1363 { 1364 struct xfs_da3_icnode_hdr nodehdr; 1365 1366 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr); 1367 if (count) 1368 *count = nodehdr.count; 1369 if (!nodehdr.count) 1370 return 0; 1371 return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval); 1372 } 1373 1374 /* 1375 * Walk back up the tree adjusting hash values as necessary, 1376 * when we stop making changes, return. 
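 *
 * Each intermediate node records the last (largest) hashval of the child
 * it points to, so a change to the rightmost entry of a child has to be
 * mirrored in its parent, and possibly all the way up to the root. The
 * walk stops at the first ancestor whose entry already matches.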
1377 */ 1378 void 1379 xfs_da3_fixhashpath( 1380 struct xfs_da_state *state, 1381 struct xfs_da_state_path *path) 1382 { 1383 struct xfs_da_state_blk *blk; 1384 struct xfs_da_intnode *node; 1385 struct xfs_da_node_entry *btree; 1386 xfs_dahash_t lasthash=0; 1387 int level; 1388 int count; 1389 struct xfs_inode *dp = state->args->dp; 1390 1391 trace_xfs_da_fixhashpath(state->args); 1392 1393 level = path->active-1; 1394 blk = &path->blk[ level ]; 1395 switch (blk->magic) { 1396 case XFS_ATTR_LEAF_MAGIC: 1397 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count); 1398 if (count == 0) 1399 return; 1400 break; 1401 case XFS_DIR2_LEAFN_MAGIC: 1402 lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count); 1403 if (count == 0) 1404 return; 1405 break; 1406 case XFS_DA_NODE_MAGIC: 1407 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count); 1408 if (count == 0) 1409 return; 1410 break; 1411 } 1412 for (blk--, level--; level >= 0; blk--, level--) { 1413 struct xfs_da3_icnode_hdr nodehdr; 1414 1415 node = blk->bp->b_addr; 1416 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node); 1417 btree = nodehdr.btree; 1418 if (be32_to_cpu(btree[blk->index].hashval) == lasthash) 1419 break; 1420 blk->hashval = lasthash; 1421 btree[blk->index].hashval = cpu_to_be32(lasthash); 1422 xfs_trans_log_buf(state->args->trans, blk->bp, 1423 XFS_DA_LOGRANGE(node, &btree[blk->index], 1424 sizeof(*btree))); 1425 1426 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval); 1427 } 1428 } 1429 1430 /* 1431 * Remove an entry from an intermediate node. 1432 */ 1433 STATIC void 1434 xfs_da3_node_remove( 1435 struct xfs_da_state *state, 1436 struct xfs_da_state_blk *drop_blk) 1437 { 1438 struct xfs_da_intnode *node; 1439 struct xfs_da3_icnode_hdr nodehdr; 1440 struct xfs_da_node_entry *btree; 1441 int index; 1442 int tmp; 1443 struct xfs_inode *dp = state->args->dp; 1444 1445 trace_xfs_da_node_remove(state->args); 1446 1447 node = drop_blk->bp->b_addr; 1448 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node); 1449 ASSERT(drop_blk->index < nodehdr.count); 1450 ASSERT(drop_blk->index >= 0); 1451 1452 /* 1453 * Copy over the offending entry, or just zero it out. 1454 */ 1455 index = drop_blk->index; 1456 btree = nodehdr.btree; 1457 if (index < nodehdr.count - 1) { 1458 tmp = nodehdr.count - index - 1; 1459 tmp *= (uint)sizeof(xfs_da_node_entry_t); 1460 memmove(&btree[index], &btree[index + 1], tmp); 1461 xfs_trans_log_buf(state->args->trans, drop_blk->bp, 1462 XFS_DA_LOGRANGE(node, &btree[index], tmp)); 1463 index = nodehdr.count - 1; 1464 } 1465 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t)); 1466 xfs_trans_log_buf(state->args->trans, drop_blk->bp, 1467 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index]))); 1468 nodehdr.count -= 1; 1469 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr); 1470 xfs_trans_log_buf(state->args->trans, drop_blk->bp, 1471 XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size)); 1472 1473 /* 1474 * Copy the last hash value from the block to propagate upwards. 1475 */ 1476 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval); 1477 } 1478 1479 /* 1480 * Unbalance the elements between two intermediate nodes, 1481 * move all Btree elements from one node into another. 
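 *
 * This is the merge half of a join: everything in drop_blk is appended
 * to (or, when drop_blk holds the lower hashvals, prepended to) save_blk,
 * after which the caller unlinks and frees drop_blk.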
1482 */ 1483 STATIC void 1484 xfs_da3_node_unbalance( 1485 struct xfs_da_state *state, 1486 struct xfs_da_state_blk *drop_blk, 1487 struct xfs_da_state_blk *save_blk) 1488 { 1489 struct xfs_da_intnode *drop_node; 1490 struct xfs_da_intnode *save_node; 1491 struct xfs_da_node_entry *drop_btree; 1492 struct xfs_da_node_entry *save_btree; 1493 struct xfs_da3_icnode_hdr drop_hdr; 1494 struct xfs_da3_icnode_hdr save_hdr; 1495 struct xfs_trans *tp; 1496 int sindex; 1497 int tmp; 1498 struct xfs_inode *dp = state->args->dp; 1499 1500 trace_xfs_da_node_unbalance(state->args); 1501 1502 drop_node = drop_blk->bp->b_addr; 1503 save_node = save_blk->bp->b_addr; 1504 xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node); 1505 xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node); 1506 drop_btree = drop_hdr.btree; 1507 save_btree = save_hdr.btree; 1508 tp = state->args->trans; 1509 1510 /* 1511 * If the dying block has lower hashvals, then move all the 1512 * elements in the remaining block up to make a hole. 1513 */ 1514 if ((be32_to_cpu(drop_btree[0].hashval) < 1515 be32_to_cpu(save_btree[0].hashval)) || 1516 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) < 1517 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) { 1518 /* XXX: check this - is memmove dst correct? */ 1519 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t); 1520 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp); 1521 1522 sindex = 0; 1523 xfs_trans_log_buf(tp, save_blk->bp, 1524 XFS_DA_LOGRANGE(save_node, &save_btree[0], 1525 (save_hdr.count + drop_hdr.count) * 1526 sizeof(xfs_da_node_entry_t))); 1527 } else { 1528 sindex = save_hdr.count; 1529 xfs_trans_log_buf(tp, save_blk->bp, 1530 XFS_DA_LOGRANGE(save_node, &save_btree[sindex], 1531 drop_hdr.count * sizeof(xfs_da_node_entry_t))); 1532 } 1533 1534 /* 1535 * Move all the B-tree elements from drop_blk to save_blk. 1536 */ 1537 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t); 1538 memcpy(&save_btree[sindex], &drop_btree[0], tmp); 1539 save_hdr.count += drop_hdr.count; 1540 1541 xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr); 1542 xfs_trans_log_buf(tp, save_blk->bp, 1543 XFS_DA_LOGRANGE(save_node, &save_node->hdr, 1544 state->args->geo->node_hdr_size)); 1545 1546 /* 1547 * Save the last hashval in the remaining block for upward propagation. 1548 */ 1549 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval); 1550 } 1551 1552 /*======================================================================== 1553 * Routines used for finding things in the Btree. 1554 *========================================================================*/ 1555 1556 /* 1557 * Walk down the Btree looking for a particular filename, filling 1558 * in the state structure as we go. 1559 * 1560 * We will set the state structure to point to each of the elements 1561 * in each of the nodes where either the hashval is or should be. 1562 * 1563 * We support duplicate hashval's so for each entry in the current 1564 * node that could contain the desired hashval, descend. This is a 1565 * pruned depth-first tree search. 
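 *
 * A caller sketch (illustrative only; the real dir/attr callers do more
 * setup and error handling):
 *
 *	state = xfs_da_state_alloc(args);
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	if (!error)
 *		... retval carries the leaf-level result, e.g. -EEXIST,
 *		    -ENOENT or -ENOATTR depending on the fork ...
 *	xfs_da_state_free(state);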
1566 */ 1567 int /* error */ 1568 xfs_da3_node_lookup_int( 1569 struct xfs_da_state *state, 1570 int *result) 1571 { 1572 struct xfs_da_state_blk *blk; 1573 struct xfs_da_blkinfo *curr; 1574 struct xfs_da_intnode *node; 1575 struct xfs_da_node_entry *btree; 1576 struct xfs_da3_icnode_hdr nodehdr; 1577 struct xfs_da_args *args; 1578 xfs_dablk_t blkno; 1579 xfs_dahash_t hashval; 1580 xfs_dahash_t btreehashval; 1581 int probe; 1582 int span; 1583 int max; 1584 int error; 1585 int retval; 1586 unsigned int expected_level = 0; 1587 uint16_t magic; 1588 struct xfs_inode *dp = state->args->dp; 1589 1590 args = state->args; 1591 1592 /* 1593 * Descend thru the B-tree searching each level for the right 1594 * node to use, until the right hashval is found. 1595 */ 1596 blkno = args->geo->leafblk; 1597 for (blk = &state->path.blk[0], state->path.active = 1; 1598 state->path.active <= XFS_DA_NODE_MAXDEPTH; 1599 blk++, state->path.active++) { 1600 /* 1601 * Read the next node down in the tree. 1602 */ 1603 blk->blkno = blkno; 1604 error = xfs_da3_node_read(args->trans, args->dp, blkno, 1605 &blk->bp, args->whichfork); 1606 if (error) { 1607 blk->blkno = 0; 1608 state->path.active--; 1609 return error; 1610 } 1611 curr = blk->bp->b_addr; 1612 magic = be16_to_cpu(curr->magic); 1613 1614 if (magic == XFS_ATTR_LEAF_MAGIC || 1615 magic == XFS_ATTR3_LEAF_MAGIC) { 1616 blk->magic = XFS_ATTR_LEAF_MAGIC; 1617 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); 1618 break; 1619 } 1620 1621 if (magic == XFS_DIR2_LEAFN_MAGIC || 1622 magic == XFS_DIR3_LEAFN_MAGIC) { 1623 blk->magic = XFS_DIR2_LEAFN_MAGIC; 1624 blk->hashval = xfs_dir2_leaf_lasthash(args->dp, 1625 blk->bp, NULL); 1626 break; 1627 } 1628 1629 if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) { 1630 xfs_buf_mark_corrupt(blk->bp); 1631 return -EFSCORRUPTED; 1632 } 1633 1634 blk->magic = XFS_DA_NODE_MAGIC; 1635 1636 /* 1637 * Search an intermediate node for a match. 1638 */ 1639 node = blk->bp->b_addr; 1640 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node); 1641 btree = nodehdr.btree; 1642 1643 /* Tree taller than we can handle; bail out! */ 1644 if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) { 1645 xfs_buf_mark_corrupt(blk->bp); 1646 return -EFSCORRUPTED; 1647 } 1648 1649 /* Check the level from the root. */ 1650 if (blkno == args->geo->leafblk) 1651 expected_level = nodehdr.level - 1; 1652 else if (expected_level != nodehdr.level) { 1653 xfs_buf_mark_corrupt(blk->bp); 1654 return -EFSCORRUPTED; 1655 } else 1656 expected_level--; 1657 1658 max = nodehdr.count; 1659 blk->hashval = be32_to_cpu(btree[max - 1].hashval); 1660 1661 /* 1662 * Binary search. (note: small blocks will skip loop) 1663 */ 1664 probe = span = max / 2; 1665 hashval = args->hashval; 1666 while (span > 4) { 1667 span /= 2; 1668 btreehashval = be32_to_cpu(btree[probe].hashval); 1669 if (btreehashval < hashval) 1670 probe += span; 1671 else if (btreehashval > hashval) 1672 probe -= span; 1673 else 1674 break; 1675 } 1676 ASSERT((probe >= 0) && (probe < max)); 1677 ASSERT((span <= 4) || 1678 (be32_to_cpu(btree[probe].hashval) == hashval)); 1679 1680 /* 1681 * Since we may have duplicate hashval's, find the first 1682 * matching hashval in the node. 1683 */ 1684 while (probe > 0 && 1685 be32_to_cpu(btree[probe].hashval) >= hashval) { 1686 probe--; 1687 } 1688 while (probe < max && 1689 be32_to_cpu(btree[probe].hashval) < hashval) { 1690 probe++; 1691 } 1692 1693 /* 1694 * Pick the right block to descend on. 
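 * If every entry hashes below the search value we descend through the
 * last child; otherwise we take the first entry whose hashval is greater
 * than or equal to the one we want, which the duplicate-handling loops
 * above have just positioned probe at.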
1695 */ 1696 if (probe == max) { 1697 blk->index = max - 1; 1698 blkno = be32_to_cpu(btree[max - 1].before); 1699 } else { 1700 blk->index = probe; 1701 blkno = be32_to_cpu(btree[probe].before); 1702 } 1703 1704 /* We can't point back to the root. */ 1705 if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk)) 1706 return -EFSCORRUPTED; 1707 } 1708 1709 if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0)) 1710 return -EFSCORRUPTED; 1711 1712 /* 1713 * A leaf block that ends in the hashval that we are interested in 1714 * (final hashval == search hashval) means that the next block may 1715 * contain more entries with the same hashval, shift upward to the 1716 * next leaf and keep searching. 1717 */ 1718 for (;;) { 1719 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) { 1720 retval = xfs_dir2_leafn_lookup_int(blk->bp, args, 1721 &blk->index, state); 1722 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { 1723 retval = xfs_attr3_leaf_lookup_int(blk->bp, args); 1724 blk->index = args->index; 1725 args->blkno = blk->blkno; 1726 } else { 1727 ASSERT(0); 1728 return -EFSCORRUPTED; 1729 } 1730 if (((retval == -ENOENT) || (retval == -ENOATTR)) && 1731 (blk->hashval == args->hashval)) { 1732 error = xfs_da3_path_shift(state, &state->path, 1, 1, 1733 &retval); 1734 if (error) 1735 return error; 1736 if (retval == 0) { 1737 continue; 1738 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { 1739 /* path_shift() gives ENOENT */ 1740 retval = -ENOATTR; 1741 } 1742 } 1743 break; 1744 } 1745 *result = retval; 1746 return 0; 1747 } 1748 1749 /*======================================================================== 1750 * Utility routines. 1751 *========================================================================*/ 1752 1753 /* 1754 * Compare two intermediate nodes for "order". 1755 */ 1756 STATIC int 1757 xfs_da3_node_order( 1758 struct xfs_inode *dp, 1759 struct xfs_buf *node1_bp, 1760 struct xfs_buf *node2_bp) 1761 { 1762 struct xfs_da_intnode *node1; 1763 struct xfs_da_intnode *node2; 1764 struct xfs_da_node_entry *btree1; 1765 struct xfs_da_node_entry *btree2; 1766 struct xfs_da3_icnode_hdr node1hdr; 1767 struct xfs_da3_icnode_hdr node2hdr; 1768 1769 node1 = node1_bp->b_addr; 1770 node2 = node2_bp->b_addr; 1771 xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1); 1772 xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2); 1773 btree1 = node1hdr.btree; 1774 btree2 = node2hdr.btree; 1775 1776 if (node1hdr.count > 0 && node2hdr.count > 0 && 1777 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) || 1778 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) < 1779 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) { 1780 return 1; 1781 } 1782 return 0; 1783 } 1784 1785 /* 1786 * Link a new block into a doubly linked list of blocks (of whatever type). 1787 */ 1788 int /* error */ 1789 xfs_da3_blk_link( 1790 struct xfs_da_state *state, 1791 struct xfs_da_state_blk *old_blk, 1792 struct xfs_da_state_blk *new_blk) 1793 { 1794 struct xfs_da_blkinfo *old_info; 1795 struct xfs_da_blkinfo *new_info; 1796 struct xfs_da_blkinfo *tmp_info; 1797 struct xfs_da_args *args; 1798 struct xfs_buf *bp; 1799 int before = 0; 1800 int error; 1801 struct xfs_inode *dp = state->args->dp; 1802 1803 /* 1804 * Set up environment. 
1805 */ 1806 args = state->args; 1807 ASSERT(args != NULL); 1808 old_info = old_blk->bp->b_addr; 1809 new_info = new_blk->bp->b_addr; 1810 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || 1811 old_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1812 old_blk->magic == XFS_ATTR_LEAF_MAGIC); 1813 1814 switch (old_blk->magic) { 1815 case XFS_ATTR_LEAF_MAGIC: 1816 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp); 1817 break; 1818 case XFS_DIR2_LEAFN_MAGIC: 1819 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp); 1820 break; 1821 case XFS_DA_NODE_MAGIC: 1822 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp); 1823 break; 1824 } 1825 1826 /* 1827 * Link blocks in appropriate order. 1828 */ 1829 if (before) { 1830 /* 1831 * Link new block in before existing block. 1832 */ 1833 trace_xfs_da_link_before(args); 1834 new_info->forw = cpu_to_be32(old_blk->blkno); 1835 new_info->back = old_info->back; 1836 if (old_info->back) { 1837 error = xfs_da3_node_read(args->trans, dp, 1838 be32_to_cpu(old_info->back), 1839 &bp, args->whichfork); 1840 if (error) 1841 return error; 1842 ASSERT(bp != NULL); 1843 tmp_info = bp->b_addr; 1844 ASSERT(tmp_info->magic == old_info->magic); 1845 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno); 1846 tmp_info->forw = cpu_to_be32(new_blk->blkno); 1847 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1848 } 1849 old_info->back = cpu_to_be32(new_blk->blkno); 1850 } else { 1851 /* 1852 * Link new block in after existing block. 1853 */ 1854 trace_xfs_da_link_after(args); 1855 new_info->forw = old_info->forw; 1856 new_info->back = cpu_to_be32(old_blk->blkno); 1857 if (old_info->forw) { 1858 error = xfs_da3_node_read(args->trans, dp, 1859 be32_to_cpu(old_info->forw), 1860 &bp, args->whichfork); 1861 if (error) 1862 return error; 1863 ASSERT(bp != NULL); 1864 tmp_info = bp->b_addr; 1865 ASSERT(tmp_info->magic == old_info->magic); 1866 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno); 1867 tmp_info->back = cpu_to_be32(new_blk->blkno); 1868 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1869 } 1870 old_info->forw = cpu_to_be32(new_blk->blkno); 1871 } 1872 1873 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); 1874 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); 1875 return 0; 1876 } 1877 1878 /* 1879 * Unlink a block from a doubly linked list of blocks. 1880 */ 1881 STATIC int /* error */ 1882 xfs_da3_blk_unlink( 1883 struct xfs_da_state *state, 1884 struct xfs_da_state_blk *drop_blk, 1885 struct xfs_da_state_blk *save_blk) 1886 { 1887 struct xfs_da_blkinfo *drop_info; 1888 struct xfs_da_blkinfo *save_info; 1889 struct xfs_da_blkinfo *tmp_info; 1890 struct xfs_da_args *args; 1891 struct xfs_buf *bp; 1892 int error; 1893 1894 /* 1895 * Set up environment. 1896 */ 1897 args = state->args; 1898 ASSERT(args != NULL); 1899 save_info = save_blk->bp->b_addr; 1900 drop_info = drop_blk->bp->b_addr; 1901 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || 1902 save_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1903 save_blk->magic == XFS_ATTR_LEAF_MAGIC); 1904 ASSERT(save_blk->magic == drop_blk->magic); 1905 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) || 1906 (be32_to_cpu(save_info->back) == drop_blk->blkno)); 1907 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) || 1908 (be32_to_cpu(drop_info->back) == save_blk->blkno)); 1909 1910 /* 1911 * Unlink the leaf block from the doubly linked chain of leaves. 
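 *
 * Whichever side drop_blk sits on, its remaining neighbour (if any) is
 * read in so that its sibling pointer can be redirected at save_blk, and
 * then the surviving block's own forw/back pointers are logged.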

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						&bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						&bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}

/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active - 1) - 1;	/* skip bottom layer in path */
	for (; level >= 0; level--) {
		blk = &path->blk[level];
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
					   blk->bp->b_addr);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = -ENOENT;	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Read the next child block into a local buffer.
		 */
		error = xfs_da3_node_read(args->trans, dp, blkno, &bp,
					  args->whichfork);
		if (error)
			return error;

		/*
		 * Release the old block (if it's dirty, the trans doesn't
		 * actually let go) and swap the local buffer into the path
		 * structure. This ensures failure of the above read doesn't
		 * set a NULL buffer in an active slot in the path.
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);
		blk->blkno = blkno;
		blk->bp = bp;

		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
						   bp->b_addr);
			btree = nodehdr.btree;
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const uint8_t *name, int namelen)
{
	xfs_dahash_t	hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
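
/*
 * Illustrative sketch (not part of the kernel source): a freestanding
 * userspace copy of the hash above, without the loop unrolling, plus a
 * worked example.  For a one character name "a" the result is simply
 * 'a' = 0x61; for "ab" it is ('a' << 7) ^ 'b' = 0x3080 ^ 0x62 = 0x30e2.
 * Names longer than four characters fold each four-byte group into the
 * rotated running hash.  All demo_* names are invented for the example.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_rol32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

/* One character at a time: rotate the running hash by 7, XOR the byte in. */
static uint32_t demo_da_hashname(const uint8_t *name, int namelen)
{
	uint32_t	hash = 0;

	while (namelen--)
		hash = *name++ ^ demo_rol32(hash, 7);
	return hash;
}

int main(void)
{
	printf("%#x\n", demo_da_hashname((const uint8_t *)"a", 1));  /* 0x61 */
	printf("%#x\n", demo_da_hashname((const uint8_t *)"ab", 2)); /* 0x30e2 */
	return 0;
}
#endif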

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args	*args,
	const unsigned char	*name,
	int			len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_rfsblock_t		nblks = dp->i_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->total, &map, &nmap);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, 0);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = min(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->total, &mapp[mapi], &nmap);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = -ENOSPC;
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}

/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
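
/*
 * Illustrative sketch (not part of the kernel source): the allocation
 * strategy of xfs_da_grow_inode_int() above, reduced to its shape.  First
 * ask for the whole range as one contiguous piece; if that fails and the
 * range spans several blocks, fill it piecewise and then check that the
 * pieces cover the requested length (the kernel additionally checks that
 * they start and end exactly at the requested range).  Every name below is
 * invented; demo_alloc() stands in for xfs_bmapi_write().
 */
#if 0
struct demo_extent {
	unsigned long	startoff;	/* first block covered */
	unsigned long	blockcount;	/* number of blocks covered */
};

/*
 * Allocates at most one extent somewhere in [off, off + len); returns 1 on
 * success, 0 if nothing could be allocated.  With contig set, the whole
 * length must come back as a single piece.
 */
extern int demo_alloc(unsigned long off, unsigned long len, int contig,
		      struct demo_extent *out);

static int demo_grow(unsigned long bno, unsigned long count,
		     struct demo_extent *map, int maxext)
{
	unsigned long	b = bno, got = 0;
	int		i, used = 0;

	/* Fast path: one contiguous allocation for the whole range. */
	if (demo_alloc(bno, count, 1, &map[0]) == 1) {
		used = 1;
	} else if (count > 1) {
		/* Fall back to filling the range a piece at a time. */
		while (b < bno + count && used < maxext) {
			if (demo_alloc(b, bno + count - b, 0, &map[used]) != 1)
				break;
			b = map[used].startoff + map[used].blockcount;
			used++;
		}
	}

	/* The pieces must add up to the requested length. */
	for (i = 0; i < used; i++)
		got += map[i].blockcount;
	if (got != count)
		return -1;	/* would be -ENOSPC in the kernel */
	return 0;
}
#endif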

/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, lastoff == 0))
		return -EFSCORRUPTED;
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr,
					    dead_leaf2);
		ents = leafhdr.ents;
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
		btree = deadhdr.btree;
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (XFS_IS_CORRUPT(mp,
				   be32_to_cpu(sib_info->forw) != last_blkno ||
				   sib_info->magic != dead_info->magic)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (XFS_IS_CORRUPT(mp,
				   be32_to_cpu(sib_info->back) != last_blkno ||
				   sib_info->magic != dead_info->magic)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
		if (XFS_IS_CORRUPT(mp,
				   level >= 0 && level != par_hdr.level + 1)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = par_hdr.btree;
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (XFS_IS_CORRUPT(mp, par_blkno == 0)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
		if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = par_hdr.btree;
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
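
/*
 * Illustrative sketch (not part of the kernel source): the trick above is
 * the file-block analogue of a "swap remove" from an array.  Removing the
 * last element never needs extra space, so the victim is overwritten with
 * the last element and only the last slot is freed; here the extra work is
 * patching the sibling and parent pointers that still refer to the moved
 * block.  A minimal array version, with names invented for the example:
 */
#if 0
static void demo_swap_remove(int *arr, int *len, int victim)
{
	/* Overwrite the victim with the last element ... */
	arr[victim] = arr[*len - 1];
	/* ... then shrink from the end, which can always be done in place. */
	(*len)--;
}
#endif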

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		dead_blkno,
	struct xfs_buf		*dead_buf)
{
	struct xfs_inode	*dp;
	int			done, error, w, count;
	struct xfs_trans	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}

static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	int			whichfork,
	struct xfs_buf_map	**mapp,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb = xfs_dabuf_nfsb(mp, whichfork);
	struct xfs_bmbt_irec	irec, *irecs = &irec;
	struct xfs_buf_map	*map = *mapp;
	xfs_fileoff_t		off = bno;
	int			error = 0, nirecs, i;

	if (nfsb > 1)
		irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);

	nirecs = nfsb;
	error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
			       xfs_bmapi_aflag(whichfork));
	if (error)
		goto out_free_irecs;

	/*
	 * Use the caller provided map for the single map case, else allocate a
	 * larger one that needs to be freed by the caller.
	 */
	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
		if (!map) {
			error = -ENOMEM;
			goto out_free_irecs;
		}
		*mapp = map;
	}

	for (i = 0; i < nirecs; i++) {
		if (irecs[i].br_startblock == HOLESTARTBLOCK ||
		    irecs[i].br_startblock == DELAYSTARTBLOCK)
			goto invalid_mapping;
		if (off != irecs[i].br_startoff)
			goto invalid_mapping;

		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
		off += irecs[i].br_blockcount;
	}

	if (off != bno + nfsb)
		goto invalid_mapping;

	*nmaps = nirecs;
out_free_irecs:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;

invalid_mapping:
	/* Caller ok with no mapping. */
	if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) {
		error = -EFSCORRUPTED;
		if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
			xfs_alert(mp, "%s: bno %u inode %llu",
				  __func__, bno, dp->i_ino);

			for (i = 0; i < nirecs; i++) {
				xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
					  i, irecs[i].br_startoff,
					  irecs[i].br_startblock,
					  irecs[i].br_blockcount,
					  irecs[i].br_state);
			}
		}
	} else {
		*nmaps = 0;
	}
	goto out_free_irecs;
}

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	struct xfs_buf_map	map, *mapp = &map;
	int			nmap = 1;
	int			error;

	*bpp = NULL;
	error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, &nmap);
	if (error || nmap == 0)
		goto out_free;

	error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp);
	if (error)
		goto out_free;

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
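
/*
 * Illustrative sketch (not part of the kernel source): xfs_dabuf_map() and
 * its callers above rely on a small ownership idiom.  The caller passes a
 * pointer to a single map on its own stack; the callee replaces it with a
 * heap allocation only when more than one mapping is needed, and the
 * caller frees the result only if the pointer no longer refers to the
 * stack copy.  A generic userspace rendering, all names invented:
 */
#if 0
#include <stdlib.h>

struct demo_map { long bn; long len; };

/* May replace *mapp with a heap array when n > 1; caller compares and frees. */
static int demo_build_maps(struct demo_map **mapp, int n)
{
	if (n > 1) {
		struct demo_map *m = calloc(n, sizeof(*m));

		if (!m)
			return -1;
		*mapp = m;
	}
	/* ... fill (*mapp)[0 .. n-1] here ... */
	return 0;
}

static void demo_caller(int n)
{
	struct demo_map	map, *mapp = &map;	/* single-map fast path */

	if (demo_build_maps(&mapp, n))
		return;
	/* ... use mapp[0 .. n-1] ... */
	if (mapp != &map)			/* only free the heap case */
		free(mapp);
}
#endif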

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	struct xfs_buf_map	map, *mapp = &map;
	int			nmap = 1;
	int			error;

	*bpp = NULL;
	error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
	if (error || !nmap)
		goto out_free;

	error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0,
				       &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Readahead the dir/attr block.
 */
int
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
	if (error || !nmap)
		goto out_free;

	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
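
/*
 * Illustrative sketch (not part of the kernel source): how a caller of the
 * two routines above typically pairs them.  Readahead is issued for a block
 * that will probably be needed soon, and the blocking read later attaches
 * the verifier and sets the buffer reference class.  The verifier name
 * xfs_da3_node_buf_ops and the calling context are assumptions made for the
 * example, not something this listing defines.
 */
#if 0
STATIC int
example_prefetch_then_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	struct xfs_buf		**bpp)
{
	/* Hint the block in early; failure here is not fatal. */
	xfs_da_reada_buf(dp, bno, 0, XFS_DATA_FORK, &xfs_da3_node_buf_ops);

	/* Later: the real read, verified and cached with a btree ref. */
	return xfs_da_read_buf(tp, dp, bno, 0, bpp, XFS_DATA_FORK,
			       &xfs_da3_node_buf_ops);
}
#endif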