1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 4 * Copyright (c) 2013 Red Hat, Inc. 5 * All Rights Reserved. 6 */ 7 #include "xfs.h" 8 #include "xfs_fs.h" 9 #include "xfs_shared.h" 10 #include "xfs_format.h" 11 #include "xfs_log_format.h" 12 #include "xfs_trans_resv.h" 13 #include "xfs_sb.h" 14 #include "xfs_mount.h" 15 #include "xfs_da_format.h" 16 #include "xfs_da_btree.h" 17 #include "xfs_inode.h" 18 #include "xfs_trans.h" 19 #include "xfs_bmap_btree.h" 20 #include "xfs_bmap.h" 21 #include "xfs_attr_sf.h" 22 #include "xfs_attr.h" 23 #include "xfs_attr_remote.h" 24 #include "xfs_attr_leaf.h" 25 #include "xfs_error.h" 26 #include "xfs_trace.h" 27 #include "xfs_buf_item.h" 28 #include "xfs_dir2.h" 29 #include "xfs_log.h" 30 #include "xfs_ag.h" 31 #include "xfs_errortag.h" 32 33 34 /* 35 * xfs_attr_leaf.c 36 * 37 * Routines to implement leaf blocks of attributes as Btrees of hashed names. 38 */ 39 40 /*======================================================================== 41 * Function prototypes for the kernel. 42 *========================================================================*/ 43 44 /* 45 * Routines used for growing the Btree. 
46 */ 47 STATIC int xfs_attr3_leaf_create(struct xfs_da_args *args, 48 xfs_dablk_t which_block, struct xfs_buf **bpp); 49 STATIC int xfs_attr3_leaf_add_work(struct xfs_buf *leaf_buffer, 50 struct xfs_attr3_icleaf_hdr *ichdr, 51 struct xfs_da_args *args, int freemap_index); 52 STATIC void xfs_attr3_leaf_compact(struct xfs_da_args *args, 53 struct xfs_attr3_icleaf_hdr *ichdr, 54 struct xfs_buf *leaf_buffer); 55 STATIC void xfs_attr3_leaf_rebalance(xfs_da_state_t *state, 56 xfs_da_state_blk_t *blk1, 57 xfs_da_state_blk_t *blk2); 58 STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state, 59 xfs_da_state_blk_t *leaf_blk_1, 60 struct xfs_attr3_icleaf_hdr *ichdr1, 61 xfs_da_state_blk_t *leaf_blk_2, 62 struct xfs_attr3_icleaf_hdr *ichdr2, 63 int *number_entries_in_blk1, 64 int *number_usedbytes_in_blk1); 65 66 /* 67 * Utility routines. 68 */ 69 STATIC void xfs_attr3_leaf_moveents(struct xfs_da_args *args, 70 struct xfs_attr_leafblock *src_leaf, 71 struct xfs_attr3_icleaf_hdr *src_ichdr, int src_start, 72 struct xfs_attr_leafblock *dst_leaf, 73 struct xfs_attr3_icleaf_hdr *dst_ichdr, int dst_start, 74 int move_count); 75 STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index); 76 77 /* 78 * attr3 block 'firstused' conversion helpers. 79 * 80 * firstused refers to the offset of the first used byte of the nameval region 81 * of an attr leaf block. The region starts at the tail of the block and expands 82 * backwards towards the middle. As such, firstused is initialized to the block 83 * size for an empty leaf block and is reduced from there. 84 * 85 * The attr3 block size is pegged to the fsb size and the maximum fsb is 64k. 86 * The in-core firstused field is 32-bit and thus supports the maximum fsb size. 87 * The on-disk field is only 16-bit, however, and overflows at 64k. Since this 88 * only occurs at exactly 64k, we use zero as a magic on-disk value to represent 89 * the attr block size. 
The following helpers manage the conversion between the
 * in-core and on-disk formats.
 */

/*
 * Decode the on-disk firstused field into the 32-bit in-core value.
 *
 * Handles both the attr (v4) and attr3 (v5/CRC) header layouts, selected by
 * the on-disk magic. A stored value of XFS_ATTR3_LEAF_NULLOFF is the magic
 * meaning "block size" (see block comment above) and is expanded here.
 */
static void
xfs_attr3_leaf_firstused_from_disk(
	struct xfs_da_geometry		*geo,
	struct xfs_attr3_icleaf_hdr	*to,
	struct xfs_attr_leafblock	*from)
{
	struct xfs_attr3_leaf_hdr	*hdr3;

	if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
		hdr3 = (struct xfs_attr3_leaf_hdr *) from;
		to->firstused = be16_to_cpu(hdr3->firstused);
	} else {
		to->firstused = be16_to_cpu(from->hdr.firstused);
	}

	/*
	 * Convert from the magic fsb size value to actual blocksize. This
	 * should only occur for empty blocks when the block size overflows
	 * 16-bits.
	 */
	if (to->firstused == XFS_ATTR3_LEAF_NULLOFF) {
		ASSERT(!to->count && !to->usedbytes);
		ASSERT(geo->blksize > USHRT_MAX);
		to->firstused = geo->blksize;
	}
}

/*
 * Encode the in-core firstused value into the 16-bit on-disk field,
 * substituting the magic XFS_ATTR3_LEAF_NULLOFF value when the in-core
 * value (== block size) does not fit in 16 bits.
 */
static void
xfs_attr3_leaf_firstused_to_disk(
	struct xfs_da_geometry		*geo,
	struct xfs_attr_leafblock	*to,
	struct xfs_attr3_icleaf_hdr	*from)
{
	struct xfs_attr3_leaf_hdr	*hdr3;
	uint32_t			firstused;

	/* magic value should only be seen on disk */
	ASSERT(from->firstused != XFS_ATTR3_LEAF_NULLOFF);

	/*
	 * Scale down the 32-bit in-core firstused value to the 16-bit on-disk
	 * value. This only overflows at the max supported value of 64k. Use the
	 * magic on-disk value to represent block size in this case.
	 */
	firstused = from->firstused;
	if (firstused > USHRT_MAX) {
		ASSERT(from->firstused == geo->blksize);
		firstused = XFS_ATTR3_LEAF_NULLOFF;
	}

	if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
		hdr3 = (struct xfs_attr3_leaf_hdr *) to;
		hdr3->firstused = cpu_to_be16(firstused);
	} else {
		to->hdr.firstused = cpu_to_be16(firstused);
	}
}

/*
 * Unpack an on-disk attr leaf header (attr or attr3 format, selected by the
 * on-disk magic) into the common in-core header representation.
 */
void
xfs_attr3_leaf_hdr_from_disk(
	struct xfs_da_geometry		*geo,
	struct xfs_attr3_icleaf_hdr	*to,
	struct xfs_attr_leafblock	*from)
{
	int	i;

	ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
	       from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

	if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
		struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)from;

		to->forw = be32_to_cpu(hdr3->info.hdr.forw);
		to->back = be32_to_cpu(hdr3->info.hdr.back);
		to->magic = be16_to_cpu(hdr3->info.hdr.magic);
		to->count = be16_to_cpu(hdr3->count);
		to->usedbytes = be16_to_cpu(hdr3->usedbytes);
		xfs_attr3_leaf_firstused_from_disk(geo, to, from);
		to->holes = hdr3->holes;

		for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
			to->freemap[i].base = be16_to_cpu(hdr3->freemap[i].base);
			to->freemap[i].size = be16_to_cpu(hdr3->freemap[i].size);
		}
		return;
	}
	to->forw = be32_to_cpu(from->hdr.info.forw);
	to->back = be32_to_cpu(from->hdr.info.back);
	to->magic = be16_to_cpu(from->hdr.info.magic);
	to->count = be16_to_cpu(from->hdr.count);
	to->usedbytes = be16_to_cpu(from->hdr.usedbytes);
	xfs_attr3_leaf_firstused_from_disk(geo, to, from);
	to->holes = from->hdr.holes;

	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		to->freemap[i].base = be16_to_cpu(from->hdr.freemap[i].base);
		to->freemap[i].size = be16_to_cpu(from->hdr.freemap[i].size);
	}
}

/*
 * Pack the in-core attr leaf header back into on-disk format. The target
 * layout (attr vs attr3) is selected by the in-core magic; pad fields are
 * zeroed so no stale kernel memory leaks to disk.
 */
void
xfs_attr3_leaf_hdr_to_disk(
	struct xfs_da_geometry		*geo,
	struct xfs_attr_leafblock	*to,
	struct xfs_attr3_icleaf_hdr	*from)
{
	int	i;

	ASSERT(from->magic == XFS_ATTR_LEAF_MAGIC ||
	       from->magic == XFS_ATTR3_LEAF_MAGIC);

	if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
		struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)to;

		hdr3->info.hdr.forw = cpu_to_be32(from->forw);
		hdr3->info.hdr.back = cpu_to_be32(from->back);
		hdr3->info.hdr.magic = cpu_to_be16(from->magic);
		hdr3->count = cpu_to_be16(from->count);
		hdr3->usedbytes = cpu_to_be16(from->usedbytes);
		xfs_attr3_leaf_firstused_to_disk(geo, to, from);
		hdr3->holes = from->holes;
		hdr3->pad1 = 0;

		for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
			hdr3->freemap[i].base = cpu_to_be16(from->freemap[i].base);
			hdr3->freemap[i].size = cpu_to_be16(from->freemap[i].size);
		}
		return;
	}
	to->hdr.info.forw = cpu_to_be32(from->forw);
	to->hdr.info.back = cpu_to_be32(from->back);
	to->hdr.info.magic = cpu_to_be16(from->magic);
	to->hdr.count = cpu_to_be16(from->count);
	to->hdr.usedbytes = cpu_to_be16(from->usedbytes);
	xfs_attr3_leaf_firstused_to_disk(geo, to, from);
	to->hdr.holes = from->holes;
	to->hdr.pad1 = 0;

	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		to->hdr.freemap[i].base = cpu_to_be16(from->freemap[i].base);
		to->hdr.freemap[i].size = cpu_to_be16(from->freemap[i].size);
	}
}

/*
 * Sanity-check a single leaf entry: hash ordering relative to the previous
 * entry, the name index staying inside the nameval region, and the local or
 * remote name structure fitting inside the buffer. Returns the failure
 * address on corruption, NULL if the entry looks fine.
 */
static xfs_failaddr_t
xfs_attr3_leaf_verify_entry(
	struct xfs_mount		*mp,
	char				*buf_end,
	struct xfs_attr_leafblock	*leaf,
	struct xfs_attr3_icleaf_hdr	*leafhdr,
	struct xfs_attr_leaf_entry	*ent,
	int				idx,
	__u32				*last_hashval)
{
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote *rentry;
	char				*name_end;
	unsigned int			nameidx;
	unsigned int			namesize;
	__u32				hashval;

	/* hash order check: entries must be sorted by ascending hashval */
	hashval = be32_to_cpu(ent->hashval);
	if (hashval < *last_hashval)
		return __this_address;
	*last_hashval = hashval;

	/* name offset must lie within the used nameval region of the block */
	nameidx = be16_to_cpu(ent->nameidx);
	if (nameidx < leafhdr->firstused || nameidx >= mp->m_attr_geo->blksize)
		return __this_address;

	/*
	 * Check the name information. The namelen fields are u8 so we can't
	 * possibly exceed the maximum name length of 255 bytes.
	 */
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = xfs_attr3_leaf_name_local(leaf, idx);
		namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
				be16_to_cpu(lentry->valuelen));
		name_end = (char *)lentry + namesize;
		if (lentry->namelen == 0)
			return __this_address;
	} else {
		rentry = xfs_attr3_leaf_name_remote(leaf, idx);
		namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
		name_end = (char *)rentry + namesize;
		if (rentry->namelen == 0)
			return __this_address;
		/* a completed remote attr must point at a real value block */
		if (!(ent->flags & XFS_ATTR_INCOMPLETE) &&
		    rentry->valueblk == 0)
			return __this_address;
	}

	if (name_end > buf_end)
		return __this_address;

	return NULL;
}

/*
 * Structural verifier for an attr leaf block: header sanity, per-entry
 * checks, and freemap consistency. Returns the failure address of the
 * first problem found, or NULL if the block passes.
 */
static xfs_failaddr_t
xfs_attr3_leaf_verify(
	struct xfs_buf			*bp)
{
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_mount		*mp = bp->b_mount;
	struct xfs_attr_leafblock	*leaf = bp->b_addr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*ent;
	char				*buf_end;
	uint32_t			end;	/* must be 32bit - see below */
	__u32				last_hashval = 0;
	int				i;
	xfs_failaddr_t			fa;

	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);

	fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
	if (fa)
		return fa;

	/*
	 * firstused is the block offset of the first name info structure.
	 * Make sure it doesn't go off the block or crash into the header.
	 */
	if (ichdr.firstused > mp->m_attr_geo->blksize)
		return __this_address;
	if (ichdr.firstused < xfs_attr3_leaf_hdr_size(leaf))
		return __this_address;

	/* Make sure the entries array doesn't crash into the name info. */
	entries = xfs_attr3_leaf_entryp(bp->b_addr);
	if ((char *)&entries[ichdr.count] >
	    (char *)bp->b_addr + ichdr.firstused)
		return __this_address;

	/*
	 * NOTE: This verifier historically failed empty leaf buffers because
	 * we expect the fork to be in another format. Empty attr fork format
	 * conversions are possible during xattr set, however, and format
	 * conversion is not atomic with the xattr set that triggers it. We
	 * cannot assume leaf blocks are non-empty until that is addressed.
	 */
	buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
	for (i = 0, ent = entries; i < ichdr.count; ent++, i++) {
		fa = xfs_attr3_leaf_verify_entry(mp, buf_end, leaf, &ichdr,
				ent, i, &last_hashval);
		if (fa)
			return fa;
	}

	/*
	 * Quickly check the freemap information. Attribute data has to be
	 * aligned to 4-byte boundaries, and likewise for the free space.
	 *
	 * Note that for 64k block size filesystems, the freemap entries cannot
	 * overflow as they are only be16 fields. However, when checking end
	 * pointer of the freemap, we have to be careful to detect overflows and
	 * so use uint32_t for those checks.
	 */
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
			return __this_address;
		if (ichdr.freemap[i].base & 0x3)
			return __this_address;
		if (ichdr.freemap[i].size > mp->m_attr_geo->blksize)
			return __this_address;
		if (ichdr.freemap[i].size & 0x3)
			return __this_address;

		/* beware of 16 bit overflows here */
		end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
		if (end < ichdr.freemap[i].base)
			return __this_address;
		if (end > mp->m_attr_geo->blksize)
			return __this_address;
	}

	return NULL;
}

/*
 * Write verifier: run the structural checks, then (on CRC-enabled
 * filesystems) stamp the LSN from the buffer log item and recompute the
 * block checksum just before the buffer goes to disk.
 */
static void
xfs_attr3_leaf_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_attr3_leaf_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	/* non-CRC filesystems carry no LSN or checksum in the block */
	if (!xfs_has_crc(mp))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done on
 * leaf level blocks when detection identifies the tree as a node format tree
 * incorrectly. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_attr3_leaf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	xfs_failaddr_t		fa;

	/* CRC failure and structural corruption are reported distinctly. */
	if (xfs_has_crc(mp) &&
	     !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_attr3_leaf_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

/* Buffer operations for attr leaf blocks; accepts both v4 and v5 magics. */
const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
	.name = "xfs_attr3_leaf",
	.magic16 = { cpu_to_be16(XFS_ATTR_LEAF_MAGIC),
		     cpu_to_be16(XFS_ATTR3_LEAF_MAGIC) },
	.verify_read = xfs_attr3_leaf_read_verify,
	.verify_write = xfs_attr3_leaf_write_verify,
	.verify_struct = xfs_attr3_leaf_verify,
};

/*
 * Read and verify an attr leaf block at dablk @bno of inode @dp's attr fork,
 * tagging the buffer with the attr leaf log type when it is read within a
 * transaction. Returns 0 and sets *bpp on success.
 */
int
xfs_attr3_leaf_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	struct xfs_buf		**bpp)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, 0, bpp, XFS_ATTR_FORK,
			&xfs_attr3_leaf_buf_ops);
	if (!err && tp && *bpp)
		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
	return err;
}

/*========================================================================
 * Namespace helper routines
 *========================================================================*/

/*
 * If we are in log recovery, then we want the lookup to ignore the INCOMPLETE
 * flag on disk - if there's an incomplete attr then recovery needs to tear it
 * down. If there's no incomplete attr, then recovery needs to tear that attr
 * down to replace it with the attr that has been logged. In this case, the
 * INCOMPLETE flag will not be set in attr->attr_filter, but rather
 * XFS_DA_OP_RECOVERY will be set in args->op_flags.
456 */ 457 static bool 458 xfs_attr_match( 459 struct xfs_da_args *args, 460 uint8_t namelen, 461 unsigned char *name, 462 int flags) 463 { 464 465 if (args->namelen != namelen) 466 return false; 467 if (memcmp(args->name, name, namelen) != 0) 468 return false; 469 470 /* Recovery ignores the INCOMPLETE flag. */ 471 if ((args->op_flags & XFS_DA_OP_RECOVERY) && 472 args->attr_filter == (flags & XFS_ATTR_NSP_ONDISK_MASK)) 473 return true; 474 475 /* All remaining matches need to be filtered by INCOMPLETE state. */ 476 if (args->attr_filter != 477 (flags & (XFS_ATTR_NSP_ONDISK_MASK | XFS_ATTR_INCOMPLETE))) 478 return false; 479 return true; 480 } 481 482 static int 483 xfs_attr_copy_value( 484 struct xfs_da_args *args, 485 unsigned char *value, 486 int valuelen) 487 { 488 /* 489 * No copy if all we have to do is get the length 490 */ 491 if (!args->valuelen) { 492 args->valuelen = valuelen; 493 return 0; 494 } 495 496 /* 497 * No copy if the length of the existing buffer is too small 498 */ 499 if (args->valuelen < valuelen) { 500 args->valuelen = valuelen; 501 return -ERANGE; 502 } 503 504 if (!args->value) { 505 args->value = kvmalloc(valuelen, GFP_KERNEL | __GFP_NOLOCKDEP); 506 if (!args->value) 507 return -ENOMEM; 508 } 509 args->valuelen = valuelen; 510 511 /* remote block xattr requires IO for copy-in */ 512 if (args->rmtblkno) 513 return xfs_attr_rmtval_get(args); 514 515 /* 516 * This is to prevent a GCC warning because the remote xattr case 517 * doesn't have a value to pass in. In that case, we never reach here, 518 * but GCC can't work that out and so throws a "passing NULL to 519 * memcpy" warning. 520 */ 521 if (!value) 522 return -EINVAL; 523 memcpy(args->value, value, valuelen); 524 return 0; 525 } 526 527 /*======================================================================== 528 * External routines when attribute fork size < XFS_LITINO(mp). 
529 *========================================================================*/ 530 531 /* 532 * Query whether the total requested number of attr fork bytes of extended 533 * attribute space will be able to fit inline. 534 * 535 * Returns zero if not, else the i_forkoff fork offset to be used in the 536 * literal area for attribute data once the new bytes have been added. 537 * 538 * i_forkoff must be 8 byte aligned, hence is stored as a >>3 value; 539 * special case for dev/uuid inodes, they have fixed size data forks. 540 */ 541 int 542 xfs_attr_shortform_bytesfit( 543 struct xfs_inode *dp, 544 int bytes) 545 { 546 struct xfs_mount *mp = dp->i_mount; 547 int64_t dsize; 548 int minforkoff; 549 int maxforkoff; 550 int offset; 551 552 /* 553 * Check if the new size could fit at all first: 554 */ 555 if (bytes > XFS_LITINO(mp)) 556 return 0; 557 558 /* rounded down */ 559 offset = (XFS_LITINO(mp) - bytes) >> 3; 560 561 if (dp->i_df.if_format == XFS_DINODE_FMT_DEV) { 562 minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; 563 return (offset >= minforkoff) ? minforkoff : 0; 564 } 565 566 /* 567 * If the requested numbers of bytes is smaller or equal to the 568 * current attribute fork size we can always proceed. 569 * 570 * Note that if_bytes in the data fork might actually be larger than 571 * the current data fork size is due to delalloc extents. In that 572 * case either the extent count will go down when they are converted 573 * to real extents, or the delalloc conversion will take care of the 574 * literal area rebalancing. 575 */ 576 if (bytes <= XFS_IFORK_ASIZE(dp)) 577 return dp->i_forkoff; 578 579 /* 580 * For attr2 we can try to move the forkoff if there is space in the 581 * literal area, but for the old format we are done if there is no 582 * space in the fixed attribute fork. 
583 */ 584 if (!xfs_has_attr2(mp)) 585 return 0; 586 587 dsize = dp->i_df.if_bytes; 588 589 switch (dp->i_df.if_format) { 590 case XFS_DINODE_FMT_EXTENTS: 591 /* 592 * If there is no attr fork and the data fork is extents, 593 * determine if creating the default attr fork will result 594 * in the extents form migrating to btree. If so, the 595 * minimum offset only needs to be the space required for 596 * the btree root. 597 */ 598 if (!dp->i_forkoff && dp->i_df.if_bytes > 599 xfs_default_attroffset(dp)) 600 dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS); 601 break; 602 case XFS_DINODE_FMT_BTREE: 603 /* 604 * If we have a data btree then keep forkoff if we have one, 605 * otherwise we are adding a new attr, so then we set 606 * minforkoff to where the btree root can finish so we have 607 * plenty of room for attrs 608 */ 609 if (dp->i_forkoff) { 610 if (offset < dp->i_forkoff) 611 return 0; 612 return dp->i_forkoff; 613 } 614 dsize = XFS_BMAP_BROOT_SPACE(mp, dp->i_df.if_broot); 615 break; 616 } 617 618 /* 619 * A data fork btree root must have space for at least 620 * MINDBTPTRS key/ptr pairs if the data fork is small or empty. 621 */ 622 minforkoff = max_t(int64_t, dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); 623 minforkoff = roundup(minforkoff, 8) >> 3; 624 625 /* attr fork btree root can have at least this many key/ptr pairs */ 626 maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS); 627 maxforkoff = maxforkoff >> 3; /* rounded down */ 628 629 if (offset >= maxforkoff) 630 return maxforkoff; 631 if (offset >= minforkoff) 632 return offset; 633 return 0; 634 } 635 636 /* 637 * Switch on the ATTR2 superblock bit (implies also FEATURES2) unless: 638 * - noattr2 mount option is set, 639 * - on-disk version bit says it is already set, or 640 * - the attr2 mount option is not set to enable automatic upgrade from attr1. 
641 */ 642 STATIC void 643 xfs_sbversion_add_attr2( 644 struct xfs_mount *mp, 645 struct xfs_trans *tp) 646 { 647 if (xfs_has_noattr2(mp)) 648 return; 649 if (mp->m_sb.sb_features2 & XFS_SB_VERSION2_ATTR2BIT) 650 return; 651 if (!xfs_has_attr2(mp)) 652 return; 653 654 spin_lock(&mp->m_sb_lock); 655 xfs_add_attr2(mp); 656 spin_unlock(&mp->m_sb_lock); 657 xfs_log_sb(tp); 658 } 659 660 /* 661 * Create the initial contents of a shortform attribute list. 662 */ 663 void 664 xfs_attr_shortform_create( 665 struct xfs_da_args *args) 666 { 667 struct xfs_inode *dp = args->dp; 668 struct xfs_ifork *ifp = dp->i_afp; 669 struct xfs_attr_sf_hdr *hdr; 670 671 trace_xfs_attr_sf_create(args); 672 673 ASSERT(ifp->if_bytes == 0); 674 if (ifp->if_format == XFS_DINODE_FMT_EXTENTS) 675 ifp->if_format = XFS_DINODE_FMT_LOCAL; 676 xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); 677 hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data; 678 memset(hdr, 0, sizeof(*hdr)); 679 hdr->totsize = cpu_to_be16(sizeof(*hdr)); 680 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); 681 } 682 683 /* 684 * Return -EEXIST if attr is found, or -ENOATTR if not 685 * args: args containing attribute name and namelen 686 * sfep: If not null, pointer will be set to the last attr entry found on 687 -EEXIST. On -ENOATTR pointer is left at the last entry in the list 688 * basep: If not null, pointer is set to the byte offset of the entry in the 689 * list on -EEXIST. 
On -ENOATTR, pointer is left at the byte offset of
 *	  the last entry in the list
 */
int
xfs_attr_sf_findname(
	struct xfs_da_args		*args,
	struct xfs_attr_sf_entry	**sfep,
	unsigned int			*basep)
{
	struct xfs_attr_shortform	*sf;
	struct xfs_attr_sf_entry	*sfe;
	unsigned int			base = sizeof(struct xfs_attr_sf_hdr);
	int				size = 0;
	int				end;
	int				i;

	sf = (struct xfs_attr_shortform *)args->dp->i_afp->if_u1.if_data;
	sfe = &sf->list[0];
	end = sf->hdr.count;
	/*
	 * NOTE: base is advanced in the for-update clause by the size
	 * computed on the *previous* iteration, so when we break on a match
	 * base is the byte offset of the matching entry itself.
	 */
	for (i = 0; i < end; sfe = xfs_attr_sf_nextentry(sfe),
			     base += size, i++) {
		size = xfs_attr_sf_entsize(sfe);
		if (!xfs_attr_match(args, sfe->namelen, sfe->nameval,
				    sfe->flags))
			continue;
		break;
	}

	/* On a miss these report the last entry / its offset (see above). */
	if (sfep != NULL)
		*sfep = sfe;

	if (basep != NULL)
		*basep = base;

	if (i == end)
		return -ENOATTR;
	return -EEXIST;
}

/*
 * Add a name/value pair to the shortform attribute list.
 * Overflow from the inode has already been checked for.
 */
void
xfs_attr_shortform_add(
	struct xfs_da_args	*args,
	int			forkoff)
{
	struct xfs_attr_shortform *sf;
	struct xfs_attr_sf_entry *sfe;
	int			offset, size;
	struct xfs_mount	*mp;
	struct xfs_inode	*dp;
	struct xfs_ifork	*ifp;

	trace_xfs_attr_sf_add(args);

	dp = args->dp;
	mp = dp->i_mount;
	dp->i_forkoff = forkoff;

	ifp = dp->i_afp;
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
	/* the caller guarantees the attr does not already exist */
	if (xfs_attr_sf_findname(args, &sfe, NULL) == -EEXIST)
		ASSERT(0);

	/*
	 * Remember the insertion point as an offset: the realloc below may
	 * move the fork data, invalidating sf and sfe.
	 */
	offset = (char *)sfe - (char *)sf;
	size = xfs_attr_sf_entsize_byname(args->namelen, args->valuelen);
	xfs_idata_realloc(dp, size, XFS_ATTR_FORK);
	/* re-derive the pointers against the possibly-moved fork data */
	sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
	sfe = (struct xfs_attr_sf_entry *)((char *)sf + offset);

	sfe->namelen = args->namelen;
	sfe->valuelen = args->valuelen;
	sfe->flags = args->attr_filter;
	memcpy(sfe->nameval, args->name, args->namelen);
	memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
	sf->hdr.count++;
	be16_add_cpu(&sf->hdr.totsize, size);
	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);

	xfs_sbversion_add_attr2(mp, args->trans);
}

/*
 * After the last attribute is removed revert to original inode format,
 * making all literal area available to the data fork once more.
 */
void
xfs_attr_fork_remove(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp)
{
	/* must not tear down a fork that still maps extents */
	ASSERT(ip->i_afp->if_nextents == 0);

	xfs_idestroy_fork(ip->i_afp);
	kmem_cache_free(xfs_ifork_cache, ip->i_afp);
	ip->i_afp = NULL;
	ip->i_forkoff = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

/*
 * Remove an attribute from the shortform attribute list structure.
 */
int
xfs_attr_sf_removename(
	struct xfs_da_args		*args)
{
	struct xfs_attr_shortform	*sf;
	struct xfs_attr_sf_entry	*sfe;
	int				size = 0, end, totsize;
	unsigned int			base;
	struct xfs_mount		*mp;
	struct xfs_inode		*dp;
	int				error;

	trace_xfs_attr_sf_remove(args);

	dp = args->dp;
	mp = dp->i_mount;
	sf = (struct xfs_attr_shortform *)dp->i_afp->if_u1.if_data;

	error = xfs_attr_sf_findname(args, &sfe, &base);

	/*
	 * If we are recovering an operation, finding nothing to
	 * remove is not an error - it just means there was nothing
	 * to clean up.
	 */
	if (error == -ENOATTR && (args->op_flags & XFS_DA_OP_RECOVERY))
		return 0;
	if (error != -EEXIST)
		return error;
	size = xfs_attr_sf_entsize(sfe);

	/*
	 * Fix up the attribute fork data, covering the hole
	 */
	end = base + size;
	totsize = be16_to_cpu(sf->hdr.totsize);
	/* slide any trailing entries down over the removed one */
	if (end != totsize)
		memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
	sf->hdr.count--;
	be16_add_cpu(&sf->hdr.totsize, -size);

	/*
	 * Fix up the start offset of the attribute fork
	 */
	totsize -= size;
	/*
	 * If that was the last attr and the fork can go away entirely (attr2,
	 * no data btree forcing a forkoff, and not mid add/replace), tear the
	 * attr fork down; otherwise just shrink the fork in place.
	 */
	if (totsize == sizeof(xfs_attr_sf_hdr_t) && xfs_has_attr2(mp) &&
	    (dp->i_df.if_format != XFS_DINODE_FMT_BTREE) &&
	    !(args->op_flags & (XFS_DA_OP_ADDNAME | XFS_DA_OP_REPLACE))) {
		xfs_attr_fork_remove(dp, args->trans);
	} else {
		xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
		dp->i_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
		ASSERT(dp->i_forkoff);
		ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
				(args->op_flags & XFS_DA_OP_ADDNAME) ||
				!xfs_has_attr2(mp) ||
				dp->i_df.if_format == XFS_DINODE_FMT_BTREE);
		xfs_trans_log_inode(args->trans, dp,
					XFS_ILOG_CORE | XFS_ILOG_ADATA);
	}

	xfs_sbversion_add_attr2(mp, args->trans);

	return 0;
}

/*
 * Look up a name in a shortform attribute list structure.
863 */ 864 /*ARGSUSED*/ 865 int 866 xfs_attr_shortform_lookup(xfs_da_args_t *args) 867 { 868 struct xfs_attr_shortform *sf; 869 struct xfs_attr_sf_entry *sfe; 870 int i; 871 struct xfs_ifork *ifp; 872 873 trace_xfs_attr_sf_lookup(args); 874 875 ifp = args->dp->i_afp; 876 ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); 877 sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data; 878 sfe = &sf->list[0]; 879 for (i = 0; i < sf->hdr.count; 880 sfe = xfs_attr_sf_nextentry(sfe), i++) { 881 if (xfs_attr_match(args, sfe->namelen, sfe->nameval, 882 sfe->flags)) 883 return -EEXIST; 884 } 885 return -ENOATTR; 886 } 887 888 /* 889 * Retrieve the attribute value and length. 890 * 891 * If args->valuelen is zero, only the length needs to be returned. Unlike a 892 * lookup, we only return an error if the attribute does not exist or we can't 893 * retrieve the value. 894 */ 895 int 896 xfs_attr_shortform_getvalue( 897 struct xfs_da_args *args) 898 { 899 struct xfs_attr_shortform *sf; 900 struct xfs_attr_sf_entry *sfe; 901 int i; 902 903 ASSERT(args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL); 904 sf = (struct xfs_attr_shortform *)args->dp->i_afp->if_u1.if_data; 905 sfe = &sf->list[0]; 906 for (i = 0; i < sf->hdr.count; 907 sfe = xfs_attr_sf_nextentry(sfe), i++) { 908 if (xfs_attr_match(args, sfe->namelen, sfe->nameval, 909 sfe->flags)) 910 return xfs_attr_copy_value(args, 911 &sfe->nameval[args->namelen], sfe->valuelen); 912 } 913 return -ENOATTR; 914 } 915 916 /* 917 * Convert from using the shortform to the leaf. On success, return the 918 * buffer so that we can keep it locked until we're totally done with it. 
 */
int
xfs_attr_shortform_to_leaf(
	struct xfs_da_args	*args,
	struct xfs_buf		**leaf_bp)
{
	struct xfs_inode	*dp;
	struct xfs_attr_shortform *sf;
	struct xfs_attr_sf_entry *sfe;
	struct xfs_da_args	nargs;
	char			*tmpbuffer;
	int			error, i, size;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_ifork	*ifp;

	trace_xfs_attr_sf_to_leaf(args);

	dp = args->dp;
	ifp = dp->i_afp;
	sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
	size = be16_to_cpu(sf->hdr.totsize);
	tmpbuffer = kmem_alloc(size, 0);
	ASSERT(tmpbuffer != NULL);
	/* snapshot the shortform data; the fork is emptied next */
	memcpy(tmpbuffer, ifp->if_u1.if_data, size);
	sf = (struct xfs_attr_shortform *)tmpbuffer;

	xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
	xfs_bmap_local_to_extents_empty(args->trans, dp, XFS_ATTR_FORK);

	bp = NULL;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		goto out;

	/* the first attr block must land at dablk 0 */
	ASSERT(blkno == 0);
	error = xfs_attr3_leaf_create(args, blkno, &bp);
	if (error)
		goto out;

	/* build a scratch args to re-add each shortform entry to the leaf */
	memset((char *)&nargs, 0, sizeof(nargs));
	nargs.dp = dp;
	nargs.geo = args->geo;
	nargs.total = args->total;
	nargs.whichfork = XFS_ATTR_FORK;
	nargs.trans = args->trans;
	nargs.op_flags = XFS_DA_OP_OKNOENT;

	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count; i++) {
		nargs.name = sfe->nameval;
		nargs.namelen = sfe->namelen;
		nargs.value = &sfe->nameval[nargs.namelen];
		nargs.valuelen = sfe->valuelen;
		nargs.hashval = xfs_da_hashname(sfe->nameval,
						sfe->namelen);
		nargs.attr_filter = sfe->flags & XFS_ATTR_NSP_ONDISK_MASK;
		error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
		ASSERT(error == -ENOATTR);
		error = xfs_attr3_leaf_add(bp, &nargs);
		ASSERT(error != -ENOSPC);
		if (error)
			goto out;
		sfe = xfs_attr_sf_nextentry(sfe);
	}
	error = 0;
	*leaf_bp = bp;
out:
	kmem_free(tmpbuffer);
	return error;
}

/*
 * Check a leaf attribute
block to see if all the entries would fit into
 * a shortform attribute list.
 */
int
xfs_attr_shortform_allfit(
	struct xfs_buf		*bp,
	struct xfs_inode	*dp)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	struct xfs_attr3_icleaf_hdr leafhdr;
	int			bytes;
	int			i;
	struct xfs_mount	*mp = bp->b_mount;

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
	entry = xfs_attr3_leaf_entryp(leaf);

	/* total up the shortform space each convertible entry would need */
	bytes = sizeof(struct xfs_attr_sf_hdr);
	for (i = 0; i < leafhdr.count; entry++, i++) {
		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* don't copy partial entries */
		if (!(entry->flags & XFS_ATTR_LOCAL))
			return 0;
		name_loc = xfs_attr3_leaf_name_local(leaf, i);
		if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
			return 0;
		if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
			return 0;
		bytes += xfs_attr_sf_entsize_byname(name_loc->namelen,
					be16_to_cpu(name_loc->valuelen));
	}
	/* -1: empty list on attr2 means the attr fork can go away entirely */
	if (xfs_has_attr2(dp->i_mount) &&
	    (dp->i_df.if_format != XFS_DINODE_FMT_BTREE) &&
	    (bytes == sizeof(struct xfs_attr_sf_hdr)))
		return -1;
	return xfs_attr_shortform_bytesfit(dp, bytes);
}

/* Verify the consistency of an inline attribute fork. */
xfs_failaddr_t
xfs_attr_shortform_verify(
	struct xfs_inode		*ip)
{
	struct xfs_attr_shortform	*sfp;
	struct xfs_attr_sf_entry	*sfep;
	struct xfs_attr_sf_entry	*next_sfep;
	char				*endp;
	struct xfs_ifork		*ifp;
	int				i;
	int64_t				size;

	ASSERT(ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL);
	ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
	sfp = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
	size = ifp->if_bytes;

	/*
	 * Give up if the attribute is way too short.
	 */
	if (size < sizeof(struct xfs_attr_sf_hdr))
		return __this_address;

	endp = (char *)sfp + size;

	/* Check all reported entries */
	sfep = &sfp->list[0];
	for (i = 0; i < sfp->hdr.count; i++) {
		/*
		 * struct xfs_attr_sf_entry has a variable length.
		 * Check the fixed-offset parts of the structure are
		 * within the data buffer.
		 * xfs_attr_sf_entry is defined with a 1-byte variable
		 * array at the end, so we must subtract that off.
		 */
		if (((char *)sfep + sizeof(*sfep)) >= endp)
			return __this_address;

		/* Don't allow names with known bad length. */
		if (sfep->namelen == 0)
			return __this_address;

		/*
		 * Check that the variable-length part of the structure is
		 * within the data buffer. The next entry starts after the
		 * name component, so nextentry is an acceptable test.
		 */
		next_sfep = xfs_attr_sf_nextentry(sfep);
		if ((char *)next_sfep > endp)
			return __this_address;

		/*
		 * Check for unknown flags. Short form doesn't support
		 * the incomplete or local bits, so we can use the namespace
		 * mask here.
		 */
		if (sfep->flags & ~XFS_ATTR_NSP_ONDISK_MASK)
			return __this_address;

		/*
		 * Check for invalid namespace combinations. We only allow
		 * one namespace flag per xattr, so we can just count the
		 * bits (i.e. hweight) here.
1097 */ 1098 if (hweight8(sfep->flags & XFS_ATTR_NSP_ONDISK_MASK) > 1) 1099 return __this_address; 1100 1101 sfep = next_sfep; 1102 } 1103 if ((void *)sfep != (void *)endp) 1104 return __this_address; 1105 1106 return NULL; 1107 } 1108 1109 /* 1110 * Convert a leaf attribute list to shortform attribute list 1111 */ 1112 int 1113 xfs_attr3_leaf_to_shortform( 1114 struct xfs_buf *bp, 1115 struct xfs_da_args *args, 1116 int forkoff) 1117 { 1118 struct xfs_attr_leafblock *leaf; 1119 struct xfs_attr3_icleaf_hdr ichdr; 1120 struct xfs_attr_leaf_entry *entry; 1121 struct xfs_attr_leaf_name_local *name_loc; 1122 struct xfs_da_args nargs; 1123 struct xfs_inode *dp = args->dp; 1124 char *tmpbuffer; 1125 int error; 1126 int i; 1127 1128 trace_xfs_attr_leaf_to_sf(args); 1129 1130 tmpbuffer = kmem_alloc(args->geo->blksize, 0); 1131 if (!tmpbuffer) 1132 return -ENOMEM; 1133 1134 memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); 1135 1136 leaf = (xfs_attr_leafblock_t *)tmpbuffer; 1137 xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); 1138 entry = xfs_attr3_leaf_entryp(leaf); 1139 1140 /* XXX (dgc): buffer is about to be marked stale - why zero it? */ 1141 memset(bp->b_addr, 0, args->geo->blksize); 1142 1143 /* 1144 * Clean out the prior contents of the attribute list. 1145 */ 1146 error = xfs_da_shrink_inode(args, 0, bp); 1147 if (error) 1148 goto out; 1149 1150 if (forkoff == -1) { 1151 /* 1152 * Don't remove the attr fork if this operation is the first 1153 * part of a attr replace operations. We're going to add a new 1154 * attr immediately, so we need to keep the attr fork around in 1155 * this case. 
1156 */ 1157 if (!(args->op_flags & XFS_DA_OP_REPLACE)) { 1158 ASSERT(xfs_has_attr2(dp->i_mount)); 1159 ASSERT(dp->i_df.if_format != XFS_DINODE_FMT_BTREE); 1160 xfs_attr_fork_remove(dp, args->trans); 1161 } 1162 goto out; 1163 } 1164 1165 xfs_attr_shortform_create(args); 1166 1167 /* 1168 * Copy the attributes 1169 */ 1170 memset((char *)&nargs, 0, sizeof(nargs)); 1171 nargs.geo = args->geo; 1172 nargs.dp = dp; 1173 nargs.total = args->total; 1174 nargs.whichfork = XFS_ATTR_FORK; 1175 nargs.trans = args->trans; 1176 nargs.op_flags = XFS_DA_OP_OKNOENT; 1177 1178 for (i = 0; i < ichdr.count; entry++, i++) { 1179 if (entry->flags & XFS_ATTR_INCOMPLETE) 1180 continue; /* don't copy partial entries */ 1181 if (!entry->nameidx) 1182 continue; 1183 ASSERT(entry->flags & XFS_ATTR_LOCAL); 1184 name_loc = xfs_attr3_leaf_name_local(leaf, i); 1185 nargs.name = name_loc->nameval; 1186 nargs.namelen = name_loc->namelen; 1187 nargs.value = &name_loc->nameval[nargs.namelen]; 1188 nargs.valuelen = be16_to_cpu(name_loc->valuelen); 1189 nargs.hashval = be32_to_cpu(entry->hashval); 1190 nargs.attr_filter = entry->flags & XFS_ATTR_NSP_ONDISK_MASK; 1191 xfs_attr_shortform_add(&nargs, forkoff); 1192 } 1193 error = 0; 1194 1195 out: 1196 kmem_free(tmpbuffer); 1197 return error; 1198 } 1199 1200 /* 1201 * Convert from using a single leaf to a root node and a leaf. 
 */
int
xfs_attr3_leaf_to_node(
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr icleafhdr;
	struct xfs_attr_leaf_entry *entries;
	struct xfs_da3_icnode_hdr icnodehdr;
	struct xfs_da_intnode	*node;
	struct xfs_inode	*dp = args->dp;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp1 = NULL;
	struct xfs_buf		*bp2 = NULL;
	xfs_dablk_t		blkno;
	int			error;

	trace_xfs_attr_leaf_to_node(args);

	/* Error-injection hook for testing the leaf-to-node path. */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_ATTR_LEAF_TO_NODE)) {
		error = -EIO;
		goto out;
	}

	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		goto out;
	/* The existing leaf always lives at da block 0. */
	error = xfs_attr3_leaf_read(args->trans, dp, 0, &bp1);
	if (error)
		goto out;

	error = xfs_da_get_buf(args->trans, dp, blkno, &bp2, XFS_ATTR_FORK);
	if (error)
		goto out;

	/* copy leaf to new buffer, update identifiers */
	xfs_trans_buf_set_type(args->trans, bp2, XFS_BLFT_ATTR_LEAF_BUF);
	bp2->b_ops = bp1->b_ops;
	memcpy(bp2->b_addr, bp1->b_addr, args->geo->blksize);
	if (xfs_has_crc(mp)) {
		/* self-describing v5 header must record its new location */
		struct xfs_da3_blkinfo *hdr3 = bp2->b_addr;
		hdr3->blkno = cpu_to_be64(xfs_buf_daddr(bp2));
	}
	xfs_trans_log_buf(args->trans, bp2, 0, args->geo->blksize - 1);

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
	if (error)
		goto out;
	node = bp1->b_addr;
	xfs_da3_node_hdr_from_disk(mp, &icnodehdr, node);

	leaf = bp2->b_addr;
	xfs_attr3_leaf_hdr_from_disk(args->geo, &icleafhdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	/* both on-disk, don't endian-flip twice */
	icnodehdr.btree[0].hashval = entries[icleafhdr.count - 1].hashval;
	icnodehdr.btree[0].before = cpu_to_be32(blkno);
	icnodehdr.count = 1;
	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &icnodehdr);
	xfs_trans_log_buf(args->trans, bp1, 0, args->geo->blksize - 1);
	error = 0;
out:
	return error;
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of a leaf attribute list
 * or a leaf in a node attribute list.
 */
STATIC int
xfs_attr3_leaf_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	struct xfs_buf		**bpp)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_inode	*dp = args->dp;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	int			error;

	trace_xfs_attr_leaf_create(args);

	error = xfs_da_get_buf(args->trans, args->dp, blkno, &bp,
					    XFS_ATTR_FORK);
	if (error)
		return error;
	bp->b_ops = &xfs_attr3_leaf_buf_ops;
	xfs_trans_buf_set_type(args->trans, bp, XFS_BLFT_ATTR_LEAF_BUF);
	leaf = bp->b_addr;
	memset(leaf, 0, args->geo->blksize);

	/*
	 * An empty leaf starts with firstused at the very end of the block;
	 * the name/value region grows backwards from there.
	 */
	memset(&ichdr, 0, sizeof(ichdr));
	ichdr.firstused = args->geo->blksize;

	if (xfs_has_crc(mp)) {
		/* v5 (CRC) leaves carry a larger self-describing header. */
		struct xfs_da3_blkinfo *hdr3 = bp->b_addr;

		ichdr.magic = XFS_ATTR3_LEAF_MAGIC;

		hdr3->blkno = cpu_to_be64(xfs_buf_daddr(bp));
		hdr3->owner = cpu_to_be64(dp->i_ino);
		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_meta_uuid);

		ichdr.freemap[0].base = sizeof(struct xfs_attr3_leaf_hdr);
	} else {
		ichdr.magic = XFS_ATTR_LEAF_MAGIC;
		ichdr.freemap[0].base = sizeof(struct xfs_attr_leaf_hdr);
	}
	/* freemap[0] initially covers everything after the header. */
	ichdr.freemap[0].size = ichdr.firstused - ichdr.freemap[0].base;

	xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
	xfs_trans_log_buf(args->trans, bp, 0, args->geo->blksize - 1);

	*bpp = bp;
	return 0;
}

/*
 * Split the leaf node, rebalance, then add the new entry.
 */
int
xfs_attr3_leaf_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	xfs_dablk_t		blkno;
	int			error;

	trace_xfs_attr_leaf_split(state->args);

	/*
	 * Allocate space for a new leaf node.
	 */
	ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC);
	error = xfs_da_grow_inode(state->args, &blkno);
	if (error)
		return error;
	error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp);
	if (error)
		return error;
	newblk->blkno = blkno;
	newblk->magic = XFS_ATTR_LEAF_MAGIC;

	/*
	 * Rebalance the entries across the two leaves.
	 * NOTE: rebalance() currently depends on the 2nd block being empty.
	 */
	xfs_attr3_leaf_rebalance(state, oldblk, newblk);
	error = xfs_da3_blk_link(state, oldblk, newblk);
	if (error)
		return error;

	/*
	 * Save info on "old" attribute for "atomic rename" ops, leaf_add()
	 * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the
	 * "new" attrs info.  Will need the "old" info to remove it later.
	 *
	 * Insert the "new" entry in the correct block.
	 */
	if (state->inleaf) {
		trace_xfs_attr_leaf_add_old(state->args);
		error = xfs_attr3_leaf_add(oldblk->bp, state->args);
	} else {
		trace_xfs_attr_leaf_add_new(state->args);
		error = xfs_attr3_leaf_add(newblk->bp, state->args);
	}

	/*
	 * Update last hashval in each block since we added the name.
	 */
	oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL);
	newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL);
	return error;
}

/*
 * Add a name to the leaf attribute list structure.
 */
int
xfs_attr3_leaf_add(
	struct xfs_buf		*bp,
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	int			tablesize;
	int			entsize;
	int			sum;
	int			tmp;
	int			i;

	trace_xfs_attr_leaf_add(args);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
	ASSERT(args->index >= 0 && args->index <= ichdr.count);
	entsize = xfs_attr_leaf_newentsize(args, NULL);

	/*
	 * Search through freemap for first-fit on new name length.
	 * (may need to figure in size of entry struct too)
	 */
	tablesize = (ichdr.count + 1) * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf);
	for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE - 1; i >= 0; i--) {
		if (tablesize > ichdr.firstused) {
			/* entry table would overlap names - count free only */
			sum += ichdr.freemap[i].size;
			continue;
		}
		if (!ichdr.freemap[i].size)
			continue;	/* no space in this map */
		tmp = entsize;
		/* maps above firstused must also absorb a new table slot */
		if (ichdr.freemap[i].base < ichdr.firstused)
			tmp += sizeof(xfs_attr_leaf_entry_t);
		if (ichdr.freemap[i].size >= tmp) {
			tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, i);
			goto out_log_hdr;
		}
		sum += ichdr.freemap[i].size;
	}

	/*
	 * If there are no holes in the address space of the block,
	 * and we don't have enough freespace, then compaction will do us
	 * no good and we should just give up.
	 */
	if (!ichdr.holes && sum < entsize)
		return -ENOSPC;

	/*
	 * Compact the entries to coalesce free space.
	 * This may change the hdr->count via dropping INCOMPLETE entries.
	 */
	xfs_attr3_leaf_compact(args, &ichdr, bp);

	/*
	 * After compaction, the block is guaranteed to have only one
	 * free region, in freemap[0].  If it is not big enough, give up.
	 */
	if (ichdr.freemap[0].size < (entsize + sizeof(xfs_attr_leaf_entry_t))) {
		tmp = -ENOSPC;
		goto out_log_hdr;
	}

	tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0);

out_log_hdr:
	/* Write the updated incore header back and log it in all cases. */
	xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
	xfs_trans_log_buf(args->trans, bp,
		XFS_DA_LOGRANGE(leaf, &leaf->hdr,
				xfs_attr3_leaf_hdr_size(leaf)));
	return tmp;
}

/*
 * Add a name to a leaf attribute list structure.
 *
 * The caller has already picked freemap[mapindex] as the region that will
 * hold the new name/value pair; the caller also logs the header afterwards.
 */
STATIC int
xfs_attr3_leaf_add_work(
	struct xfs_buf		*bp,
	struct xfs_attr3_icleaf_hdr *ichdr,
	struct xfs_da_args	*args,
	int			mapindex)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_attr_leaf_name_local *name_loc;
	struct xfs_attr_leaf_name_remote *name_rmt;
	struct xfs_mount	*mp;
	int			tmp;
	int			i;

	trace_xfs_attr_leaf_add_work(args);

	leaf = bp->b_addr;
	ASSERT(mapindex >= 0 && mapindex < XFS_ATTR_LEAF_MAPSIZE);
	ASSERT(args->index >= 0 && args->index <= ichdr->count);

	/*
	 * Force open some space in the entry array and fill it in.
	 */
	entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
	if (args->index < ichdr->count) {
		tmp = ichdr->count - args->index;
		tmp *= sizeof(xfs_attr_leaf_entry_t);
		memmove(entry + 1, entry, tmp);
		xfs_trans_log_buf(args->trans, bp,
		    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
	}
	ichdr->count++;

	/*
	 * Allocate space for the new string (at the end of the run).
	 */
	mp = args->trans->t_mountp;
	ASSERT(ichdr->freemap[mapindex].base < args->geo->blksize);
	ASSERT((ichdr->freemap[mapindex].base & 0x3) == 0);
	ASSERT(ichdr->freemap[mapindex].size >=
		xfs_attr_leaf_newentsize(args, NULL));
	ASSERT(ichdr->freemap[mapindex].size < args->geo->blksize);
	ASSERT((ichdr->freemap[mapindex].size & 0x3) == 0);

	/* tmp is set non-zero by newentsize() if the value fits locally. */
	ichdr->freemap[mapindex].size -= xfs_attr_leaf_newentsize(args, &tmp);

	/* name/value pair is carved from the tail of the freemap region */
	entry->nameidx = cpu_to_be16(ichdr->freemap[mapindex].base +
				     ichdr->freemap[mapindex].size);
	entry->hashval = cpu_to_be32(args->hashval);
	entry->flags = args->attr_filter;
	if (tmp)
		entry->flags |= XFS_ATTR_LOCAL;
	if (args->op_flags & XFS_DA_OP_REPLACE) {
		if (!xfs_has_larp(mp))
			entry->flags |= XFS_ATTR_INCOMPLETE;
		/* track the "old" entry shifting right in the same block */
		if ((args->blkno2 == args->blkno) &&
		    (args->index2 <= args->index)) {
			args->index2++;
		}
	}
	xfs_trans_log_buf(args->trans, bp,
			  XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
	/* Hash ordering of the entry table must be preserved. */
	ASSERT((args->index == 0) ||
	       (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
	ASSERT((args->index == ichdr->count - 1) ||
	       (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval)));

	/*
	 * For "remote" attribute values, simply note that we need to
	 * allocate space for the "remote" value.  We can't actually
	 * allocate the extents in this transaction, and we can't decide
	 * which blocks they should be as we might allocate more blocks
	 * as part of this transaction (a split operation for example).
	 */
	if (entry->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
		name_loc->namelen = args->namelen;
		name_loc->valuelen = cpu_to_be16(args->valuelen);
		memcpy((char *)name_loc->nameval, args->name, args->namelen);
		memcpy((char *)&name_loc->nameval[args->namelen], args->value,
				   be16_to_cpu(name_loc->valuelen));
	} else {
		name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
		name_rmt->namelen = args->namelen;
		memcpy((char *)name_rmt->name, args->name, args->namelen);
		entry->flags |= XFS_ATTR_INCOMPLETE;
		/* just in case */
		name_rmt->valuelen = 0;
		name_rmt->valueblk = 0;
		args->rmtblkno = 1;
		args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
		args->rmtvaluelen = args->valuelen;
	}
	xfs_trans_log_buf(args->trans, bp,
	     XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
				   xfs_attr_leaf_entsize(leaf, args->index)));

	/*
	 * Update the control info for this leaf node
	 */
	if (be16_to_cpu(entry->nameidx) < ichdr->firstused)
		ichdr->firstused = be16_to_cpu(entry->nameidx);

	ASSERT(ichdr->firstused >= ichdr->count * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf));
	/* offset of the byte just past the (old) end of the entry table */
	tmp = (ichdr->count - 1) * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf);

	/* any freemap bordering the entry table loses one table slot */
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		if (ichdr->freemap[i].base == tmp) {
			ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
			ichdr->freemap[i].size -=
				min_t(uint16_t, ichdr->freemap[i].size,
						sizeof(xfs_attr_leaf_entry_t));
		}
	}
	ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
	return 0;
}

/*
 * Garbage collect a leaf attribute list block by copying it to a new buffer.
 */
STATIC void
xfs_attr3_leaf_compact(
	struct xfs_da_args	*args,
	struct xfs_attr3_icleaf_hdr *ichdr_dst,
	struct xfs_buf		*bp)
{
	struct xfs_attr_leafblock *leaf_src;
	struct xfs_attr_leafblock *leaf_dst;
	struct xfs_attr3_icleaf_hdr ichdr_src;
	struct xfs_trans	*trans = args->trans;
	char			*tmpbuffer;

	trace_xfs_attr_leaf_compact(args);

	/* Work from a private copy; the buffer is rebuilt in place. */
	tmpbuffer = kmem_alloc(args->geo->blksize, 0);
	memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
	memset(bp->b_addr, 0, args->geo->blksize);
	leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
	leaf_dst = bp->b_addr;

	/*
	 * Copy the on-disk header back into the destination buffer to ensure
	 * all the information in the header that is not part of the incore
	 * header structure is preserved.
	 */
	memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));

	/* Initialise the incore headers */
	ichdr_src = *ichdr_dst;	/* struct copy */
	ichdr_dst->firstused = args->geo->blksize;
	ichdr_dst->usedbytes = 0;
	ichdr_dst->count = 0;
	ichdr_dst->holes = 0;
	ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
	ichdr_dst->freemap[0].size = ichdr_dst->firstused -
						ichdr_dst->freemap[0].base;

	/* write the header back to initialise the underlying buffer */
	xfs_attr3_leaf_hdr_to_disk(args->geo, leaf_dst, ichdr_dst);

	/*
	 * Copy all entry's in the same (sorted) order,
	 * but allocate name/value pairs packed and in sequence.
	 */
	xfs_attr3_leaf_moveents(args, leaf_src, &ichdr_src, 0,
				leaf_dst, ichdr_dst, 0, ichdr_src.count);
	/*
	 * this logs the entire buffer, but the caller must write the header
	 * back to the buffer when it is finished modifying it.
	 */
	xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1);

	kmem_free(tmpbuffer);
}

/*
 * Compare two leaf blocks "order".
1651 * Return 0 unless leaf2 should go before leaf1. 1652 */ 1653 static int 1654 xfs_attr3_leaf_order( 1655 struct xfs_buf *leaf1_bp, 1656 struct xfs_attr3_icleaf_hdr *leaf1hdr, 1657 struct xfs_buf *leaf2_bp, 1658 struct xfs_attr3_icleaf_hdr *leaf2hdr) 1659 { 1660 struct xfs_attr_leaf_entry *entries1; 1661 struct xfs_attr_leaf_entry *entries2; 1662 1663 entries1 = xfs_attr3_leaf_entryp(leaf1_bp->b_addr); 1664 entries2 = xfs_attr3_leaf_entryp(leaf2_bp->b_addr); 1665 if (leaf1hdr->count > 0 && leaf2hdr->count > 0 && 1666 ((be32_to_cpu(entries2[0].hashval) < 1667 be32_to_cpu(entries1[0].hashval)) || 1668 (be32_to_cpu(entries2[leaf2hdr->count - 1].hashval) < 1669 be32_to_cpu(entries1[leaf1hdr->count - 1].hashval)))) { 1670 return 1; 1671 } 1672 return 0; 1673 } 1674 1675 int 1676 xfs_attr_leaf_order( 1677 struct xfs_buf *leaf1_bp, 1678 struct xfs_buf *leaf2_bp) 1679 { 1680 struct xfs_attr3_icleaf_hdr ichdr1; 1681 struct xfs_attr3_icleaf_hdr ichdr2; 1682 struct xfs_mount *mp = leaf1_bp->b_mount; 1683 1684 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr1, leaf1_bp->b_addr); 1685 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr2, leaf2_bp->b_addr); 1686 return xfs_attr3_leaf_order(leaf1_bp, &ichdr1, leaf2_bp, &ichdr2); 1687 } 1688 1689 /* 1690 * Redistribute the attribute list entries between two leaf nodes, 1691 * taking into account the size of the new entry. 1692 * 1693 * NOTE: if new block is empty, then it will get the upper half of the 1694 * old block. At present, all (one) callers pass in an empty second block. 1695 * 1696 * This code adjusts the args->index/blkno and args->index2/blkno2 fields 1697 * to match what it is doing in splitting the attribute leaf block. Those 1698 * values are used in "atomic rename" operations on attributes. Note that 1699 * the "new" and "old" values can end up in different blocks. 
 */
STATIC void
xfs_attr3_leaf_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_args	*args;
	struct xfs_attr_leafblock *leaf1;
	struct xfs_attr_leafblock *leaf2;
	struct xfs_attr3_icleaf_hdr ichdr1;
	struct xfs_attr3_icleaf_hdr ichdr2;
	struct xfs_attr_leaf_entry *entries1;
	struct xfs_attr_leaf_entry *entries2;
	int			count;
	int			totallen;
	int			max;
	int			space;
	int			swap;

	/*
	 * Set up environment.
	 */
	ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
	leaf1 = blk1->bp->b_addr;
	leaf2 = blk2->bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr1, leaf1);
	xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, leaf2);
	ASSERT(ichdr2.count == 0);
	args = state->args;

	trace_xfs_attr_leaf_rebalance(args);

	/*
	 * Check ordering of blocks, reverse if it makes things simpler.
	 *
	 * NOTE: Given that all (current) callers pass in an empty
	 * second block, this code should never set "swap".
	 */
	swap = 0;
	if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) {
		/* NB: swap() here is the macro, shadowed by the local int */
		swap(blk1, blk2);

		/* swap structures rather than reconverting them */
		swap(ichdr1, ichdr2);

		leaf1 = blk1->bp->b_addr;
		leaf2 = blk2->bp->b_addr;
		swap = 1;
	}

	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.  Then get
	 * the direction to copy and the number of elements to move.
	 *
	 * "inleaf" is true if the new entry should be inserted into blk1.
	 * If "swap" is also true, then reverse the sense of "inleaf".
	 */
	state->inleaf = xfs_attr3_leaf_figure_balance(state, blk1, &ichdr1,
						      blk2, &ichdr2,
						      &count, &totallen);
	if (swap)
		state->inleaf = !state->inleaf;

	/*
	 * Move any entries required from leaf to leaf:
	 */
	if (count < ichdr1.count) {
		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count = ichdr1.count - count;
		space = ichdr1.usedbytes - totallen;
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf2 is the destination, compact it if it looks tight.
		 */
		max = ichdr2.firstused - xfs_attr3_leaf_hdr_size(leaf1);
		max -= ichdr2.count * sizeof(xfs_attr_leaf_entry_t);
		if (space > max)
			xfs_attr3_leaf_compact(args, &ichdr2, blk2->bp);

		/*
		 * Move high entries from leaf1 to low end of leaf2.
		 */
		xfs_attr3_leaf_moveents(args, leaf1, &ichdr1,
				ichdr1.count - count, leaf2, &ichdr2, 0, count);

	} else if (count > ichdr1.count) {
		/*
		 * I assert that since all callers pass in an empty
		 * second buffer, this code should never execute.
		 */
		ASSERT(0);

		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count -= ichdr1.count;
		space = totallen - ichdr1.usedbytes;
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf1 is the destination, compact it if it looks tight.
		 */
		max = ichdr1.firstused - xfs_attr3_leaf_hdr_size(leaf1);
		max -= ichdr1.count * sizeof(xfs_attr_leaf_entry_t);
		if (space > max)
			xfs_attr3_leaf_compact(args, &ichdr1, blk1->bp);

		/*
		 * Move low entries from leaf2 to high end of leaf1.
		 */
		xfs_attr3_leaf_moveents(args, leaf2, &ichdr2, 0, leaf1, &ichdr1,
					ichdr1.count, count);
	}

	xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf1, &ichdr1);
	xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf2, &ichdr2);
	xfs_trans_log_buf(args->trans, blk1->bp, 0, args->geo->blksize - 1);
	xfs_trans_log_buf(args->trans, blk2->bp, 0, args->geo->blksize - 1);

	/*
	 * Copy out last hashval in each block for B-tree code.
	 */
	entries1 = xfs_attr3_leaf_entryp(leaf1);
	entries2 = xfs_attr3_leaf_entryp(leaf2);
	blk1->hashval = be32_to_cpu(entries1[ichdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(entries2[ichdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 * NOTE: this code depends on the (current) situation that the
	 * second block was originally empty.
	 *
	 * If the insertion point moved to the 2nd block, we must adjust
	 * the index.  We must also track the entry just following the
	 * new entry for use in an "atomic rename" operation, that entry
	 * is always the "old" entry and the "new" entry is what we are
	 * inserting.  The index/blkno fields refer to the "old" entry,
	 * while the index2/blkno2 fields refer to the "new" entry.
	 */
	if (blk1->index > ichdr1.count) {
		ASSERT(state->inleaf == 0);
		blk2->index = blk1->index - ichdr1.count;
		args->index = args->index2 = blk2->index;
		args->blkno = args->blkno2 = blk2->blkno;
	} else if (blk1->index == ichdr1.count) {
		if (state->inleaf) {
			args->index = blk1->index;
			args->blkno = blk1->blkno;
			args->index2 = 0;
			args->blkno2 = blk2->blkno;
		} else {
			/*
			 * On a double leaf split, the original attr location
			 * is already stored in blkno2/index2, so don't
			 * overwrite it overwise we corrupt the tree.
			 */
			blk2->index = blk1->index - ichdr1.count;
			args->index = blk2->index;
			args->blkno = blk2->blkno;
			if (!state->extravalid) {
				/*
				 * set the new attr location to match the old
				 * one and let the higher level split code
				 * decide where in the leaf to place it.
				 */
				args->index2 = blk2->index;
				args->blkno2 = blk2->blkno;
			}
		}
	} else {
		ASSERT(state->inleaf == 1);
		args->index = args->index2 = blk1->index;
		args->blkno = args->blkno2 = blk1->blkno;
	}
}

/*
 * Examine entries until we reduce the absolute difference in
 * byte usage between the two blocks to a minimum.
 * GROT: Is this really necessary?  With other than a 512 byte blocksize,
 * GROT: there will always be enough room in either block for a new entry.
 * GROT: Do a double-split for this case?
 */
STATIC int
xfs_attr3_leaf_figure_balance(
	struct xfs_da_state		*state,
	struct xfs_da_state_blk		*blk1,
	struct xfs_attr3_icleaf_hdr	*ichdr1,
	struct xfs_da_state_blk		*blk2,
	struct xfs_attr3_icleaf_hdr	*ichdr2,
	int				*countarg,
	int				*usedbytesarg)
{
	struct xfs_attr_leafblock	*leaf1 = blk1->bp->b_addr;
	struct xfs_attr_leafblock	*leaf2 = blk2->bp->b_addr;
	struct xfs_attr_leaf_entry	*entry;
	int				count;
	int				max;
	int				index;
	int				totallen = 0;
	int				half;
	int				lastdelta;
	int				foundit = 0;
	int				tmp;

	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.
	 */
	max = ichdr1->count + ichdr2->count;
	/* "half" = half the combined space including the entry being added */
	half = (max + 1) * sizeof(*entry);
	half += ichdr1->usedbytes + ichdr2->usedbytes +
			xfs_attr_leaf_newentsize(state->args, NULL);
	half /= 2;
	lastdelta = state->args->geo->blksize;
	entry = xfs_attr3_leaf_entryp(leaf1);
	for (count = index = 0; count < max; entry++, index++, count++) {

#define XFS_ATTR_ABS(A)	(((A) < 0) ? -(A) : (A))
		/*
		 * The new entry is in the first block, account for it.
		 */
		if (count == blk1->index) {
			tmp = totallen + sizeof(*entry) +
				xfs_attr_leaf_newentsize(state->args, NULL);
			if (XFS_ATTR_ABS(half - tmp) > lastdelta)
				break;
			lastdelta = XFS_ATTR_ABS(half - tmp);
			totallen = tmp;
			foundit = 1;
		}

		/*
		 * Wrap around into the second block if necessary.
		 */
		if (count == ichdr1->count) {
			leaf1 = leaf2;
			entry = xfs_attr3_leaf_entryp(leaf1);
			index = 0;
		}

		/*
		 * Figure out if next leaf entry would be too much.
		 */
		tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
									index);
		if (XFS_ATTR_ABS(half - tmp) > lastdelta)
			break;
		lastdelta = XFS_ATTR_ABS(half - tmp);
		totallen = tmp;
#undef XFS_ATTR_ABS
	}

	/*
	 * Calculate the number of usedbytes that will end up in lower block.
	 * If new entry not in lower block, fix up the count.
	 */
	totallen -= count * sizeof(*entry);
	if (foundit) {
		totallen -= sizeof(*entry) +
				xfs_attr_leaf_newentsize(state->args, NULL);
	}

	*countarg = count;
	*usedbytesarg = totallen;
	return foundit;
}

/*========================================================================
 * Routines used for shrinking the Btree.
1978 *========================================================================*/ 1979 1980 /* 1981 * Check a leaf block and its neighbors to see if the block should be 1982 * collapsed into one or the other neighbor. Always keep the block 1983 * with the smaller block number. 1984 * If the current block is over 50% full, don't try to join it, return 0. 1985 * If the block is empty, fill in the state structure and return 2. 1986 * If it can be collapsed, fill in the state structure and return 1. 1987 * If nothing can be done, return 0. 1988 * 1989 * GROT: allow for INCOMPLETE entries in calculation. 1990 */ 1991 int 1992 xfs_attr3_leaf_toosmall( 1993 struct xfs_da_state *state, 1994 int *action) 1995 { 1996 struct xfs_attr_leafblock *leaf; 1997 struct xfs_da_state_blk *blk; 1998 struct xfs_attr3_icleaf_hdr ichdr; 1999 struct xfs_buf *bp; 2000 xfs_dablk_t blkno; 2001 int bytes; 2002 int forward; 2003 int error; 2004 int retval; 2005 int i; 2006 2007 trace_xfs_attr_leaf_toosmall(state->args); 2008 2009 /* 2010 * Check for the degenerate case of the block being over 50% full. 2011 * If so, it's not worth even looking to see if we might be able 2012 * to coalesce with a sibling. 2013 */ 2014 blk = &state->path.blk[ state->path.active-1 ]; 2015 leaf = blk->bp->b_addr; 2016 xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr, leaf); 2017 bytes = xfs_attr3_leaf_hdr_size(leaf) + 2018 ichdr.count * sizeof(xfs_attr_leaf_entry_t) + 2019 ichdr.usedbytes; 2020 if (bytes > (state->args->geo->blksize >> 1)) { 2021 *action = 0; /* blk over 50%, don't try to join */ 2022 return 0; 2023 } 2024 2025 /* 2026 * Check for the degenerate case of the block being empty. 2027 * If the block is empty, we'll simply delete it, no need to 2028 * coalesce it with a sibling block. We choose (arbitrarily) 2029 * to merge with the forward block unless it is NULL. 
2030 */ 2031 if (ichdr.count == 0) { 2032 /* 2033 * Make altpath point to the block we want to keep and 2034 * path point to the block we want to drop (this one). 2035 */ 2036 forward = (ichdr.forw != 0); 2037 memcpy(&state->altpath, &state->path, sizeof(state->path)); 2038 error = xfs_da3_path_shift(state, &state->altpath, forward, 2039 0, &retval); 2040 if (error) 2041 return error; 2042 if (retval) { 2043 *action = 0; 2044 } else { 2045 *action = 2; 2046 } 2047 return 0; 2048 } 2049 2050 /* 2051 * Examine each sibling block to see if we can coalesce with 2052 * at least 25% free space to spare. We need to figure out 2053 * whether to merge with the forward or the backward block. 2054 * We prefer coalescing with the lower numbered sibling so as 2055 * to shrink an attribute list over time. 2056 */ 2057 /* start with smaller blk num */ 2058 forward = ichdr.forw < ichdr.back; 2059 for (i = 0; i < 2; forward = !forward, i++) { 2060 struct xfs_attr3_icleaf_hdr ichdr2; 2061 if (forward) 2062 blkno = ichdr.forw; 2063 else 2064 blkno = ichdr.back; 2065 if (blkno == 0) 2066 continue; 2067 error = xfs_attr3_leaf_read(state->args->trans, state->args->dp, 2068 blkno, &bp); 2069 if (error) 2070 return error; 2071 2072 xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, bp->b_addr); 2073 2074 bytes = state->args->geo->blksize - 2075 (state->args->geo->blksize >> 2) - 2076 ichdr.usedbytes - ichdr2.usedbytes - 2077 ((ichdr.count + ichdr2.count) * 2078 sizeof(xfs_attr_leaf_entry_t)) - 2079 xfs_attr3_leaf_hdr_size(leaf); 2080 2081 xfs_trans_brelse(state->args->trans, bp); 2082 if (bytes >= 0) 2083 break; /* fits with at least 25% to spare */ 2084 } 2085 if (i >= 2) { 2086 *action = 0; 2087 return 0; 2088 } 2089 2090 /* 2091 * Make altpath point to the block we want to keep (the lower 2092 * numbered block) and path point to the block we want to drop. 
2093 */ 2094 memcpy(&state->altpath, &state->path, sizeof(state->path)); 2095 if (blkno < blk->blkno) { 2096 error = xfs_da3_path_shift(state, &state->altpath, forward, 2097 0, &retval); 2098 } else { 2099 error = xfs_da3_path_shift(state, &state->path, forward, 2100 0, &retval); 2101 } 2102 if (error) 2103 return error; 2104 if (retval) { 2105 *action = 0; 2106 } else { 2107 *action = 1; 2108 } 2109 return 0; 2110 } 2111 2112 /* 2113 * Remove a name from the leaf attribute list structure. 2114 * 2115 * Return 1 if leaf is less than 37% full, 0 if >= 37% full. 2116 * If two leaves are 37% full, when combined they will leave 25% free. 2117 */ 2118 int 2119 xfs_attr3_leaf_remove( 2120 struct xfs_buf *bp, 2121 struct xfs_da_args *args) 2122 { 2123 struct xfs_attr_leafblock *leaf; 2124 struct xfs_attr3_icleaf_hdr ichdr; 2125 struct xfs_attr_leaf_entry *entry; 2126 int before; 2127 int after; 2128 int smallest; 2129 int entsize; 2130 int tablesize; 2131 int tmp; 2132 int i; 2133 2134 trace_xfs_attr_leaf_remove(args); 2135 2136 leaf = bp->b_addr; 2137 xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); 2138 2139 ASSERT(ichdr.count > 0 && ichdr.count < args->geo->blksize / 8); 2140 ASSERT(args->index >= 0 && args->index < ichdr.count); 2141 ASSERT(ichdr.firstused >= ichdr.count * sizeof(*entry) + 2142 xfs_attr3_leaf_hdr_size(leaf)); 2143 2144 entry = &xfs_attr3_leaf_entryp(leaf)[args->index]; 2145 2146 ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused); 2147 ASSERT(be16_to_cpu(entry->nameidx) < args->geo->blksize); 2148 2149 /* 2150 * Scan through free region table: 2151 * check for adjacency of free'd entry with an existing one, 2152 * find smallest free region in case we need to replace it, 2153 * adjust any map that borders the entry table, 2154 */ 2155 tablesize = ichdr.count * sizeof(xfs_attr_leaf_entry_t) 2156 + xfs_attr3_leaf_hdr_size(leaf); 2157 tmp = ichdr.freemap[0].size; 2158 before = after = -1; 2159 smallest = XFS_ATTR_LEAF_MAPSIZE - 1; 2160 entsize = 
xfs_attr_leaf_entsize(leaf, args->index); 2161 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { 2162 ASSERT(ichdr.freemap[i].base < args->geo->blksize); 2163 ASSERT(ichdr.freemap[i].size < args->geo->blksize); 2164 if (ichdr.freemap[i].base == tablesize) { 2165 ichdr.freemap[i].base -= sizeof(xfs_attr_leaf_entry_t); 2166 ichdr.freemap[i].size += sizeof(xfs_attr_leaf_entry_t); 2167 } 2168 2169 if (ichdr.freemap[i].base + ichdr.freemap[i].size == 2170 be16_to_cpu(entry->nameidx)) { 2171 before = i; 2172 } else if (ichdr.freemap[i].base == 2173 (be16_to_cpu(entry->nameidx) + entsize)) { 2174 after = i; 2175 } else if (ichdr.freemap[i].size < tmp) { 2176 tmp = ichdr.freemap[i].size; 2177 smallest = i; 2178 } 2179 } 2180 2181 /* 2182 * Coalesce adjacent freemap regions, 2183 * or replace the smallest region. 2184 */ 2185 if ((before >= 0) || (after >= 0)) { 2186 if ((before >= 0) && (after >= 0)) { 2187 ichdr.freemap[before].size += entsize; 2188 ichdr.freemap[before].size += ichdr.freemap[after].size; 2189 ichdr.freemap[after].base = 0; 2190 ichdr.freemap[after].size = 0; 2191 } else if (before >= 0) { 2192 ichdr.freemap[before].size += entsize; 2193 } else { 2194 ichdr.freemap[after].base = be16_to_cpu(entry->nameidx); 2195 ichdr.freemap[after].size += entsize; 2196 } 2197 } else { 2198 /* 2199 * Replace smallest region (if it is smaller than free'd entry) 2200 */ 2201 if (ichdr.freemap[smallest].size < entsize) { 2202 ichdr.freemap[smallest].base = be16_to_cpu(entry->nameidx); 2203 ichdr.freemap[smallest].size = entsize; 2204 } 2205 } 2206 2207 /* 2208 * Did we remove the first entry? 2209 */ 2210 if (be16_to_cpu(entry->nameidx) == ichdr.firstused) 2211 smallest = 1; 2212 else 2213 smallest = 0; 2214 2215 /* 2216 * Compress the remaining entries and zero out the removed stuff. 
2217 */ 2218 memset(xfs_attr3_leaf_name(leaf, args->index), 0, entsize); 2219 ichdr.usedbytes -= entsize; 2220 xfs_trans_log_buf(args->trans, bp, 2221 XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index), 2222 entsize)); 2223 2224 tmp = (ichdr.count - args->index) * sizeof(xfs_attr_leaf_entry_t); 2225 memmove(entry, entry + 1, tmp); 2226 ichdr.count--; 2227 xfs_trans_log_buf(args->trans, bp, 2228 XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(xfs_attr_leaf_entry_t))); 2229 2230 entry = &xfs_attr3_leaf_entryp(leaf)[ichdr.count]; 2231 memset(entry, 0, sizeof(xfs_attr_leaf_entry_t)); 2232 2233 /* 2234 * If we removed the first entry, re-find the first used byte 2235 * in the name area. Note that if the entry was the "firstused", 2236 * then we don't have a "hole" in our block resulting from 2237 * removing the name. 2238 */ 2239 if (smallest) { 2240 tmp = args->geo->blksize; 2241 entry = xfs_attr3_leaf_entryp(leaf); 2242 for (i = ichdr.count - 1; i >= 0; entry++, i--) { 2243 ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused); 2244 ASSERT(be16_to_cpu(entry->nameidx) < args->geo->blksize); 2245 2246 if (be16_to_cpu(entry->nameidx) < tmp) 2247 tmp = be16_to_cpu(entry->nameidx); 2248 } 2249 ichdr.firstused = tmp; 2250 ASSERT(ichdr.firstused != 0); 2251 } else { 2252 ichdr.holes = 1; /* mark as needing compaction */ 2253 } 2254 xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr); 2255 xfs_trans_log_buf(args->trans, bp, 2256 XFS_DA_LOGRANGE(leaf, &leaf->hdr, 2257 xfs_attr3_leaf_hdr_size(leaf))); 2258 2259 /* 2260 * Check if leaf is less than 50% full, caller may want to 2261 * "join" the leaf with a sibling if so. 2262 */ 2263 tmp = ichdr.usedbytes + xfs_attr3_leaf_hdr_size(leaf) + 2264 ichdr.count * sizeof(xfs_attr_leaf_entry_t); 2265 2266 return tmp < args->geo->magicpct; /* leaf is < 37% full */ 2267 } 2268 2269 /* 2270 * Move all the attribute list entries from drop_leaf into save_leaf. 
2271 */ 2272 void 2273 xfs_attr3_leaf_unbalance( 2274 struct xfs_da_state *state, 2275 struct xfs_da_state_blk *drop_blk, 2276 struct xfs_da_state_blk *save_blk) 2277 { 2278 struct xfs_attr_leafblock *drop_leaf = drop_blk->bp->b_addr; 2279 struct xfs_attr_leafblock *save_leaf = save_blk->bp->b_addr; 2280 struct xfs_attr3_icleaf_hdr drophdr; 2281 struct xfs_attr3_icleaf_hdr savehdr; 2282 struct xfs_attr_leaf_entry *entry; 2283 2284 trace_xfs_attr_leaf_unbalance(state->args); 2285 2286 drop_leaf = drop_blk->bp->b_addr; 2287 save_leaf = save_blk->bp->b_addr; 2288 xfs_attr3_leaf_hdr_from_disk(state->args->geo, &drophdr, drop_leaf); 2289 xfs_attr3_leaf_hdr_from_disk(state->args->geo, &savehdr, save_leaf); 2290 entry = xfs_attr3_leaf_entryp(drop_leaf); 2291 2292 /* 2293 * Save last hashval from dying block for later Btree fixup. 2294 */ 2295 drop_blk->hashval = be32_to_cpu(entry[drophdr.count - 1].hashval); 2296 2297 /* 2298 * Check if we need a temp buffer, or can we do it in place. 2299 * Note that we don't check "leaf" for holes because we will 2300 * always be dropping it, toosmall() decided that for us already. 2301 */ 2302 if (savehdr.holes == 0) { 2303 /* 2304 * dest leaf has no holes, so we add there. May need 2305 * to make some room in the entry array. 2306 */ 2307 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr, 2308 drop_blk->bp, &drophdr)) { 2309 xfs_attr3_leaf_moveents(state->args, 2310 drop_leaf, &drophdr, 0, 2311 save_leaf, &savehdr, 0, 2312 drophdr.count); 2313 } else { 2314 xfs_attr3_leaf_moveents(state->args, 2315 drop_leaf, &drophdr, 0, 2316 save_leaf, &savehdr, 2317 savehdr.count, drophdr.count); 2318 } 2319 } else { 2320 /* 2321 * Destination has holes, so we make a temporary copy 2322 * of the leaf and add them both to that. 
2323 */ 2324 struct xfs_attr_leafblock *tmp_leaf; 2325 struct xfs_attr3_icleaf_hdr tmphdr; 2326 2327 tmp_leaf = kmem_zalloc(state->args->geo->blksize, 0); 2328 2329 /* 2330 * Copy the header into the temp leaf so that all the stuff 2331 * not in the incore header is present and gets copied back in 2332 * once we've moved all the entries. 2333 */ 2334 memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf)); 2335 2336 memset(&tmphdr, 0, sizeof(tmphdr)); 2337 tmphdr.magic = savehdr.magic; 2338 tmphdr.forw = savehdr.forw; 2339 tmphdr.back = savehdr.back; 2340 tmphdr.firstused = state->args->geo->blksize; 2341 2342 /* write the header to the temp buffer to initialise it */ 2343 xfs_attr3_leaf_hdr_to_disk(state->args->geo, tmp_leaf, &tmphdr); 2344 2345 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr, 2346 drop_blk->bp, &drophdr)) { 2347 xfs_attr3_leaf_moveents(state->args, 2348 drop_leaf, &drophdr, 0, 2349 tmp_leaf, &tmphdr, 0, 2350 drophdr.count); 2351 xfs_attr3_leaf_moveents(state->args, 2352 save_leaf, &savehdr, 0, 2353 tmp_leaf, &tmphdr, tmphdr.count, 2354 savehdr.count); 2355 } else { 2356 xfs_attr3_leaf_moveents(state->args, 2357 save_leaf, &savehdr, 0, 2358 tmp_leaf, &tmphdr, 0, 2359 savehdr.count); 2360 xfs_attr3_leaf_moveents(state->args, 2361 drop_leaf, &drophdr, 0, 2362 tmp_leaf, &tmphdr, tmphdr.count, 2363 drophdr.count); 2364 } 2365 memcpy(save_leaf, tmp_leaf, state->args->geo->blksize); 2366 savehdr = tmphdr; /* struct copy */ 2367 kmem_free(tmp_leaf); 2368 } 2369 2370 xfs_attr3_leaf_hdr_to_disk(state->args->geo, save_leaf, &savehdr); 2371 xfs_trans_log_buf(state->args->trans, save_blk->bp, 0, 2372 state->args->geo->blksize - 1); 2373 2374 /* 2375 * Copy out last hashval in each block for B-tree code. 
2376 */ 2377 entry = xfs_attr3_leaf_entryp(save_leaf); 2378 save_blk->hashval = be32_to_cpu(entry[savehdr.count - 1].hashval); 2379 } 2380 2381 /*======================================================================== 2382 * Routines used for finding things in the Btree. 2383 *========================================================================*/ 2384 2385 /* 2386 * Look up a name in a leaf attribute list structure. 2387 * This is the internal routine, it uses the caller's buffer. 2388 * 2389 * Note that duplicate keys are allowed, but only check within the 2390 * current leaf node. The Btree code must check in adjacent leaf nodes. 2391 * 2392 * Return in args->index the index into the entry[] array of either 2393 * the found entry, or where the entry should have been (insert before 2394 * that entry). 2395 * 2396 * Don't change the args->value unless we find the attribute. 2397 */ 2398 int 2399 xfs_attr3_leaf_lookup_int( 2400 struct xfs_buf *bp, 2401 struct xfs_da_args *args) 2402 { 2403 struct xfs_attr_leafblock *leaf; 2404 struct xfs_attr3_icleaf_hdr ichdr; 2405 struct xfs_attr_leaf_entry *entry; 2406 struct xfs_attr_leaf_entry *entries; 2407 struct xfs_attr_leaf_name_local *name_loc; 2408 struct xfs_attr_leaf_name_remote *name_rmt; 2409 xfs_dahash_t hashval; 2410 int probe; 2411 int span; 2412 2413 trace_xfs_attr_leaf_lookup(args); 2414 2415 leaf = bp->b_addr; 2416 xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); 2417 entries = xfs_attr3_leaf_entryp(leaf); 2418 if (ichdr.count >= args->geo->blksize / 8) { 2419 xfs_buf_mark_corrupt(bp); 2420 return -EFSCORRUPTED; 2421 } 2422 2423 /* 2424 * Binary search. 
(note: small blocks will skip this loop) 2425 */ 2426 hashval = args->hashval; 2427 probe = span = ichdr.count / 2; 2428 for (entry = &entries[probe]; span > 4; entry = &entries[probe]) { 2429 span /= 2; 2430 if (be32_to_cpu(entry->hashval) < hashval) 2431 probe += span; 2432 else if (be32_to_cpu(entry->hashval) > hashval) 2433 probe -= span; 2434 else 2435 break; 2436 } 2437 if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count))) { 2438 xfs_buf_mark_corrupt(bp); 2439 return -EFSCORRUPTED; 2440 } 2441 if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval)) { 2442 xfs_buf_mark_corrupt(bp); 2443 return -EFSCORRUPTED; 2444 } 2445 2446 /* 2447 * Since we may have duplicate hashval's, find the first matching 2448 * hashval in the leaf. 2449 */ 2450 while (probe > 0 && be32_to_cpu(entry->hashval) >= hashval) { 2451 entry--; 2452 probe--; 2453 } 2454 while (probe < ichdr.count && 2455 be32_to_cpu(entry->hashval) < hashval) { 2456 entry++; 2457 probe++; 2458 } 2459 if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) { 2460 args->index = probe; 2461 return -ENOATTR; 2462 } 2463 2464 /* 2465 * Duplicate keys may be present, so search all of them for a match. 2466 */ 2467 for (; probe < ichdr.count && (be32_to_cpu(entry->hashval) == hashval); 2468 entry++, probe++) { 2469 /* 2470 * GROT: Add code to remove incomplete entries. 
2471 */ 2472 if (entry->flags & XFS_ATTR_LOCAL) { 2473 name_loc = xfs_attr3_leaf_name_local(leaf, probe); 2474 if (!xfs_attr_match(args, name_loc->namelen, 2475 name_loc->nameval, entry->flags)) 2476 continue; 2477 args->index = probe; 2478 return -EEXIST; 2479 } else { 2480 name_rmt = xfs_attr3_leaf_name_remote(leaf, probe); 2481 if (!xfs_attr_match(args, name_rmt->namelen, 2482 name_rmt->name, entry->flags)) 2483 continue; 2484 args->index = probe; 2485 args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen); 2486 args->rmtblkno = be32_to_cpu(name_rmt->valueblk); 2487 args->rmtblkcnt = xfs_attr3_rmt_blocks( 2488 args->dp->i_mount, 2489 args->rmtvaluelen); 2490 return -EEXIST; 2491 } 2492 } 2493 args->index = probe; 2494 return -ENOATTR; 2495 } 2496 2497 /* 2498 * Get the value associated with an attribute name from a leaf attribute 2499 * list structure. 2500 * 2501 * If args->valuelen is zero, only the length needs to be returned. Unlike a 2502 * lookup, we only return an error if the attribute does not exist or we can't 2503 * retrieve the value. 
2504 */ 2505 int 2506 xfs_attr3_leaf_getvalue( 2507 struct xfs_buf *bp, 2508 struct xfs_da_args *args) 2509 { 2510 struct xfs_attr_leafblock *leaf; 2511 struct xfs_attr3_icleaf_hdr ichdr; 2512 struct xfs_attr_leaf_entry *entry; 2513 struct xfs_attr_leaf_name_local *name_loc; 2514 struct xfs_attr_leaf_name_remote *name_rmt; 2515 2516 leaf = bp->b_addr; 2517 xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf); 2518 ASSERT(ichdr.count < args->geo->blksize / 8); 2519 ASSERT(args->index < ichdr.count); 2520 2521 entry = &xfs_attr3_leaf_entryp(leaf)[args->index]; 2522 if (entry->flags & XFS_ATTR_LOCAL) { 2523 name_loc = xfs_attr3_leaf_name_local(leaf, args->index); 2524 ASSERT(name_loc->namelen == args->namelen); 2525 ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0); 2526 return xfs_attr_copy_value(args, 2527 &name_loc->nameval[args->namelen], 2528 be16_to_cpu(name_loc->valuelen)); 2529 } 2530 2531 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index); 2532 ASSERT(name_rmt->namelen == args->namelen); 2533 ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); 2534 args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen); 2535 args->rmtblkno = be32_to_cpu(name_rmt->valueblk); 2536 args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount, 2537 args->rmtvaluelen); 2538 return xfs_attr_copy_value(args, NULL, args->rmtvaluelen); 2539 } 2540 2541 /*======================================================================== 2542 * Utility routines. 2543 *========================================================================*/ 2544 2545 /* 2546 * Move the indicated entries from one leaf to another. 2547 * NOTE: this routine modifies both source and destination leaves. 
 */
/*ARGSUSED*/
STATIC void
xfs_attr3_leaf_moveents(
	struct xfs_da_args		*args,
	struct xfs_attr_leafblock	*leaf_s,
	struct xfs_attr3_icleaf_hdr	*ichdr_s,
	int				start_s,
	struct xfs_attr_leafblock	*leaf_d,
	struct xfs_attr3_icleaf_hdr	*ichdr_d,
	int				start_d,
	int				count)
{
	struct xfs_attr_leaf_entry	*entry_s;
	struct xfs_attr_leaf_entry	*entry_d;
	int				desti;
	int				tmp;
	int				i;

	/*
	 * Check for nothing to do.
	 */
	if (count == 0)
		return;

	/*
	 * Set up environment.
	 */
	ASSERT(ichdr_s->magic == XFS_ATTR_LEAF_MAGIC ||
	       ichdr_s->magic == XFS_ATTR3_LEAF_MAGIC);
	ASSERT(ichdr_s->magic == ichdr_d->magic);
	ASSERT(ichdr_s->count > 0 && ichdr_s->count < args->geo->blksize / 8);
	ASSERT(ichdr_s->firstused >= (ichdr_s->count * sizeof(*entry_s))
					+ xfs_attr3_leaf_hdr_size(leaf_s));
	ASSERT(ichdr_d->count < args->geo->blksize / 8);
	ASSERT(ichdr_d->firstused >= (ichdr_d->count * sizeof(*entry_d))
					+ xfs_attr3_leaf_hdr_size(leaf_d));

	ASSERT(start_s < ichdr_s->count);
	ASSERT(start_d <= ichdr_d->count);
	ASSERT(count <= ichdr_s->count);


	/*
	 * Move the entries in the destination leaf up to make a hole?
	 */
	if (start_d < ichdr_d->count) {
		tmp  = ichdr_d->count - start_d;
		tmp *= sizeof(xfs_attr_leaf_entry_t);
		entry_s = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
		entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d + count];
		memmove(entry_d, entry_s, tmp);
	}

	/*
	 * Copy all entry's in the same (sorted) order,
	 * but allocate attribute info packed and in sequence.
	 *
	 * Names are packed downward from firstused in the destination;
	 * source name bytes are zeroed once copied.  Incore headers on
	 * both sides are updated as we go; the caller is responsible for
	 * writing them back to disk and logging the buffers.
	 */
	entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
	entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
	desti = start_d;
	for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
		ASSERT(be16_to_cpu(entry_s->nameidx) >= ichdr_s->firstused);
		tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
#ifdef GROT
		/*
		 * Code to drop INCOMPLETE entries.  Difficult to use as we
		 * may also need to change the insertion index.  Code turned
		 * off for 6.2, should be revisited later.
		 */
		if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
			memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
			ichdr_s->usedbytes -= tmp;
			ichdr_s->count -= 1;
			entry_d--;	/* to compensate for ++ in loop hdr */
			desti--;
			if ((start_s + i) < offset)
				result++;	/* insertion index adjustment */
		} else {
#endif /* GROT */
			/* claim name/value space at the low end of the region */
			ichdr_d->firstused -= tmp;
			/* both on-disk, don't endian flip twice */
			entry_d->hashval = entry_s->hashval;
			entry_d->nameidx = cpu_to_be16(ichdr_d->firstused);
			entry_d->flags = entry_s->flags;
			ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
							<= args->geo->blksize);
			memmove(xfs_attr3_leaf_name(leaf_d, desti),
				xfs_attr3_leaf_name(leaf_s, start_s + i), tmp);
			ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
							<= args->geo->blksize);
			memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
			ichdr_s->usedbytes -= tmp;
			ichdr_d->usedbytes += tmp;
			ichdr_s->count -= 1;
			ichdr_d->count += 1;
			tmp = ichdr_d->count * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf_d);
			ASSERT(ichdr_d->firstused >= tmp);
#ifdef GROT
		}
#endif /* GROT */
	}

	/*
	 * Zero out the entries we just copied.
	 */
	if (start_s == ichdr_s->count) {
		/* moved entries were at the tail: just clear them */
		tmp = count * sizeof(xfs_attr_leaf_entry_t);
		entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
		ASSERT(((char *)entry_s + tmp) <=
		       ((char *)leaf_s + args->geo->blksize));
		memset(entry_s, 0, tmp);
	} else {
		/*
		 * Move the remaining entries down to fill the hole,
		 * then zero the entries at the top.
		 */
		tmp  = (ichdr_s->count - count) * sizeof(xfs_attr_leaf_entry_t);
		entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s + count];
		entry_d = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
		memmove(entry_d, entry_s, tmp);

		tmp = count * sizeof(xfs_attr_leaf_entry_t);
		entry_s = &xfs_attr3_leaf_entryp(leaf_s)[ichdr_s->count];
		ASSERT(((char *)entry_s + tmp) <=
		       ((char *)leaf_s + args->geo->blksize));
		memset(entry_s, 0, tmp);
	}

	/*
	 * Fill in the freemap information
	 */
	ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_d);
	ichdr_d->freemap[0].base += ichdr_d->count * sizeof(xfs_attr_leaf_entry_t);
	ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
	ichdr_d->freemap[1].base = 0;
	ichdr_d->freemap[2].base = 0;
	ichdr_d->freemap[1].size = 0;
	ichdr_d->freemap[2].size = 0;
	ichdr_s->holes = 1;	/* leaf may not be compact */
}

/*
 * Pick up the last hashvalue from a leaf block.
2693 */ 2694 xfs_dahash_t 2695 xfs_attr_leaf_lasthash( 2696 struct xfs_buf *bp, 2697 int *count) 2698 { 2699 struct xfs_attr3_icleaf_hdr ichdr; 2700 struct xfs_attr_leaf_entry *entries; 2701 struct xfs_mount *mp = bp->b_mount; 2702 2703 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, bp->b_addr); 2704 entries = xfs_attr3_leaf_entryp(bp->b_addr); 2705 if (count) 2706 *count = ichdr.count; 2707 if (!ichdr.count) 2708 return 0; 2709 return be32_to_cpu(entries[ichdr.count - 1].hashval); 2710 } 2711 2712 /* 2713 * Calculate the number of bytes used to store the indicated attribute 2714 * (whether local or remote only calculate bytes in this block). 2715 */ 2716 STATIC int 2717 xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index) 2718 { 2719 struct xfs_attr_leaf_entry *entries; 2720 xfs_attr_leaf_name_local_t *name_loc; 2721 xfs_attr_leaf_name_remote_t *name_rmt; 2722 int size; 2723 2724 entries = xfs_attr3_leaf_entryp(leaf); 2725 if (entries[index].flags & XFS_ATTR_LOCAL) { 2726 name_loc = xfs_attr3_leaf_name_local(leaf, index); 2727 size = xfs_attr_leaf_entsize_local(name_loc->namelen, 2728 be16_to_cpu(name_loc->valuelen)); 2729 } else { 2730 name_rmt = xfs_attr3_leaf_name_remote(leaf, index); 2731 size = xfs_attr_leaf_entsize_remote(name_rmt->namelen); 2732 } 2733 return size; 2734 } 2735 2736 /* 2737 * Calculate the number of bytes that would be required to store the new 2738 * attribute (whether local or remote only calculate bytes in this block). 2739 * This routine decides as a side effect whether the attribute will be 2740 * a "local" or a "remote" attribute. 
 */
int
xfs_attr_leaf_newentsize(
	struct xfs_da_args	*args,
	int			*local)	/* out (optional): 1 = local, 0 = remote */
{
	int			size;

	/* try local first; fall back to remote if the local form is too big */
	size = xfs_attr_leaf_entsize_local(args->namelen, args->valuelen);
	if (size < xfs_attr_leaf_entsize_local_max(args->geo->blksize)) {
		if (local)
			*local = 1;
		return size;
	}
	if (local)
		*local = 0;
	return xfs_attr_leaf_entsize_remote(args->namelen);
}


/*========================================================================
 * Manage the INCOMPLETE flag in a leaf entry
 *========================================================================*/

/*
 * Clear the INCOMPLETE flag on an entry in a leaf block.
 *
 * The entry to fix up is identified by args->blkno/args->index; if the
 * value is remote (args->rmtblkno set), the remote block/length fields
 * are also rewritten from args.  Changes are logged to args->trans.
 */
int
xfs_attr3_leaf_clearflag(
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_attr_leaf_name_remote *name_rmt;
	struct xfs_buf		*bp;
	int			error;
#ifdef DEBUG
	struct xfs_attr3_icleaf_hdr ichdr;
	xfs_attr_leaf_name_local_t *name_loc;
	int namelen;
	char *name;
#endif /* DEBUG */

	trace_xfs_attr_leaf_clearflag(args);
	/*
	 * Set up the operation.
	 */
	error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
	if (error)
		return error;

	leaf = bp->b_addr;
	entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
	ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);

#ifdef DEBUG
	/* sanity-check that the entry really is the named attribute */
	xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
	ASSERT(args->index < ichdr.count);
	ASSERT(args->index >= 0);

	if (entry->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
		namelen = name_loc->namelen;
		name = (char *)name_loc->nameval;
	} else {
		name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
		namelen = name_rmt->namelen;
		name = (char *)name_rmt->name;
	}
	ASSERT(be32_to_cpu(entry->hashval) == args->hashval);
	ASSERT(namelen == args->namelen);
	ASSERT(memcmp(name, args->name, namelen) == 0);
#endif /* DEBUG */

	entry->flags &= ~XFS_ATTR_INCOMPLETE;
	xfs_trans_log_buf(args->trans, bp,
			 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));

	if (args->rmtblkno) {
		/* remote value: fill in the real block mapping now */
		ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
		name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
		name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
		name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
		xfs_trans_log_buf(args->trans, bp,
			 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
	}

	return 0;
}

/*
 * Set the INCOMPLETE flag on an entry in a leaf block.
 *
 * For a remote value the block/length fields are zeroed, to be filled in
 * later by xfs_attr3_leaf_clearflag() once the value blocks exist.
 */
int
xfs_attr3_leaf_setflag(
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_attr_leaf_name_remote *name_rmt;
	struct xfs_buf		*bp;
	int error;
#ifdef DEBUG
	struct xfs_attr3_icleaf_hdr ichdr;
#endif

	trace_xfs_attr_leaf_setflag(args);

	/*
	 * Set up the operation.
	 */
	error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
	if (error)
		return error;

	leaf = bp->b_addr;
#ifdef DEBUG
	xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
	ASSERT(args->index < ichdr.count);
	ASSERT(args->index >= 0);
#endif
	entry = &xfs_attr3_leaf_entryp(leaf)[args->index];

	ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
	entry->flags |= XFS_ATTR_INCOMPLETE;
	xfs_trans_log_buf(args->trans, bp,
			XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
	if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
		name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
		name_rmt->valueblk = 0;
		name_rmt->valuelen = 0;
		xfs_trans_log_buf(args->trans, bp,
			 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
	}

	return 0;
}

/*
 * In a single transaction, clear the INCOMPLETE flag on the leaf entry
 * given by args->blkno/index and set the INCOMPLETE flag on the leaf
 * entry given by args->blkno2/index2.
 *
 * Note that they could be in different blocks, or in the same block.
 */
int
xfs_attr3_leaf_flipflags(
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf1;
	struct xfs_attr_leafblock *leaf2;
	struct xfs_attr_leaf_entry *entry1;
	struct xfs_attr_leaf_entry *entry2;
	struct xfs_attr_leaf_name_remote *name_rmt;
	struct xfs_buf		*bp1;
	struct xfs_buf		*bp2;
	int error;
#ifdef DEBUG
	struct xfs_attr3_icleaf_hdr ichdr1;
	struct xfs_attr3_icleaf_hdr ichdr2;
	xfs_attr_leaf_name_local_t *name_loc;
	int namelen1, namelen2;
	char *name1, *name2;
#endif /* DEBUG */

	trace_xfs_attr_leaf_flipflags(args);

	/*
	 * Read the block containing the "old" attr
	 */
	error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp1);
	if (error)
		return error;

	/*
	 * Read the block containing the "new" attr, if it is different
	 */
	if (args->blkno2 != args->blkno) {
		error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno2,
					   &bp2);
		if (error)
			return error;
	} else {
		/* same block: alias bp2 so the logging below still works */
		bp2 = bp1;
	}

	leaf1 = bp1->b_addr;
	entry1 = &xfs_attr3_leaf_entryp(leaf1)[args->index];

	leaf2 = bp2->b_addr;
	entry2 = &xfs_attr3_leaf_entryp(leaf2)[args->index2];

#ifdef DEBUG
	/* both entries must name the same attribute with the same hash */
	xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr1, leaf1);
	ASSERT(args->index < ichdr1.count);
	ASSERT(args->index >= 0);

	xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr2, leaf2);
	ASSERT(args->index2 < ichdr2.count);
	ASSERT(args->index2 >= 0);

	if (entry1->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr3_leaf_name_local(leaf1, args->index);
		namelen1 = name_loc->namelen;
		name1 = (char *)name_loc->nameval;
	} else {
		name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
		namelen1 = name_rmt->namelen;
		name1 = (char *)name_rmt->name;
	}
	if (entry2->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr3_leaf_name_local(leaf2, args->index2);
		namelen2 = name_loc->namelen;
		name2 = (char *)name_loc->nameval;
	} else {
		name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
		namelen2 = name_rmt->namelen;
		name2 = (char *)name_rmt->name;
	}
	ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval));
	ASSERT(namelen1 == namelen2);
	ASSERT(memcmp(name1, name2, namelen1) == 0);
#endif /* DEBUG */

	ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE);
	ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);

	/* entry 1 becomes the live attribute */
	entry1->flags &= ~XFS_ATTR_INCOMPLETE;
	xfs_trans_log_buf(args->trans, bp1,
			  XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
	if (args->rmtblkno) {
		ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
		name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
		name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
		name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
		xfs_trans_log_buf(args->trans, bp1,
			 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
	}

	/* entry 2 becomes the incomplete (dying) attribute */
	entry2->flags |= XFS_ATTR_INCOMPLETE;
	xfs_trans_log_buf(args->trans, bp2,
			  XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
	if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
		name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
		name_rmt->valueblk = 0;
		name_rmt->valuelen = 0;
		xfs_trans_log_buf(args->trans, bp2,
			 XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
	}

	return 0;
}