/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rmap_btree.h"
#include "xfs_icache.h"


kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */
	xfs_filblks_t	orig_len;

	mp = ip->i_mount;

	/* Calculate the worst-case size of the bmbt.
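	 *
	 * Working up from the leaves, each level needs roughly
	 * len / maxrecs blocks to index the level below it; once a level
	 * fits in a single block, every remaining level costs one block.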
	 */
	orig_len = len;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1) {
			rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
			break;
		}
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}

	/* Calculate the worst-case size of the rmapbt. */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
				mp->m_rmap_maxlevels;

	return rval;
}

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					XFS_FSB_SANITY_CHECK(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
	return;
}

/*
 * Add bmap trace insert entries for all the contents of the extent records.
 */
void
xfs_bmap_trace_exlist(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	cnt,		/* count of entries in the list */
	int		whichfork,	/* data or attr or cow fork */
	unsigned long	caller_ip)
{
	xfs_extnum_t	idx;		/* extent record index */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		state = 0;

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	else if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(cnt == xfs_iext_count(ifp));
	for (idx = 0; idx < cnt; idx++)
		trace_xfs_extlist(ip, idx, state, caller_ip);
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The list is maintained sorted (by block number).
 */
void
xfs_bmap_add_free(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		xfs_rmap_skip_owner_update(&new->xefi_oinfo);
	trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
			XFS_FSB_TO_AGBNO(mp, bno), len);
	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}

/*
 * Inode fork format manipulation functions
 */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int				/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	struct xfs_defer_ops	*dfops,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
	else
		xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);

	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.dfops = dfops;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	args.firstblock = *firstblock;
	if (*firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (dfops->dop_low) {
try_another_ag:
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}

	/*
	 * During a CoW operation, the allocation and bmbt updates occur in
	 * different transactions.  The mapping code tries to put new bmbt
	 * blocks near extents being mapped, but the only way to guarantee this
	 * is if the alloc and the mapping happen in a single transaction that
	 * has a block reservation.  That isn't the case here, so if we run out
	 * of space we'll try again with another AG.
	 */
	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
	    args.fsbno == NULLFSBLOCK &&
	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
		dfops->dop_low = true;
		goto try_another_ag;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
	       (dfops->dop_low &&
		args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
	else
		xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	nextents = xfs_iext_count(ifp);
	for (cnt = i = 0; i < nextents; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
			arp->l0 = cpu_to_be64(ep->l0);
			arp->l1 = cpu_to_be64(ep->l1);
			arp++; cnt++;
		}
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}


STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
								XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	args.firstblock = *firstblock;
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (*firstblock == NULLFSBLOCK) {
try_another_ag:
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = *firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/*
	 * During a CoW operation, the allocation and bmbt updates occur in
	 * different transactions.  The mapping code tries to put new bmbt
	 * blocks near extents being mapped, but the only way to guarantee this
	 * is if the alloc and the mapping happen in a single transaction that
	 * has a block reservation.  That isn't the case here, so if we run out
	 * of space we'll try again with another AG.
	 */
	if (xfs_sb_version_hasreflink(&ip->i_mount->m_sb) &&
	    args.fsbno == NULLFSBLOCK &&
	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
		goto try_another_ag;
	}
	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	*firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	xfs_iext_add(ifp, 0, 1);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
	trace_xfs_bmap_post_update(ip, 0,
			whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
			_THIS_IP_);
	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.firstblock = *firstblock;
		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
		flags, XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion.
 * Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised they
 * handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_da_args_t		dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.firstblock = firstblock;
		dargs.dfops = dfops;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
						 flags, XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	struct xfs_defer_ops	dfops;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
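		 * They carry a zeroed di_aformat field, which we normalise
		 * to the extents format here before setting up the attr fork.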
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_UUID:
		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
		goto trans_cancel;
	}

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_defer_init(&dfops, &firstblock);
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&dfops, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto bmap_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto bmap_cancel;
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

bmap_cancel:
	xfs_defer_cancel(&dfops);
trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int					/* error */
xfs_bmap_read_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_exntfmt_t		exntf;	/* XFS_EXTFMT_NOSTATE, if checking */
	xfs_extnum_t		i, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	/* REFERENCED */
	xfs_extnum_t		room;	/* number of entries there's room for */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
					XFS_EXTFMT_INODE(ip);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			XFS_FSB_SANITY_CHECK(mp, bno), error0);
		xfs_trans_brelse(tp, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	room = xfs_iext_count(ifp);
	i = 0;
	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;
		xfs_extnum_t	start;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > room)) {
			ASSERT(i + num_recs <= room);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
			goto error0;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		start = i;
		for (j = 0; j < num_recs; j++, i++, frp++) {
			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
			trp->l0 = be64_to_cpu(frp->l0);
			trp->l1 = be64_to_cpu(frp->l1);
		}
		if (exntf == XFS_EXTFMT_NOSTATE) {
			/*
			 * Check all attribute bmap btree records and
			 * any "older" data bmap btree records for a
			 * set bit in the "extent flag" position.
			 */
			if (unlikely(xfs_check_nostate_extents(ifp,
					start, num_recs))) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW,
						 ip->i_mount);
				goto error0;
			}
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
		return -EFSCORRUPTED;
	ASSERT(i == xfs_iext_count(ifp));
	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
	return 0;
error0:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}

/*
 * Returns the file-relative block number of the first unused block(s)
 * in the file with at least "len" logically contiguous blocks free.
 * This is the lowest-address hole if the file has holes, else the first block
 * past the end of file.
 * Return 0 if the file is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	xfs_trans_t	*tp,			/* transaction pointer */
	xfs_inode_t	*ip,			/* incore inode */
	xfs_extlen_t	len,			/* size of hole to find */
	xfs_fileoff_t	*first_unused,		/* unused block */
	int		whichfork)		/* data or attr fork */
{
	int		error;			/* error return value */
	int		idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_fileoff_t	lastaddr;		/* last block number seen */
	xfs_fileoff_t	lowest;			/* lowest useful block */
	xfs_fileoff_t	max;			/* starting useful block */
	xfs_fileoff_t	off;			/* offset for this block */
	xfs_extnum_t	nextents;		/* number of extent entries */

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	lowest = *first_unused;
	nextents = xfs_iext_count(ifp);
	for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
		off = xfs_bmbt_get_startoff(ep);
		/*
		 * See if the hole before this extent will work.
		 */
		if (off >= lowest + len && off - max >= len) {
			*first_unused = max;
			return 0;
		}
		lastaddr = off + xfs_bmbt_get_blockcount(ep);
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}
	*first_unused = max;
	return 0;
}

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
		if (got.br_startoff <= *last_block - 1)
			return 0;
	}

	if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
		*last_block = got.br_startoff + got.br_blockcount;
		return 0;
	}

	*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	int			error;
	int			nextents;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	nextents = xfs_iext_count(ifp);
	if (nextents == 0) {
		*is_empty = 1;
		return 0;
	}

	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
	*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = 0;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = 1;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	xfs_bmbt_rec_host_t *ep;	/* ptr to fork's extent */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_get_all(ep, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			diff;	/* temp value */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	xfs_filblks_t		temp2=0;/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ?
		&bma->ip->i_cnextents : &bma->ip->i_d.di_nextents);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	ep = xfs_iext_get_ext(ifp, bma->idx);
	xfs_bmbt_get_all(ep, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (bma->idx < xfs_iext_count(ifp) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);

		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
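		 * All three incore records collapse into the left one, so
		 * the two records to its right are removed.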
		 */
		bma->idx--;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
		(*nextents)--;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount +
					RIGHT.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		bma->idx--;

		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
					new->br_startblock,
					PREV.br_blockcount +
					RIGHT.br_blockcount, PREV.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
1864 */ 1865 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1866 xfs_bmbt_set_startblock(ep, new->br_startblock); 1867 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1868 1869 (*nextents)++; 1870 if (bma->cur == NULL) 1871 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1872 else { 1873 rval = XFS_ILOG_CORE; 1874 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 1875 new->br_startblock, new->br_blockcount, 1876 &i); 1877 if (error) 1878 goto done; 1879 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1880 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 1881 error = xfs_btree_insert(bma->cur, &i); 1882 if (error) 1883 goto done; 1884 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1885 } 1886 break; 1887 1888 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 1889 /* 1890 * Filling in the first part of a previous delayed allocation. 1891 * The left neighbor is contiguous. 1892 */ 1893 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_); 1894 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1), 1895 LEFT.br_blockcount + new->br_blockcount); 1896 xfs_bmbt_set_startoff(ep, 1897 PREV.br_startoff + new->br_blockcount); 1898 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_); 1899 1900 temp = PREV.br_blockcount - new->br_blockcount; 1901 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1902 xfs_bmbt_set_blockcount(ep, temp); 1903 if (bma->cur == NULL) 1904 rval = XFS_ILOG_DEXT; 1905 else { 1906 rval = 0; 1907 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, 1908 LEFT.br_startblock, LEFT.br_blockcount, 1909 &i); 1910 if (error) 1911 goto done; 1912 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1913 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 1914 LEFT.br_startblock, 1915 LEFT.br_blockcount + 1916 new->br_blockcount, 1917 LEFT.br_state); 1918 if (error) 1919 goto done; 1920 } 1921 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1922 startblockval(PREV.br_startblock)); 1923 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 1924 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1925 1926 bma->idx--; 1927 break; 1928 1929 case BMAP_LEFT_FILLING: 1930 /* 1931 * Filling in the first part of a previous delayed allocation. 1932 * The left neighbor is not contiguous. 1933 */ 1934 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1935 xfs_bmbt_set_startoff(ep, new_endoff); 1936 temp = PREV.br_blockcount - new->br_blockcount; 1937 xfs_bmbt_set_blockcount(ep, temp); 1938 xfs_iext_insert(bma->ip, bma->idx, 1, new, state); 1939 (*nextents)++; 1940 if (bma->cur == NULL) 1941 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1942 else { 1943 rval = XFS_ILOG_CORE; 1944 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 1945 new->br_startblock, new->br_blockcount, 1946 &i); 1947 if (error) 1948 goto done; 1949 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1950 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 1951 error = xfs_btree_insert(bma->cur, &i); 1952 if (error) 1953 goto done; 1954 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1955 } 1956 1957 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1958 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1959 bma->firstblock, bma->dfops, 1960 &bma->cur, 1, &tmp_rval, whichfork); 1961 rval |= tmp_rval; 1962 if (error) 1963 goto done; 1964 } 1965 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1966 startblockval(PREV.br_startblock) - 1967 (bma->cur ? 
bma->cur->bc_private.b.allocated : 0)); 1968 ep = xfs_iext_get_ext(ifp, bma->idx + 1); 1969 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 1970 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 1971 break; 1972 1973 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1974 /* 1975 * Filling in the last part of a previous delayed allocation. 1976 * The right neighbor is contiguous with the new allocation. 1977 */ 1978 temp = PREV.br_blockcount - new->br_blockcount; 1979 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 1980 xfs_bmbt_set_blockcount(ep, temp); 1981 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1), 1982 new->br_startoff, new->br_startblock, 1983 new->br_blockcount + RIGHT.br_blockcount, 1984 RIGHT.br_state); 1985 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 1986 if (bma->cur == NULL) 1987 rval = XFS_ILOG_DEXT; 1988 else { 1989 rval = 0; 1990 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 1991 RIGHT.br_startblock, 1992 RIGHT.br_blockcount, &i); 1993 if (error) 1994 goto done; 1995 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1996 error = xfs_bmbt_update(bma->cur, new->br_startoff, 1997 new->br_startblock, 1998 new->br_blockcount + 1999 RIGHT.br_blockcount, 2000 RIGHT.br_state); 2001 if (error) 2002 goto done; 2003 } 2004 2005 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 2006 startblockval(PREV.br_startblock)); 2007 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 2008 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 2009 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 2010 2011 bma->idx++; 2012 break; 2013 2014 case BMAP_RIGHT_FILLING: 2015 /* 2016 * Filling in the last part of a previous delayed allocation. 2017 * The right neighbor is not contiguous. 2018 */ 2019 temp = PREV.br_blockcount - new->br_blockcount; 2020 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 2021 xfs_bmbt_set_blockcount(ep, temp); 2022 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state); 2023 (*nextents)++; 2024 if (bma->cur == NULL) 2025 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2026 else { 2027 rval = XFS_ILOG_CORE; 2028 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 2029 new->br_startblock, new->br_blockcount, 2030 &i); 2031 if (error) 2032 goto done; 2033 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2034 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 2035 error = xfs_btree_insert(bma->cur, &i); 2036 if (error) 2037 goto done; 2038 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2039 } 2040 2041 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 2042 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2043 bma->firstblock, bma->dfops, &bma->cur, 1, 2044 &tmp_rval, whichfork); 2045 rval |= tmp_rval; 2046 if (error) 2047 goto done; 2048 } 2049 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 2050 startblockval(PREV.br_startblock) - 2051 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 2052 ep = xfs_iext_get_ext(ifp, bma->idx); 2053 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 2054 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 2055 2056 bma->idx++; 2057 break; 2058 2059 case 0: 2060 /* 2061 * Filling in the middle part of a previous delayed allocation. 2062 * Contiguity is impossible here. 2063 * This case is avoided almost all the time. 
2064 * 2065 * We start with a delayed allocation: 2066 * 2067 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 2068 * PREV @ idx 2069 * 2070 * and we are allocating: 2071 * +rrrrrrrrrrrrrrrrr+ 2072 * new 2073 * 2074 * and we set it up for insertion as: 2075 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 2076 * new 2077 * PREV @ idx LEFT RIGHT 2078 * inserted at idx + 1 2079 */ 2080 temp = new->br_startoff - PREV.br_startoff; 2081 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; 2082 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_); 2083 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ 2084 LEFT = *new; 2085 RIGHT.br_state = PREV.br_state; 2086 RIGHT.br_startblock = nullstartblock( 2087 (int)xfs_bmap_worst_indlen(bma->ip, temp2)); 2088 RIGHT.br_startoff = new_endoff; 2089 RIGHT.br_blockcount = temp2; 2090 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ 2091 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state); 2092 (*nextents)++; 2093 if (bma->cur == NULL) 2094 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2095 else { 2096 rval = XFS_ILOG_CORE; 2097 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 2098 new->br_startblock, new->br_blockcount, 2099 &i); 2100 if (error) 2101 goto done; 2102 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2103 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 2104 error = xfs_btree_insert(bma->cur, &i); 2105 if (error) 2106 goto done; 2107 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2108 } 2109 2110 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 2111 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2112 bma->firstblock, bma->dfops, &bma->cur, 2113 1, &tmp_rval, whichfork); 2114 rval |= tmp_rval; 2115 if (error) 2116 goto done; 2117 } 2118 temp = xfs_bmap_worst_indlen(bma->ip, temp); 2119 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); 2120 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - 2121 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 2122 if (diff > 0) { 2123 error = xfs_mod_fdblocks(bma->ip->i_mount, 2124 -((int64_t)diff), false); 2125 ASSERT(!error); 2126 if (error) 2127 goto done; 2128 } 2129 2130 ep = xfs_iext_get_ext(ifp, bma->idx); 2131 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 2132 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 2133 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_); 2134 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2), 2135 nullstartblock((int)temp2)); 2136 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_); 2137 2138 bma->idx++; 2139 da_new = temp + temp2; 2140 break; 2141 2142 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2143 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2144 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2145 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2146 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2147 case BMAP_LEFT_CONTIG: 2148 case BMAP_RIGHT_CONTIG: 2149 /* 2150 * These cases are all impossible. 
2151 */ 2152 ASSERT(0); 2153 } 2154 2155 /* add reverse mapping */ 2156 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new); 2157 if (error) 2158 goto done; 2159 2160 /* convert to a btree if necessary */ 2161 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 2162 int tmp_logflags; /* partial log flag return val */ 2163 2164 ASSERT(bma->cur == NULL); 2165 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2166 bma->firstblock, bma->dfops, &bma->cur, 2167 da_old > 0, &tmp_logflags, whichfork); 2168 bma->logflags |= tmp_logflags; 2169 if (error) 2170 goto done; 2171 } 2172 2173 /* adjust for changes in reserved delayed indirect blocks */ 2174 if (da_old || da_new) { 2175 temp = da_new; 2176 if (bma->cur) 2177 temp += bma->cur->bc_private.b.allocated; 2178 ASSERT(temp <= da_old); 2179 if (temp < da_old) 2180 xfs_mod_fdblocks(bma->ip->i_mount, 2181 (int64_t)(da_old - temp), false); 2182 } 2183 2184 /* clear out the allocated field, done with it now in any case. */ 2185 if (bma->cur) 2186 bma->cur->bc_private.b.allocated = 0; 2187 2188 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 2189 done: 2190 if (whichfork != XFS_COW_FORK) 2191 bma->logflags |= rval; 2192 return error; 2193 #undef LEFT 2194 #undef RIGHT 2195 #undef PREV 2196 } 2197 2198 /* 2199 * Convert an unwritten allocation to a real allocation or vice versa. 2200 */ 2201 STATIC int /* error */ 2202 xfs_bmap_add_extent_unwritten_real( 2203 struct xfs_trans *tp, 2204 xfs_inode_t *ip, /* incore inode pointer */ 2205 xfs_extnum_t *idx, /* extent number to update/insert */ 2206 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 2207 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 2208 xfs_fsblock_t *first, /* pointer to firstblock variable */ 2209 struct xfs_defer_ops *dfops, /* list of extents to be freed */ 2210 int *logflagsp) /* inode logging flags */ 2211 { 2212 xfs_btree_cur_t *cur; /* btree cursor */ 2213 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ 2214 int error; /* error return value */ 2215 int i; /* temp state */ 2216 xfs_ifork_t *ifp; /* inode fork pointer */ 2217 xfs_fileoff_t new_endoff; /* end offset of new entry */ 2218 xfs_exntst_t newext; /* new extent state */ 2219 xfs_exntst_t oldext; /* old extent state */ 2220 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 2221 /* left is 0, right is 1, prev is 2 */ 2222 int rval=0; /* return value (logging flags) */ 2223 int state = 0;/* state bits, accessed thru macros */ 2224 struct xfs_mount *mp = tp->t_mountp; 2225 2226 *logflagsp = 0; 2227 2228 cur = *curp; 2229 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 2230 2231 ASSERT(*idx >= 0); 2232 ASSERT(*idx <= xfs_iext_count(ifp)); 2233 ASSERT(!isnullstartblock(new->br_startblock)); 2234 2235 XFS_STATS_INC(mp, xs_add_exlist); 2236 2237 #define LEFT r[0] 2238 #define RIGHT r[1] 2239 #define PREV r[2] 2240 2241 /* 2242 * Set up a bunch of variables to make the tests simpler. 2243 */ 2244 error = 0; 2245 ep = xfs_iext_get_ext(ifp, *idx); 2246 xfs_bmbt_get_all(ep, &PREV); 2247 newext = new->br_state; 2248 oldext = (newext == XFS_EXT_UNWRITTEN) ? 2249 XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 2250 ASSERT(PREV.br_state == oldext); 2251 new_endoff = new->br_startoff + new->br_blockcount; 2252 ASSERT(PREV.br_startoff <= new->br_startoff); 2253 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2254 2255 /* 2256 * Set flags determining what part of the previous oldext allocation 2257 * extent is being replaced by a newext allocation. 
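 * LEFT_FILLING means the conversion starts at PREV's first block,
 * RIGHT_FILLING that it runs through PREV's last block; with both set
 * the whole extent changes state.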
2258 */ 2259 if (PREV.br_startoff == new->br_startoff) 2260 state |= BMAP_LEFT_FILLING; 2261 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2262 state |= BMAP_RIGHT_FILLING; 2263 2264 /* 2265 * Check and set flags if this segment has a left neighbor. 2266 * Don't set contiguous if the combined extent would be too large. 2267 */ 2268 if (*idx > 0) { 2269 state |= BMAP_LEFT_VALID; 2270 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT); 2271 2272 if (isnullstartblock(LEFT.br_startblock)) 2273 state |= BMAP_LEFT_DELAY; 2274 } 2275 2276 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2277 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2278 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2279 LEFT.br_state == newext && 2280 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2281 state |= BMAP_LEFT_CONTIG; 2282 2283 /* 2284 * Check and set flags if this segment has a right neighbor. 2285 * Don't set contiguous if the combined extent would be too large. 2286 * Also check for all-three-contiguous being too large. 2287 */ 2288 if (*idx < xfs_iext_count(&ip->i_df) - 1) { 2289 state |= BMAP_RIGHT_VALID; 2290 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT); 2291 if (isnullstartblock(RIGHT.br_startblock)) 2292 state |= BMAP_RIGHT_DELAY; 2293 } 2294 2295 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2296 new_endoff == RIGHT.br_startoff && 2297 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2298 newext == RIGHT.br_state && 2299 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 2300 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2301 BMAP_RIGHT_FILLING)) != 2302 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2303 BMAP_RIGHT_FILLING) || 2304 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2305 <= MAXEXTLEN)) 2306 state |= BMAP_RIGHT_CONTIG; 2307 2308 /* 2309 * Switch out based on the FILLING and CONTIG state bits. 2310 */ 2311 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2312 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2313 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2314 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2315 /* 2316 * Setting all of a previous oldext extent to newext. 2317 * The left and right neighbors are both contiguous with new. 
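 * Both the PREV and RIGHT records are deleted from the bmbt and the
 * LEFT record is rewritten to cover all three, so di_nextents drops
 * by two.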
2318 */ 2319 --*idx; 2320 2321 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2322 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2323 LEFT.br_blockcount + PREV.br_blockcount + 2324 RIGHT.br_blockcount); 2325 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2326 2327 xfs_iext_remove(ip, *idx + 1, 2, state); 2328 ip->i_d.di_nextents -= 2; 2329 if (cur == NULL) 2330 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2331 else { 2332 rval = XFS_ILOG_CORE; 2333 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, 2334 RIGHT.br_startblock, 2335 RIGHT.br_blockcount, &i))) 2336 goto done; 2337 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2338 if ((error = xfs_btree_delete(cur, &i))) 2339 goto done; 2340 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2341 if ((error = xfs_btree_decrement(cur, 0, &i))) 2342 goto done; 2343 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2344 if ((error = xfs_btree_delete(cur, &i))) 2345 goto done; 2346 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2347 if ((error = xfs_btree_decrement(cur, 0, &i))) 2348 goto done; 2349 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2350 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 2351 LEFT.br_startblock, 2352 LEFT.br_blockcount + PREV.br_blockcount + 2353 RIGHT.br_blockcount, LEFT.br_state))) 2354 goto done; 2355 } 2356 break; 2357 2358 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2359 /* 2360 * Setting all of a previous oldext extent to newext. 2361 * The left neighbor is contiguous, the right is not. 2362 */ 2363 --*idx; 2364 2365 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2366 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2367 LEFT.br_blockcount + PREV.br_blockcount); 2368 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2369 2370 xfs_iext_remove(ip, *idx + 1, 1, state); 2371 ip->i_d.di_nextents--; 2372 if (cur == NULL) 2373 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2374 else { 2375 rval = XFS_ILOG_CORE; 2376 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2377 PREV.br_startblock, PREV.br_blockcount, 2378 &i))) 2379 goto done; 2380 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2381 if ((error = xfs_btree_delete(cur, &i))) 2382 goto done; 2383 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2384 if ((error = xfs_btree_decrement(cur, 0, &i))) 2385 goto done; 2386 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2387 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 2388 LEFT.br_startblock, 2389 LEFT.br_blockcount + PREV.br_blockcount, 2390 LEFT.br_state))) 2391 goto done; 2392 } 2393 break; 2394 2395 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2396 /* 2397 * Setting all of a previous oldext extent to newext. 2398 * The right neighbor is contiguous, the left is not. 
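 * The RIGHT record is removed, incore and from the bmbt, and the
 * converted extent is rewritten in place to cover PREV plus RIGHT in
 * the new state.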
2399 */ 2400 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2401 xfs_bmbt_set_blockcount(ep, 2402 PREV.br_blockcount + RIGHT.br_blockcount); 2403 xfs_bmbt_set_state(ep, newext); 2404 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2405 xfs_iext_remove(ip, *idx + 1, 1, state); 2406 ip->i_d.di_nextents--; 2407 if (cur == NULL) 2408 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2409 else { 2410 rval = XFS_ILOG_CORE; 2411 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, 2412 RIGHT.br_startblock, 2413 RIGHT.br_blockcount, &i))) 2414 goto done; 2415 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2416 if ((error = xfs_btree_delete(cur, &i))) 2417 goto done; 2418 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2419 if ((error = xfs_btree_decrement(cur, 0, &i))) 2420 goto done; 2421 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2422 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2423 new->br_startblock, 2424 new->br_blockcount + RIGHT.br_blockcount, 2425 newext))) 2426 goto done; 2427 } 2428 break; 2429 2430 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2431 /* 2432 * Setting all of a previous oldext extent to newext. 2433 * Neither the left nor right neighbors are contiguous with 2434 * the new one. 2435 */ 2436 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2437 xfs_bmbt_set_state(ep, newext); 2438 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2439 2440 if (cur == NULL) 2441 rval = XFS_ILOG_DEXT; 2442 else { 2443 rval = 0; 2444 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 2445 new->br_startblock, new->br_blockcount, 2446 &i))) 2447 goto done; 2448 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2449 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2450 new->br_startblock, new->br_blockcount, 2451 newext))) 2452 goto done; 2453 } 2454 break; 2455 2456 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2457 /* 2458 * Setting the first part of a previous oldext extent to newext. 2459 * The left neighbor is contiguous. 2460 */ 2461 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_); 2462 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1), 2463 LEFT.br_blockcount + new->br_blockcount); 2464 xfs_bmbt_set_startoff(ep, 2465 PREV.br_startoff + new->br_blockcount); 2466 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_); 2467 2468 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2469 xfs_bmbt_set_startblock(ep, 2470 new->br_startblock + new->br_blockcount); 2471 xfs_bmbt_set_blockcount(ep, 2472 PREV.br_blockcount - new->br_blockcount); 2473 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2474 2475 --*idx; 2476 2477 if (cur == NULL) 2478 rval = XFS_ILOG_DEXT; 2479 else { 2480 rval = 0; 2481 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2482 PREV.br_startblock, PREV.br_blockcount, 2483 &i))) 2484 goto done; 2485 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2486 if ((error = xfs_bmbt_update(cur, 2487 PREV.br_startoff + new->br_blockcount, 2488 PREV.br_startblock + new->br_blockcount, 2489 PREV.br_blockcount - new->br_blockcount, 2490 oldext))) 2491 goto done; 2492 if ((error = xfs_btree_decrement(cur, 0, &i))) 2493 goto done; 2494 error = xfs_bmbt_update(cur, LEFT.br_startoff, 2495 LEFT.br_startblock, 2496 LEFT.br_blockcount + new->br_blockcount, 2497 LEFT.br_state); 2498 if (error) 2499 goto done; 2500 } 2501 break; 2502 2503 case BMAP_LEFT_FILLING: 2504 /* 2505 * Setting the first part of a previous oldext extent to newext. 2506 * The left neighbor is not contiguous. 
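 * PREV is trimmed from the front: its start offset and start block
 * are advanced past the converted range while it keeps the old state,
 * and new is inserted ahead of it as a separate record, bumping
 * di_nextents.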
2507 */ 2508 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2509 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); 2510 xfs_bmbt_set_startoff(ep, new_endoff); 2511 xfs_bmbt_set_blockcount(ep, 2512 PREV.br_blockcount - new->br_blockcount); 2513 xfs_bmbt_set_startblock(ep, 2514 new->br_startblock + new->br_blockcount); 2515 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2516 2517 xfs_iext_insert(ip, *idx, 1, new, state); 2518 ip->i_d.di_nextents++; 2519 if (cur == NULL) 2520 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2521 else { 2522 rval = XFS_ILOG_CORE; 2523 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2524 PREV.br_startblock, PREV.br_blockcount, 2525 &i))) 2526 goto done; 2527 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2528 if ((error = xfs_bmbt_update(cur, 2529 PREV.br_startoff + new->br_blockcount, 2530 PREV.br_startblock + new->br_blockcount, 2531 PREV.br_blockcount - new->br_blockcount, 2532 oldext))) 2533 goto done; 2534 cur->bc_rec.b = *new; 2535 if ((error = xfs_btree_insert(cur, &i))) 2536 goto done; 2537 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2538 } 2539 break; 2540 2541 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2542 /* 2543 * Setting the last part of a previous oldext extent to newext. 2544 * The right neighbor is contiguous with the new allocation. 2545 */ 2546 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2547 xfs_bmbt_set_blockcount(ep, 2548 PREV.br_blockcount - new->br_blockcount); 2549 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2550 2551 ++*idx; 2552 2553 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2554 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), 2555 new->br_startoff, new->br_startblock, 2556 new->br_blockcount + RIGHT.br_blockcount, newext); 2557 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2558 2559 if (cur == NULL) 2560 rval = XFS_ILOG_DEXT; 2561 else { 2562 rval = 0; 2563 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2564 PREV.br_startblock, 2565 PREV.br_blockcount, &i))) 2566 goto done; 2567 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2568 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 2569 PREV.br_startblock, 2570 PREV.br_blockcount - new->br_blockcount, 2571 oldext))) 2572 goto done; 2573 if ((error = xfs_btree_increment(cur, 0, &i))) 2574 goto done; 2575 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2576 new->br_startblock, 2577 new->br_blockcount + RIGHT.br_blockcount, 2578 newext))) 2579 goto done; 2580 } 2581 break; 2582 2583 case BMAP_RIGHT_FILLING: 2584 /* 2585 * Setting the last part of a previous oldext extent to newext. 2586 * The right neighbor is not contiguous. 
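 * PREV keeps the old state and is trimmed from the back; new is
 * inserted after it, and with a cursor the shortened PREV record is
 * updated and a record for new inserted.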
2587 */ 2588 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2589 xfs_bmbt_set_blockcount(ep, 2590 PREV.br_blockcount - new->br_blockcount); 2591 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2592 2593 ++*idx; 2594 xfs_iext_insert(ip, *idx, 1, new, state); 2595 2596 ip->i_d.di_nextents++; 2597 if (cur == NULL) 2598 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2599 else { 2600 rval = XFS_ILOG_CORE; 2601 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2602 PREV.br_startblock, PREV.br_blockcount, 2603 &i))) 2604 goto done; 2605 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2606 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 2607 PREV.br_startblock, 2608 PREV.br_blockcount - new->br_blockcount, 2609 oldext))) 2610 goto done; 2611 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 2612 new->br_startblock, new->br_blockcount, 2613 &i))) 2614 goto done; 2615 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2616 cur->bc_rec.b.br_state = XFS_EXT_NORM; 2617 if ((error = xfs_btree_insert(cur, &i))) 2618 goto done; 2619 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2620 } 2621 break; 2622 2623 case 0: 2624 /* 2625 * Setting the middle part of a previous oldext extent to 2626 * newext. Contiguity is impossible here. 2627 * One extent becomes three extents. 2628 */ 2629 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2630 xfs_bmbt_set_blockcount(ep, 2631 new->br_startoff - PREV.br_startoff); 2632 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2633 2634 r[0] = *new; 2635 r[1].br_startoff = new_endoff; 2636 r[1].br_blockcount = 2637 PREV.br_startoff + PREV.br_blockcount - new_endoff; 2638 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2639 r[1].br_state = oldext; 2640 2641 ++*idx; 2642 xfs_iext_insert(ip, *idx, 2, &r[0], state); 2643 2644 ip->i_d.di_nextents += 2; 2645 if (cur == NULL) 2646 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2647 else { 2648 rval = XFS_ILOG_CORE; 2649 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2650 PREV.br_startblock, PREV.br_blockcount, 2651 &i))) 2652 goto done; 2653 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2654 /* new right extent - oldext */ 2655 if ((error = xfs_bmbt_update(cur, r[1].br_startoff, 2656 r[1].br_startblock, r[1].br_blockcount, 2657 r[1].br_state))) 2658 goto done; 2659 /* new left extent - oldext */ 2660 cur->bc_rec.b = PREV; 2661 cur->bc_rec.b.br_blockcount = 2662 new->br_startoff - PREV.br_startoff; 2663 if ((error = xfs_btree_insert(cur, &i))) 2664 goto done; 2665 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2666 /* 2667 * Reset the cursor to the position of the new extent 2668 * we are about to insert as we can't trust it after 2669 * the previous insert. 2670 */ 2671 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 2672 new->br_startblock, new->br_blockcount, 2673 &i))) 2674 goto done; 2675 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2676 /* new middle extent - newext */ 2677 cur->bc_rec.b.br_state = new->br_state; 2678 if ((error = xfs_btree_insert(cur, &i))) 2679 goto done; 2680 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2681 } 2682 break; 2683 2684 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2685 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2686 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2687 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2688 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2689 case BMAP_LEFT_CONTIG: 2690 case BMAP_RIGHT_CONTIG: 2691 /* 2692 * These cases are all impossible. 
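 * Impossible because PREV fully contains the range being converted:
 * the left neighbor can only be contiguous with new if new starts
 * where PREV starts (LEFT_FILLING), and likewise on the right, so a
 * CONTIG bit can never be set without its matching FILLING bit.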
2693 */ 2694 ASSERT(0); 2695 } 2696 2697 /* update reverse mappings */ 2698 error = xfs_rmap_convert_extent(mp, dfops, ip, XFS_DATA_FORK, new); 2699 if (error) 2700 goto done; 2701 2702 /* convert to a btree if necessary */ 2703 if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) { 2704 int tmp_logflags; /* partial log flag return val */ 2705 2706 ASSERT(cur == NULL); 2707 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur, 2708 0, &tmp_logflags, XFS_DATA_FORK); 2709 *logflagsp |= tmp_logflags; 2710 if (error) 2711 goto done; 2712 } 2713 2714 /* clear out the allocated field, done with it now in any case. */ 2715 if (cur) { 2716 cur->bc_private.b.allocated = 0; 2717 *curp = cur; 2718 } 2719 2720 xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK); 2721 done: 2722 *logflagsp |= rval; 2723 return error; 2724 #undef LEFT 2725 #undef RIGHT 2726 #undef PREV 2727 } 2728 2729 /* 2730 * Convert a hole to a delayed allocation. 2731 */ 2732 STATIC void 2733 xfs_bmap_add_extent_hole_delay( 2734 xfs_inode_t *ip, /* incore inode pointer */ 2735 int whichfork, 2736 xfs_extnum_t *idx, /* extent number to update/insert */ 2737 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2738 { 2739 xfs_ifork_t *ifp; /* inode fork pointer */ 2740 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2741 xfs_filblks_t newlen=0; /* new indirect size */ 2742 xfs_filblks_t oldlen=0; /* old indirect size */ 2743 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2744 int state; /* state bits, accessed thru macros */ 2745 xfs_filblks_t temp=0; /* temp for indirect calculations */ 2746 2747 ifp = XFS_IFORK_PTR(ip, whichfork); 2748 state = 0; 2749 if (whichfork == XFS_COW_FORK) 2750 state |= BMAP_COWFORK; 2751 ASSERT(isnullstartblock(new->br_startblock)); 2752 2753 /* 2754 * Check and set flags if this segment has a left neighbor 2755 */ 2756 if (*idx > 0) { 2757 state |= BMAP_LEFT_VALID; 2758 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left); 2759 2760 if (isnullstartblock(left.br_startblock)) 2761 state |= BMAP_LEFT_DELAY; 2762 } 2763 2764 /* 2765 * Check and set flags if the current (right) segment exists. 2766 * If it doesn't exist, we're converting the hole at end-of-file. 2767 */ 2768 if (*idx < xfs_iext_count(ifp)) { 2769 state |= BMAP_RIGHT_VALID; 2770 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right); 2771 2772 if (isnullstartblock(right.br_startblock)) 2773 state |= BMAP_RIGHT_DELAY; 2774 } 2775 2776 /* 2777 * Set contiguity flags on the left and right neighbors. 2778 * Don't let extents get too large, even if the pieces are contiguous. 2779 */ 2780 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2781 left.br_startoff + left.br_blockcount == new->br_startoff && 2782 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2783 state |= BMAP_LEFT_CONTIG; 2784 2785 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2786 new->br_startoff + new->br_blockcount == right.br_startoff && 2787 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2788 (!(state & BMAP_LEFT_CONTIG) || 2789 (left.br_blockcount + new->br_blockcount + 2790 right.br_blockcount <= MAXEXTLEN))) 2791 state |= BMAP_RIGHT_CONTIG; 2792 2793 /* 2794 * Switch out based on the contiguity flags. 2795 */ 2796 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2797 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2798 /* 2799 * New allocation is contiguous with delayed allocations 2800 * on the left and on the right. 2801 * Merge all three into a single extent record. 
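 * The merged record carries a single recomputed worst-case indirect
 * reservation; the difference from the sum of the old reservations is
 * returned to the free-block count at the bottom of the function.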
2802 */ 2803 --*idx; 2804 temp = left.br_blockcount + new->br_blockcount + 2805 right.br_blockcount; 2806 2807 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2808 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp); 2809 oldlen = startblockval(left.br_startblock) + 2810 startblockval(new->br_startblock) + 2811 startblockval(right.br_startblock); 2812 newlen = xfs_bmap_worst_indlen(ip, temp); 2813 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx), 2814 nullstartblock((int)newlen)); 2815 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2816 2817 xfs_iext_remove(ip, *idx + 1, 1, state); 2818 break; 2819 2820 case BMAP_LEFT_CONTIG: 2821 /* 2822 * New allocation is contiguous with a delayed allocation 2823 * on the left. 2824 * Merge the new allocation with the left neighbor. 2825 */ 2826 --*idx; 2827 temp = left.br_blockcount + new->br_blockcount; 2828 2829 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2830 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp); 2831 oldlen = startblockval(left.br_startblock) + 2832 startblockval(new->br_startblock); 2833 newlen = xfs_bmap_worst_indlen(ip, temp); 2834 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx), 2835 nullstartblock((int)newlen)); 2836 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2837 break; 2838 2839 case BMAP_RIGHT_CONTIG: 2840 /* 2841 * New allocation is contiguous with a delayed allocation 2842 * on the right. 2843 * Merge the new allocation with the right neighbor. 2844 */ 2845 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2846 temp = new->br_blockcount + right.br_blockcount; 2847 oldlen = startblockval(new->br_startblock) + 2848 startblockval(right.br_startblock); 2849 newlen = xfs_bmap_worst_indlen(ip, temp); 2850 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), 2851 new->br_startoff, 2852 nullstartblock((int)newlen), temp, right.br_state); 2853 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2854 break; 2855 2856 case 0: 2857 /* 2858 * New allocation is not contiguous with another 2859 * delayed allocation. 2860 * Insert a new entry. 2861 */ 2862 oldlen = newlen = 0; 2863 xfs_iext_insert(ip, *idx, 1, new, state); 2864 break; 2865 } 2866 if (oldlen != newlen) { 2867 ASSERT(oldlen > newlen); 2868 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2869 false); 2870 /* 2871 * Nothing to do for disk quota accounting here. 2872 */ 2873 } 2874 } 2875 2876 /* 2877 * Convert a hole to a real allocation. 
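 * Used for the data and attr forks (the COW fork is asserted against
 * below); the new extent is merged with contiguous real neighbors
 * where possible, and the fork is converted to btree format
 * afterwards if its extent count outgrows the inline extent list.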
2878 */ 2879 STATIC int /* error */ 2880 xfs_bmap_add_extent_hole_real( 2881 struct xfs_bmalloca *bma, 2882 int whichfork) 2883 { 2884 struct xfs_bmbt_irec *new = &bma->got; 2885 int error; /* error return value */ 2886 int i; /* temp state */ 2887 xfs_ifork_t *ifp; /* inode fork pointer */ 2888 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2889 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2890 int rval=0; /* return value (logging flags) */ 2891 int state; /* state bits, accessed thru macros */ 2892 struct xfs_mount *mp; 2893 2894 mp = bma->ip->i_mount; 2895 ifp = XFS_IFORK_PTR(bma->ip, whichfork); 2896 2897 ASSERT(bma->idx >= 0); 2898 ASSERT(bma->idx <= xfs_iext_count(ifp)); 2899 ASSERT(!isnullstartblock(new->br_startblock)); 2900 ASSERT(!bma->cur || 2901 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 2902 ASSERT(whichfork != XFS_COW_FORK); 2903 2904 XFS_STATS_INC(mp, xs_add_exlist); 2905 2906 state = 0; 2907 if (whichfork == XFS_ATTR_FORK) 2908 state |= BMAP_ATTRFORK; 2909 2910 /* 2911 * Check and set flags if this segment has a left neighbor. 2912 */ 2913 if (bma->idx > 0) { 2914 state |= BMAP_LEFT_VALID; 2915 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left); 2916 if (isnullstartblock(left.br_startblock)) 2917 state |= BMAP_LEFT_DELAY; 2918 } 2919 2920 /* 2921 * Check and set flags if this segment has a current value. 2922 * Not true if we're inserting into the "hole" at eof. 2923 */ 2924 if (bma->idx < xfs_iext_count(ifp)) { 2925 state |= BMAP_RIGHT_VALID; 2926 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right); 2927 if (isnullstartblock(right.br_startblock)) 2928 state |= BMAP_RIGHT_DELAY; 2929 } 2930 2931 /* 2932 * We're inserting a real allocation between "left" and "right". 2933 * Set the contiguity flags. Don't let extents get too large. 2934 */ 2935 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2936 left.br_startoff + left.br_blockcount == new->br_startoff && 2937 left.br_startblock + left.br_blockcount == new->br_startblock && 2938 left.br_state == new->br_state && 2939 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2940 state |= BMAP_LEFT_CONTIG; 2941 2942 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2943 new->br_startoff + new->br_blockcount == right.br_startoff && 2944 new->br_startblock + new->br_blockcount == right.br_startblock && 2945 new->br_state == right.br_state && 2946 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2947 (!(state & BMAP_LEFT_CONTIG) || 2948 left.br_blockcount + new->br_blockcount + 2949 right.br_blockcount <= MAXEXTLEN)) 2950 state |= BMAP_RIGHT_CONTIG; 2951 2952 error = 0; 2953 /* 2954 * Select which case we're in here, and implement it. 2955 */ 2956 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2957 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2958 /* 2959 * New allocation is contiguous with real allocations on the 2960 * left and on the right. 2961 * Merge all three into a single extent record. 
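 * The right record is removed, incore and from the bmbt, and the left
 * record grows to span all three extents, so the fork's extent count
 * drops by one.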
2962 */ 2963 --bma->idx; 2964 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 2965 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 2966 left.br_blockcount + new->br_blockcount + 2967 right.br_blockcount); 2968 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 2969 2970 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); 2971 2972 XFS_IFORK_NEXT_SET(bma->ip, whichfork, 2973 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1); 2974 if (bma->cur == NULL) { 2975 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2976 } else { 2977 rval = XFS_ILOG_CORE; 2978 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff, 2979 right.br_startblock, right.br_blockcount, 2980 &i); 2981 if (error) 2982 goto done; 2983 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2984 error = xfs_btree_delete(bma->cur, &i); 2985 if (error) 2986 goto done; 2987 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2988 error = xfs_btree_decrement(bma->cur, 0, &i); 2989 if (error) 2990 goto done; 2991 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2992 error = xfs_bmbt_update(bma->cur, left.br_startoff, 2993 left.br_startblock, 2994 left.br_blockcount + 2995 new->br_blockcount + 2996 right.br_blockcount, 2997 left.br_state); 2998 if (error) 2999 goto done; 3000 } 3001 break; 3002 3003 case BMAP_LEFT_CONTIG: 3004 /* 3005 * New allocation is contiguous with a real allocation 3006 * on the left. 3007 * Merge the new allocation with the left neighbor. 3008 */ 3009 --bma->idx; 3010 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 3011 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 3012 left.br_blockcount + new->br_blockcount); 3013 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 3014 3015 if (bma->cur == NULL) { 3016 rval = xfs_ilog_fext(whichfork); 3017 } else { 3018 rval = 0; 3019 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff, 3020 left.br_startblock, left.br_blockcount, 3021 &i); 3022 if (error) 3023 goto done; 3024 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 3025 error = xfs_bmbt_update(bma->cur, left.br_startoff, 3026 left.br_startblock, 3027 left.br_blockcount + 3028 new->br_blockcount, 3029 left.br_state); 3030 if (error) 3031 goto done; 3032 } 3033 break; 3034 3035 case BMAP_RIGHT_CONTIG: 3036 /* 3037 * New allocation is contiguous with a real allocation 3038 * on the right. 3039 * Merge the new allocation with the right neighbor. 3040 */ 3041 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 3042 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx), 3043 new->br_startoff, new->br_startblock, 3044 new->br_blockcount + right.br_blockcount, 3045 right.br_state); 3046 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 3047 3048 if (bma->cur == NULL) { 3049 rval = xfs_ilog_fext(whichfork); 3050 } else { 3051 rval = 0; 3052 error = xfs_bmbt_lookup_eq(bma->cur, 3053 right.br_startoff, 3054 right.br_startblock, 3055 right.br_blockcount, &i); 3056 if (error) 3057 goto done; 3058 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 3059 error = xfs_bmbt_update(bma->cur, new->br_startoff, 3060 new->br_startblock, 3061 new->br_blockcount + 3062 right.br_blockcount, 3063 right.br_state); 3064 if (error) 3065 goto done; 3066 } 3067 break; 3068 3069 case 0: 3070 /* 3071 * New allocation is not contiguous with another 3072 * real allocation. 3073 * Insert a new entry. 
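 * The new record is inserted at the current index, the fork extent
 * count is bumped, and a matching bmbt record is inserted when a
 * cursor is present.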
3074 */ 3075 xfs_iext_insert(bma->ip, bma->idx, 1, new, state); 3076 XFS_IFORK_NEXT_SET(bma->ip, whichfork, 3077 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1); 3078 if (bma->cur == NULL) { 3079 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 3080 } else { 3081 rval = XFS_ILOG_CORE; 3082 error = xfs_bmbt_lookup_eq(bma->cur, 3083 new->br_startoff, 3084 new->br_startblock, 3085 new->br_blockcount, &i); 3086 if (error) 3087 goto done; 3088 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 3089 bma->cur->bc_rec.b.br_state = new->br_state; 3090 error = xfs_btree_insert(bma->cur, &i); 3091 if (error) 3092 goto done; 3093 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 3094 } 3095 break; 3096 } 3097 3098 /* add reverse mapping */ 3099 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new); 3100 if (error) 3101 goto done; 3102 3103 /* convert to a btree if necessary */ 3104 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 3105 int tmp_logflags; /* partial log flag return val */ 3106 3107 ASSERT(bma->cur == NULL); 3108 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 3109 bma->firstblock, bma->dfops, &bma->cur, 3110 0, &tmp_logflags, whichfork); 3111 bma->logflags |= tmp_logflags; 3112 if (error) 3113 goto done; 3114 } 3115 3116 /* clear out the allocated field, done with it now in any case. */ 3117 if (bma->cur) 3118 bma->cur->bc_private.b.allocated = 0; 3119 3120 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 3121 done: 3122 bma->logflags |= rval; 3123 return error; 3124 } 3125 3126 /* 3127 * Functions used in the extent read, allocate and remove paths 3128 */ 3129 3130 /* 3131 * Adjust the size of the new extent based on di_extsize and rt extsize. 3132 */ 3133 int 3134 xfs_bmap_extsize_align( 3135 xfs_mount_t *mp, 3136 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 3137 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 3138 xfs_extlen_t extsz, /* align to this extent size */ 3139 int rt, /* is this a realtime inode? */ 3140 int eof, /* is extent at end-of-file? */ 3141 int delay, /* creating delalloc extent? */ 3142 int convert, /* overwriting unwritten extent? */ 3143 xfs_fileoff_t *offp, /* in/out: aligned offset */ 3144 xfs_extlen_t *lenp) /* in/out: aligned length */ 3145 { 3146 xfs_fileoff_t orig_off; /* original offset */ 3147 xfs_extlen_t orig_alen; /* original length */ 3148 xfs_fileoff_t orig_end; /* original off+len */ 3149 xfs_fileoff_t nexto; /* next file offset */ 3150 xfs_fileoff_t prevo; /* previous file offset */ 3151 xfs_fileoff_t align_off; /* temp for offset */ 3152 xfs_extlen_t align_alen; /* temp for length */ 3153 xfs_extlen_t temp; /* temp for calculations */ 3154 3155 if (convert) 3156 return 0; 3157 3158 orig_off = align_off = *offp; 3159 orig_alen = align_alen = *lenp; 3160 orig_end = orig_off + orig_alen; 3161 3162 /* 3163 * If this request overlaps an existing extent, then don't 3164 * attempt to perform any additional alignment. 3165 */ 3166 if (!delay && !eof && 3167 (orig_off >= gotp->br_startoff) && 3168 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 3169 return 0; 3170 } 3171 3172 /* 3173 * If the file offset is unaligned vs. the extent size 3174 * we need to align it. This will be possible unless 3175 * the file was previously written with a kernel that didn't 3176 * perform this alignment, or if a truncate shot us in the 3177 * foot. 3178 */ 3179 temp = do_mod(orig_off, extsz); 3180 if (temp) { 3181 align_alen += temp; 3182 align_off -= temp; 3183 } 3184 3185 /* Same adjustment for the end of the requested area. 
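 * For example, with an extent size hint of 16 blocks, a 3-block
 * request at file offset 5 has already been pulled back to offset 0,
 * length 8 by the code above; the rounding below then grows the
 * length to 16 so that both ends are extsz-aligned.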
*/ 3186 temp = (align_alen % extsz); 3187 if (temp) 3188 align_alen += extsz - temp; 3189 3190 /* 3191 * For large extent hint sizes, the aligned extent might be larger than 3192 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls 3193 * the length back under MAXEXTLEN. The outer allocation loops handle 3194 * short allocation just fine, so it is safe to do this. We only want to 3195 * do it when we are forced to, though, because it means more allocation 3196 * operations are required. 3197 */ 3198 while (align_alen > MAXEXTLEN) 3199 align_alen -= extsz; 3200 ASSERT(align_alen <= MAXEXTLEN); 3201 3202 /* 3203 * If the previous block overlaps with this proposed allocation 3204 * then move the start forward without adjusting the length. 3205 */ 3206 if (prevp->br_startoff != NULLFILEOFF) { 3207 if (prevp->br_startblock == HOLESTARTBLOCK) 3208 prevo = prevp->br_startoff; 3209 else 3210 prevo = prevp->br_startoff + prevp->br_blockcount; 3211 } else 3212 prevo = 0; 3213 if (align_off != orig_off && align_off < prevo) 3214 align_off = prevo; 3215 /* 3216 * If the next block overlaps with this proposed allocation 3217 * then move the start back without adjusting the length, 3218 * but not before offset 0. 3219 * This may of course make the start overlap previous block, 3220 * and if we hit the offset 0 limit then the next block 3221 * can still overlap too. 3222 */ 3223 if (!eof && gotp->br_startoff != NULLFILEOFF) { 3224 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 3225 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 3226 nexto = gotp->br_startoff + gotp->br_blockcount; 3227 else 3228 nexto = gotp->br_startoff; 3229 } else 3230 nexto = NULLFILEOFF; 3231 if (!eof && 3232 align_off + align_alen != orig_end && 3233 align_off + align_alen > nexto) 3234 align_off = nexto > align_alen ? nexto - align_alen : 0; 3235 /* 3236 * If we're now overlapping the next or previous extent that 3237 * means we can't fit an extsz piece in this hole. Just move 3238 * the start forward to the first valid spot and set 3239 * the length so we hit the end. 3240 */ 3241 if (align_off != orig_off && align_off < prevo) 3242 align_off = prevo; 3243 if (align_off + align_alen != orig_end && 3244 align_off + align_alen > nexto && 3245 nexto != NULLFILEOFF) { 3246 ASSERT(nexto > prevo); 3247 align_alen = nexto - align_off; 3248 } 3249 3250 /* 3251 * If realtime, and the result isn't a multiple of the realtime 3252 * extent size we need to remove blocks until it is. 3253 */ 3254 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 3255 /* 3256 * We're not covering the original request, or 3257 * we won't be able to once we fix the length. 3258 */ 3259 if (orig_off < align_off || 3260 orig_end > align_off + align_alen || 3261 align_alen - temp < orig_alen) 3262 return -EINVAL; 3263 /* 3264 * Try to fix it by moving the start up. 3265 */ 3266 if (align_off + temp <= orig_off) { 3267 align_alen -= temp; 3268 align_off += temp; 3269 } 3270 /* 3271 * Try to fix it by moving the end in. 3272 */ 3273 else if (align_off + align_alen - temp >= orig_end) 3274 align_alen -= temp; 3275 /* 3276 * Set the start to the minimum then trim the length. 3277 */ 3278 else { 3279 align_alen -= orig_off - align_off; 3280 align_off = orig_off; 3281 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3282 } 3283 /* 3284 * Result doesn't cover the request, fail it. 
3285 */ 3286 if (orig_off < align_off || orig_end > align_off + align_alen) 3287 return -EINVAL; 3288 } else { 3289 ASSERT(orig_off >= align_off); 3290 /* see MAXEXTLEN handling above */ 3291 ASSERT(orig_end <= align_off + align_alen || 3292 align_alen + extsz > MAXEXTLEN); 3293 } 3294 3295 #ifdef DEBUG 3296 if (!eof && gotp->br_startoff != NULLFILEOFF) 3297 ASSERT(align_off + align_alen <= gotp->br_startoff); 3298 if (prevp->br_startoff != NULLFILEOFF) 3299 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3300 #endif 3301 3302 *lenp = align_alen; 3303 *offp = align_off; 3304 return 0; 3305 } 3306 3307 #define XFS_ALLOC_GAP_UNITS 4 3308 3309 void 3310 xfs_bmap_adjacent( 3311 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3312 { 3313 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3314 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3315 xfs_mount_t *mp; /* mount point structure */ 3316 int nullfb; /* true if ap->firstblock isn't set */ 3317 int rt; /* true if inode is realtime */ 3318 3319 #define ISVALID(x,y) \ 3320 (rt ? \ 3321 (x) < mp->m_sb.sb_rblocks : \ 3322 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3323 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3324 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3325 3326 mp = ap->ip->i_mount; 3327 nullfb = *ap->firstblock == NULLFSBLOCK; 3328 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3329 xfs_alloc_is_userdata(ap->datatype); 3330 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); 3331 /* 3332 * If allocating at eof, and there's a previous real block, 3333 * try to use its last block as our starting point. 3334 */ 3335 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3336 !isnullstartblock(ap->prev.br_startblock) && 3337 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3338 ap->prev.br_startblock)) { 3339 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3340 /* 3341 * Adjust for the gap between prevp and us. 3342 */ 3343 adjust = ap->offset - 3344 (ap->prev.br_startoff + ap->prev.br_blockcount); 3345 if (adjust && 3346 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3347 ap->blkno += adjust; 3348 } 3349 /* 3350 * If not at eof, then compare the two neighbor blocks. 3351 * Figure out whether either one gives us a good starting point, 3352 * and pick the better one. 3353 */ 3354 else if (!ap->eof) { 3355 xfs_fsblock_t gotbno; /* right side block number */ 3356 xfs_fsblock_t gotdiff=0; /* right side difference */ 3357 xfs_fsblock_t prevbno; /* left side block number */ 3358 xfs_fsblock_t prevdiff=0; /* left side difference */ 3359 3360 /* 3361 * If there's a previous (left) block, select a requested 3362 * start block based on it. 3363 */ 3364 if (ap->prev.br_startoff != NULLFILEOFF && 3365 !isnullstartblock(ap->prev.br_startblock) && 3366 (prevbno = ap->prev.br_startblock + 3367 ap->prev.br_blockcount) && 3368 ISVALID(prevbno, ap->prev.br_startblock)) { 3369 /* 3370 * Calculate gap to end of previous block. 3371 */ 3372 adjust = prevdiff = ap->offset - 3373 (ap->prev.br_startoff + 3374 ap->prev.br_blockcount); 3375 /* 3376 * Figure the startblock based on the previous block's 3377 * end and the gap size. 3378 * Heuristic! 3379 * If the gap is large relative to the piece we're 3380 * allocating, or using it gives us an invalid block 3381 * number, then just use the end of the previous block. 
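 * ("Large" here means more than XFS_ALLOC_GAP_UNITS, i.e. 4, times
 * the length being allocated.)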
3382 */ 3383 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3384 ISVALID(prevbno + prevdiff, 3385 ap->prev.br_startblock)) 3386 prevbno += adjust; 3387 else 3388 prevdiff += adjust; 3389 /* 3390 * If the firstblock forbids it, can't use it, 3391 * must use default. 3392 */ 3393 if (!rt && !nullfb && 3394 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) 3395 prevbno = NULLFSBLOCK; 3396 } 3397 /* 3398 * No previous block or can't follow it, just default. 3399 */ 3400 else 3401 prevbno = NULLFSBLOCK; 3402 /* 3403 * If there's a following (right) block, select a requested 3404 * start block based on it. 3405 */ 3406 if (!isnullstartblock(ap->got.br_startblock)) { 3407 /* 3408 * Calculate gap to start of next block. 3409 */ 3410 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3411 /* 3412 * Figure the startblock based on the next block's 3413 * start and the gap size. 3414 */ 3415 gotbno = ap->got.br_startblock; 3416 /* 3417 * Heuristic! 3418 * If the gap is large relative to the piece we're 3419 * allocating, or using it gives us an invalid block 3420 * number, then just use the start of the next block 3421 * offset by our length. 3422 */ 3423 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3424 ISVALID(gotbno - gotdiff, gotbno)) 3425 gotbno -= adjust; 3426 else if (ISVALID(gotbno - ap->length, gotbno)) { 3427 gotbno -= ap->length; 3428 gotdiff += adjust - ap->length; 3429 } else 3430 gotdiff += adjust; 3431 /* 3432 * If the firstblock forbids it, can't use it, 3433 * must use default. 3434 */ 3435 if (!rt && !nullfb && 3436 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) 3437 gotbno = NULLFSBLOCK; 3438 } 3439 /* 3440 * No next block, just default. 3441 */ 3442 else 3443 gotbno = NULLFSBLOCK; 3444 /* 3445 * If both valid, pick the better one, else the only good 3446 * one, else ap->blkno is already set (to 0 or the inode block). 3447 */ 3448 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3449 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno; 3450 else if (prevbno != NULLFSBLOCK) 3451 ap->blkno = prevbno; 3452 else if (gotbno != NULLFSBLOCK) 3453 ap->blkno = gotbno; 3454 } 3455 #undef ISVALID 3456 } 3457 3458 static int 3459 xfs_bmap_longest_free_extent( 3460 struct xfs_trans *tp, 3461 xfs_agnumber_t ag, 3462 xfs_extlen_t *blen, 3463 int *notinit) 3464 { 3465 struct xfs_mount *mp = tp->t_mountp; 3466 struct xfs_perag *pag; 3467 xfs_extlen_t longest; 3468 int error = 0; 3469 3470 pag = xfs_perag_get(mp, ag); 3471 if (!pag->pagf_init) { 3472 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK); 3473 if (error) 3474 goto out; 3475 3476 if (!pag->pagf_init) { 3477 *notinit = 1; 3478 goto out; 3479 } 3480 } 3481 3482 longest = xfs_alloc_longest_free_extent(mp, pag, 3483 xfs_alloc_min_freelist(mp, pag), 3484 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3485 if (*blen < longest) 3486 *blen = longest; 3487 3488 out: 3489 xfs_perag_put(pag); 3490 return error; 3491 } 3492 3493 static void 3494 xfs_bmap_select_minlen( 3495 struct xfs_bmalloca *ap, 3496 struct xfs_alloc_arg *args, 3497 xfs_extlen_t *blen, 3498 int notinit) 3499 { 3500 if (notinit || *blen < ap->minlen) { 3501 /* 3502 * Since we did a BUF_TRYLOCK above, it is possible that 3503 * there is space for this request. 3504 */ 3505 args->minlen = ap->minlen; 3506 } else if (*blen < args->maxlen) { 3507 /* 3508 * If the best seen length is less than the request length, 3509 * use the best as the minimum. 
3510 */ 3511 args->minlen = *blen; 3512 } else { 3513 /* 3514 * Otherwise we've seen an extent as big as maxlen, use that 3515 * as the minimum. 3516 */ 3517 args->minlen = args->maxlen; 3518 } 3519 } 3520 3521 STATIC int 3522 xfs_bmap_btalloc_nullfb( 3523 struct xfs_bmalloca *ap, 3524 struct xfs_alloc_arg *args, 3525 xfs_extlen_t *blen) 3526 { 3527 struct xfs_mount *mp = ap->ip->i_mount; 3528 xfs_agnumber_t ag, startag; 3529 int notinit = 0; 3530 int error; 3531 3532 args->type = XFS_ALLOCTYPE_START_BNO; 3533 args->total = ap->total; 3534 3535 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3536 if (startag == NULLAGNUMBER) 3537 startag = ag = 0; 3538 3539 while (*blen < args->maxlen) { 3540 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3541 &notinit); 3542 if (error) 3543 return error; 3544 3545 if (++ag == mp->m_sb.sb_agcount) 3546 ag = 0; 3547 if (ag == startag) 3548 break; 3549 } 3550 3551 xfs_bmap_select_minlen(ap, args, blen, notinit); 3552 return 0; 3553 } 3554 3555 STATIC int 3556 xfs_bmap_btalloc_filestreams( 3557 struct xfs_bmalloca *ap, 3558 struct xfs_alloc_arg *args, 3559 xfs_extlen_t *blen) 3560 { 3561 struct xfs_mount *mp = ap->ip->i_mount; 3562 xfs_agnumber_t ag; 3563 int notinit = 0; 3564 int error; 3565 3566 args->type = XFS_ALLOCTYPE_NEAR_BNO; 3567 args->total = ap->total; 3568 3569 ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3570 if (ag == NULLAGNUMBER) 3571 ag = 0; 3572 3573 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit); 3574 if (error) 3575 return error; 3576 3577 if (*blen < args->maxlen) { 3578 error = xfs_filestream_new_ag(ap, &ag); 3579 if (error) 3580 return error; 3581 3582 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3583 &notinit); 3584 if (error) 3585 return error; 3586 3587 } 3588 3589 xfs_bmap_select_minlen(ap, args, blen, notinit); 3590 3591 /* 3592 * Set the failure fallback case to look in the selected AG as stream 3593 * may have moved. 3594 */ 3595 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 3596 return 0; 3597 } 3598 3599 STATIC int 3600 xfs_bmap_btalloc( 3601 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3602 { 3603 xfs_mount_t *mp; /* mount point structure */ 3604 xfs_alloctype_t atype = 0; /* type for allocation routines */ 3605 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3606 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3607 xfs_agnumber_t ag; 3608 xfs_alloc_arg_t args; 3609 xfs_extlen_t blen; 3610 xfs_extlen_t nextminlen = 0; 3611 int nullfb; /* true if ap->firstblock isn't set */ 3612 int isaligned; 3613 int tryagain; 3614 int error; 3615 int stripe_align; 3616 3617 ASSERT(ap->length); 3618 3619 mp = ap->ip->i_mount; 3620 3621 /* stripe alignment for allocation is determined by mount parameters */ 3622 stripe_align = 0; 3623 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 3624 stripe_align = mp->m_swidth; 3625 else if (mp->m_dalign) 3626 stripe_align = mp->m_dalign; 3627 3628 if (ap->flags & XFS_BMAPI_COWFORK) 3629 align = xfs_get_cowextsz_hint(ap->ip); 3630 else if (xfs_alloc_is_userdata(ap->datatype)) 3631 align = xfs_get_extsz_hint(ap->ip); 3632 if (unlikely(align)) { 3633 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3634 align, 0, ap->eof, 0, ap->conv, 3635 &ap->offset, &ap->length); 3636 ASSERT(!error); 3637 ASSERT(ap->length); 3638 } 3639 3640 3641 nullfb = *ap->firstblock == NULLFSBLOCK; 3642 fb_agno = nullfb ?
NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); 3643 if (nullfb) { 3644 if (xfs_alloc_is_userdata(ap->datatype) && 3645 xfs_inode_is_filestream(ap->ip)) { 3646 ag = xfs_filestream_lookup_ag(ap->ip); 3647 ag = (ag != NULLAGNUMBER) ? ag : 0; 3648 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 3649 } else { 3650 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 3651 } 3652 } else 3653 ap->blkno = *ap->firstblock; 3654 3655 xfs_bmap_adjacent(ap); 3656 3657 /* 3658 * If allowed, use ap->blkno; otherwise must use firstblock since 3659 * it's in the right allocation group. 3660 */ 3661 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 3662 ; 3663 else 3664 ap->blkno = *ap->firstblock; 3665 /* 3666 * Normal allocation, done through xfs_alloc_vextent. 3667 */ 3668 tryagain = isaligned = 0; 3669 memset(&args, 0, sizeof(args)); 3670 args.tp = ap->tp; 3671 args.mp = mp; 3672 args.fsbno = ap->blkno; 3673 xfs_rmap_skip_owner_update(&args.oinfo); 3674 3675 /* Trim the allocation back to the maximum an AG can fit. */ 3676 args.maxlen = MIN(ap->length, mp->m_ag_max_usable); 3677 args.firstblock = *ap->firstblock; 3678 blen = 0; 3679 if (nullfb) { 3680 /* 3681 * Search for an allocation group with a single extent large 3682 * enough for the request. If one isn't found, then adjust 3683 * the minimum allocation size to the largest space found. 3684 */ 3685 if (xfs_alloc_is_userdata(ap->datatype) && 3686 xfs_inode_is_filestream(ap->ip)) 3687 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); 3688 else 3689 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 3690 if (error) 3691 return error; 3692 } else if (ap->dfops->dop_low) { 3693 if (xfs_inode_is_filestream(ap->ip)) 3694 args.type = XFS_ALLOCTYPE_FIRST_AG; 3695 else 3696 args.type = XFS_ALLOCTYPE_START_BNO; 3697 args.total = args.minlen = ap->minlen; 3698 } else { 3699 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3700 args.total = ap->total; 3701 args.minlen = ap->minlen; 3702 } 3703 /* apply extent size hints if obtained earlier */ 3704 if (unlikely(align)) { 3705 args.prod = align; 3706 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) 3707 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3708 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3709 args.prod = 1; 3710 args.mod = 0; 3711 } else { 3712 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3713 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) 3714 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3715 } 3716 /* 3717 * If we are not low on available data blocks, and the 3718 * underlying logical volume manager is a stripe, and 3719 * the file offset is zero then try to allocate data 3720 * blocks on stripe unit boundary. 3721 * NOTE: ap->aeof is only set if the allocation length 3722 * is >= the stripe unit and the allocation offset is 3723 * at the end of file. 3724 */ 3725 if (!ap->dfops->dop_low && ap->aeof) { 3726 if (!ap->offset) { 3727 args.alignment = stripe_align; 3728 atype = args.type; 3729 isaligned = 1; 3730 /* 3731 * Adjust for alignment 3732 */ 3733 if (blen > args.alignment && blen <= args.maxlen) 3734 args.minlen = blen - args.alignment; 3735 args.minalignslop = 0; 3736 } else { 3737 /* 3738 * First try an exact bno allocation. 3739 * If it fails then do a near or start bno 3740 * allocation with alignment turned on. 3741 */ 3742 atype = args.type; 3743 tryagain = 1; 3744 args.type = XFS_ALLOCTYPE_THIS_BNO; 3745 args.alignment = 1; 3746 /* 3747 * Compute the minlen+alignment for the 3748 * next case. 
Set slop so that the value 3749 * of minlen+alignment+slop doesn't go up 3750 * between the calls. 3751 */ 3752 if (blen > stripe_align && blen <= args.maxlen) 3753 nextminlen = blen - stripe_align; 3754 else 3755 nextminlen = args.minlen; 3756 if (nextminlen + stripe_align > args.minlen + 1) 3757 args.minalignslop = 3758 nextminlen + stripe_align - 3759 args.minlen - 1; 3760 else 3761 args.minalignslop = 0; 3762 } 3763 } else { 3764 args.alignment = 1; 3765 args.minalignslop = 0; 3766 } 3767 args.minleft = ap->minleft; 3768 args.wasdel = ap->wasdel; 3769 args.resv = XFS_AG_RESV_NONE; 3770 args.datatype = ap->datatype; 3771 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) 3772 args.ip = ap->ip; 3773 3774 error = xfs_alloc_vextent(&args); 3775 if (error) 3776 return error; 3777 3778 if (tryagain && args.fsbno == NULLFSBLOCK) { 3779 /* 3780 * Exact allocation failed. Now try with alignment 3781 * turned on. 3782 */ 3783 args.type = atype; 3784 args.fsbno = ap->blkno; 3785 args.alignment = stripe_align; 3786 args.minlen = nextminlen; 3787 args.minalignslop = 0; 3788 isaligned = 1; 3789 if ((error = xfs_alloc_vextent(&args))) 3790 return error; 3791 } 3792 if (isaligned && args.fsbno == NULLFSBLOCK) { 3793 /* 3794 * allocation failed, so turn off alignment and 3795 * try again. 3796 */ 3797 args.type = atype; 3798 args.fsbno = ap->blkno; 3799 args.alignment = 0; 3800 if ((error = xfs_alloc_vextent(&args))) 3801 return error; 3802 } 3803 if (args.fsbno == NULLFSBLOCK && nullfb && 3804 args.minlen > ap->minlen) { 3805 args.minlen = ap->minlen; 3806 args.type = XFS_ALLOCTYPE_START_BNO; 3807 args.fsbno = ap->blkno; 3808 if ((error = xfs_alloc_vextent(&args))) 3809 return error; 3810 } 3811 if (args.fsbno == NULLFSBLOCK && nullfb) { 3812 args.fsbno = 0; 3813 args.type = XFS_ALLOCTYPE_FIRST_AG; 3814 args.total = ap->minlen; 3815 args.minleft = 0; 3816 if ((error = xfs_alloc_vextent(&args))) 3817 return error; 3818 ap->dfops->dop_low = true; 3819 } 3820 if (args.fsbno != NULLFSBLOCK) { 3821 /* 3822 * check the allocation happened at the same or higher AG than 3823 * the first block that was allocated. 3824 */ 3825 ASSERT(*ap->firstblock == NULLFSBLOCK || 3826 XFS_FSB_TO_AGNO(mp, *ap->firstblock) == 3827 XFS_FSB_TO_AGNO(mp, args.fsbno) || 3828 (ap->dfops->dop_low && 3829 XFS_FSB_TO_AGNO(mp, *ap->firstblock) < 3830 XFS_FSB_TO_AGNO(mp, args.fsbno))); 3831 3832 ap->blkno = args.fsbno; 3833 if (*ap->firstblock == NULLFSBLOCK) 3834 *ap->firstblock = args.fsbno; 3835 ASSERT(nullfb || fb_agno == args.agno || 3836 (ap->dfops->dop_low && fb_agno < args.agno)); 3837 ap->length = args.len; 3838 if (!(ap->flags & XFS_BMAPI_COWFORK)) 3839 ap->ip->i_d.di_nblocks += args.len; 3840 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3841 if (ap->wasdel) 3842 ap->ip->i_delayed_blks -= args.len; 3843 /* 3844 * Adjust the disk quota also. This was reserved 3845 * earlier. 3846 */ 3847 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3848 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : 3849 XFS_TRANS_DQ_BCOUNT, 3850 (long) args.len); 3851 } else { 3852 ap->blkno = NULLFSBLOCK; 3853 ap->length = 0; 3854 } 3855 return 0; 3856 } 3857 3858 /* 3859 * For a remap operation, just "allocate" an extent at the address that the 3860 * caller passed in, and ensure that the AGFL is the right size. The caller 3861 * will then map the "allocated" extent into the file somewhere. 
3862 */ 3863 STATIC int 3864 xfs_bmap_remap_alloc( 3865 struct xfs_bmalloca *ap) 3866 { 3867 struct xfs_trans *tp = ap->tp; 3868 struct xfs_mount *mp = tp->t_mountp; 3869 xfs_agblock_t bno; 3870 struct xfs_alloc_arg args; 3871 int error; 3872 3873 /* 3874 * validate that the block number is legal - this enables us to detect 3875 * and handle a silent filesystem corruption rather than crashing. 3876 */ 3877 memset(&args, 0, sizeof(struct xfs_alloc_arg)); 3878 args.tp = ap->tp; 3879 args.mp = ap->tp->t_mountp; 3880 bno = *ap->firstblock; 3881 args.agno = XFS_FSB_TO_AGNO(mp, bno); 3882 args.agbno = XFS_FSB_TO_AGBNO(mp, bno); 3883 if (args.agno >= mp->m_sb.sb_agcount || 3884 args.agbno >= mp->m_sb.sb_agblocks) 3885 return -EFSCORRUPTED; 3886 3887 /* "Allocate" the extent from the range we passed in. */ 3888 trace_xfs_bmap_remap_alloc(ap->ip, *ap->firstblock, ap->length); 3889 ap->blkno = bno; 3890 ap->ip->i_d.di_nblocks += ap->length; 3891 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3892 3893 /* Fix the freelist, like a real allocator does. */ 3894 args.datatype = ap->datatype; 3895 args.pag = xfs_perag_get(args.mp, args.agno); 3896 ASSERT(args.pag); 3897 3898 /* 3899 * The freelist fixing code will decline the allocation if 3900 * the size and shape of the free space doesn't allow for 3901 * allocating the extent and updating all the metadata that 3902 * happens during an allocation. We're remapping, not 3903 * allocating, so skip that check by pretending to be freeing. 3904 */ 3905 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING); 3906 xfs_perag_put(args.pag); 3907 if (error) 3908 trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_); 3909 return error; 3910 } 3911 3912 /* 3913 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 3914 * It figures out where to ask the underlying allocator to put the new extent. 3915 */ 3916 STATIC int 3917 xfs_bmap_alloc( 3918 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3919 { 3920 if (ap->flags & XFS_BMAPI_REMAP) 3921 return xfs_bmap_remap_alloc(ap); 3922 if (XFS_IS_REALTIME_INODE(ap->ip) && 3923 xfs_alloc_is_userdata(ap->datatype)) 3924 return xfs_bmap_rtalloc(ap); 3925 return xfs_bmap_btalloc(ap); 3926 } 3927 3928 /* Trim extent to fit a logical block range.
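 * As a purely illustrative example: an irec covering file offsets [10, 30)
 * trimmed to bno = 15, len = 40 becomes [15, 30); a real br_startblock is
 * advanced by the same five blocks, while delalloc and hole mappings keep
 * their special start values, and an irec that misses [bno, bno + len)
 * entirely comes back with br_blockcount = 0.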
*/ 3929 void 3930 xfs_trim_extent( 3931 struct xfs_bmbt_irec *irec, 3932 xfs_fileoff_t bno, 3933 xfs_filblks_t len) 3934 { 3935 xfs_fileoff_t distance; 3936 xfs_fileoff_t end = bno + len; 3937 3938 if (irec->br_startoff + irec->br_blockcount <= bno || 3939 irec->br_startoff >= end) { 3940 irec->br_blockcount = 0; 3941 return; 3942 } 3943 3944 if (irec->br_startoff < bno) { 3945 distance = bno - irec->br_startoff; 3946 if (isnullstartblock(irec->br_startblock)) 3947 irec->br_startblock = DELAYSTARTBLOCK; 3948 if (irec->br_startblock != DELAYSTARTBLOCK && 3949 irec->br_startblock != HOLESTARTBLOCK) 3950 irec->br_startblock += distance; 3951 irec->br_startoff += distance; 3952 irec->br_blockcount -= distance; 3953 } 3954 3955 if (end < irec->br_startoff + irec->br_blockcount) { 3956 distance = irec->br_startoff + irec->br_blockcount - end; 3957 irec->br_blockcount -= distance; 3958 } 3959 } 3960 3961 /* 3962 * Trim the returned map to the required bounds 3963 */ 3964 STATIC void 3965 xfs_bmapi_trim_map( 3966 struct xfs_bmbt_irec *mval, 3967 struct xfs_bmbt_irec *got, 3968 xfs_fileoff_t *bno, 3969 xfs_filblks_t len, 3970 xfs_fileoff_t obno, 3971 xfs_fileoff_t end, 3972 int n, 3973 int flags) 3974 { 3975 if ((flags & XFS_BMAPI_ENTIRE) || 3976 got->br_startoff + got->br_blockcount <= obno) { 3977 *mval = *got; 3978 if (isnullstartblock(got->br_startblock)) 3979 mval->br_startblock = DELAYSTARTBLOCK; 3980 return; 3981 } 3982 3983 if (obno > *bno) 3984 *bno = obno; 3985 ASSERT((*bno >= obno) || (n == 0)); 3986 ASSERT(*bno < end); 3987 mval->br_startoff = *bno; 3988 if (isnullstartblock(got->br_startblock)) 3989 mval->br_startblock = DELAYSTARTBLOCK; 3990 else 3991 mval->br_startblock = got->br_startblock + 3992 (*bno - got->br_startoff); 3993 /* 3994 * Return the minimum of what we got and what we asked for for 3995 * the length. We can use the len variable here because it is 3996 * modified below and we could have been there before coming 3997 * here if the first part of the allocation didn't overlap what 3998 * was asked for. 
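 * A quick sketch with made-up values: if the caller asked for [100, 120)
 * and got covers [90, 130), then *bno stays at 100 and br_blockcount below
 * becomes min(120 - 100, 40 - (100 - 90)) = 20 blocks.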
3999 */ 4000 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 4001 got->br_blockcount - (*bno - got->br_startoff)); 4002 mval->br_state = got->br_state; 4003 ASSERT(mval->br_blockcount <= len); 4004 return; 4005 } 4006 4007 /* 4008 * Update and validate the extent map to return 4009 */ 4010 STATIC void 4011 xfs_bmapi_update_map( 4012 struct xfs_bmbt_irec **map, 4013 xfs_fileoff_t *bno, 4014 xfs_filblks_t *len, 4015 xfs_fileoff_t obno, 4016 xfs_fileoff_t end, 4017 int *n, 4018 int flags) 4019 { 4020 xfs_bmbt_irec_t *mval = *map; 4021 4022 ASSERT((flags & XFS_BMAPI_ENTIRE) || 4023 ((mval->br_startoff + mval->br_blockcount) <= end)); 4024 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 4025 (mval->br_startoff < obno)); 4026 4027 *bno = mval->br_startoff + mval->br_blockcount; 4028 *len = end - *bno; 4029 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 4030 /* update previous map with new information */ 4031 ASSERT(mval->br_startblock == mval[-1].br_startblock); 4032 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 4033 ASSERT(mval->br_state == mval[-1].br_state); 4034 mval[-1].br_blockcount = mval->br_blockcount; 4035 mval[-1].br_state = mval->br_state; 4036 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 4037 mval[-1].br_startblock != DELAYSTARTBLOCK && 4038 mval[-1].br_startblock != HOLESTARTBLOCK && 4039 mval->br_startblock == mval[-1].br_startblock + 4040 mval[-1].br_blockcount && 4041 ((flags & XFS_BMAPI_IGSTATE) || 4042 mval[-1].br_state == mval->br_state)) { 4043 ASSERT(mval->br_startoff == 4044 mval[-1].br_startoff + mval[-1].br_blockcount); 4045 mval[-1].br_blockcount += mval->br_blockcount; 4046 } else if (*n > 0 && 4047 mval->br_startblock == DELAYSTARTBLOCK && 4048 mval[-1].br_startblock == DELAYSTARTBLOCK && 4049 mval->br_startoff == 4050 mval[-1].br_startoff + mval[-1].br_blockcount) { 4051 mval[-1].br_blockcount += mval->br_blockcount; 4052 mval[-1].br_state = mval->br_state; 4053 } else if (!((*n == 0) && 4054 ((mval->br_startoff + mval->br_blockcount) <= 4055 obno))) { 4056 mval++; 4057 (*n)++; 4058 } 4059 *map = mval; 4060 } 4061 4062 /* 4063 * Map file blocks to filesystem blocks without allocation. 4064 */ 4065 int 4066 xfs_bmapi_read( 4067 struct xfs_inode *ip, 4068 xfs_fileoff_t bno, 4069 xfs_filblks_t len, 4070 struct xfs_bmbt_irec *mval, 4071 int *nmap, 4072 int flags) 4073 { 4074 struct xfs_mount *mp = ip->i_mount; 4075 struct xfs_ifork *ifp; 4076 struct xfs_bmbt_irec got; 4077 xfs_fileoff_t obno; 4078 xfs_fileoff_t end; 4079 xfs_extnum_t idx; 4080 int error; 4081 bool eof = false; 4082 int n = 0; 4083 int whichfork = xfs_bmapi_whichfork(flags); 4084 4085 ASSERT(*nmap >= 1); 4086 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| 4087 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK))); 4088 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 4089 4090 if (unlikely(XFS_TEST_ERROR( 4091 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4092 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4093 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 4094 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 4095 return -EFSCORRUPTED; 4096 } 4097 4098 if (XFS_FORCED_SHUTDOWN(mp)) 4099 return -EIO; 4100 4101 XFS_STATS_INC(mp, xs_blk_mapr); 4102 4103 ifp = XFS_IFORK_PTR(ip, whichfork); 4104 4105 /* No CoW fork? Return a hole. 
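 * (So a caller asking about the CoW fork of an inode that has no CoW fork
 * at all - for example, one that has never had copy-on-write blocks staged -
 * simply gets back a single hole mapping covering the whole request.)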
*/ 4106 if (whichfork == XFS_COW_FORK && !ifp) { 4107 mval->br_startoff = bno; 4108 mval->br_startblock = HOLESTARTBLOCK; 4109 mval->br_blockcount = len; 4110 mval->br_state = XFS_EXT_NORM; 4111 *nmap = 1; 4112 return 0; 4113 } 4114 4115 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4116 error = xfs_iread_extents(NULL, ip, whichfork); 4117 if (error) 4118 return error; 4119 } 4120 4121 if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) 4122 eof = true; 4123 end = bno + len; 4124 obno = bno; 4125 4126 while (bno < end && n < *nmap) { 4127 /* Reading past eof, act as though there's a hole up to end. */ 4128 if (eof) 4129 got.br_startoff = end; 4130 if (got.br_startoff > bno) { 4131 /* Reading in a hole. */ 4132 mval->br_startoff = bno; 4133 mval->br_startblock = HOLESTARTBLOCK; 4134 mval->br_blockcount = 4135 XFS_FILBLKS_MIN(len, got.br_startoff - bno); 4136 mval->br_state = XFS_EXT_NORM; 4137 bno += mval->br_blockcount; 4138 len -= mval->br_blockcount; 4139 mval++; 4140 n++; 4141 continue; 4142 } 4143 4144 /* set up the extent map to return. */ 4145 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); 4146 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4147 4148 /* If we're done, stop now. */ 4149 if (bno >= end || n >= *nmap) 4150 break; 4151 4152 /* Else go on to the next record. */ 4153 if (!xfs_iext_get_extent(ifp, ++idx, &got)) 4154 eof = true; 4155 } 4156 *nmap = n; 4157 return 0; 4158 } 4159 4160 int 4161 xfs_bmapi_reserve_delalloc( 4162 struct xfs_inode *ip, 4163 int whichfork, 4164 xfs_fileoff_t off, 4165 xfs_filblks_t len, 4166 xfs_filblks_t prealloc, 4167 struct xfs_bmbt_irec *got, 4168 xfs_extnum_t *lastx, 4169 int eof) 4170 { 4171 struct xfs_mount *mp = ip->i_mount; 4172 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4173 xfs_extlen_t alen; 4174 xfs_extlen_t indlen; 4175 char rt = XFS_IS_REALTIME_INODE(ip); 4176 xfs_extlen_t extsz; 4177 int error; 4178 xfs_fileoff_t aoff = off; 4179 4180 /* 4181 * Cap the alloc length. Keep track of prealloc so we know whether to 4182 * tag the inode before we return. 4183 */ 4184 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN); 4185 if (!eof) 4186 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); 4187 if (prealloc && alen >= len) 4188 prealloc = alen - len; 4189 4190 /* Figure out the extent size, adjust alen */ 4191 if (whichfork == XFS_COW_FORK) 4192 extsz = xfs_get_cowextsz_hint(ip); 4193 else 4194 extsz = xfs_get_extsz_hint(ip); 4195 if (extsz) { 4196 struct xfs_bmbt_irec prev; 4197 4198 if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev)) 4199 prev.br_startoff = NULLFILEOFF; 4200 4201 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof, 4202 1, 0, &aoff, &alen); 4203 ASSERT(!error); 4204 } 4205 4206 if (rt) 4207 extsz = alen / mp->m_sb.sb_rextsize; 4208 4209 /* 4210 * Make a transaction-less quota reservation for delayed allocation 4211 * blocks. This number gets adjusted later. We return if we haven't 4212 * allocated blocks already inside this loop. 4213 */ 4214 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0, 4215 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4216 if (error) 4217 return error; 4218 4219 /* 4220 * Split changing sb for alen and indlen since they could be coming 4221 * from different places. 
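 * As an illustration of why the two are split: on a realtime inode the alen
 * data blocks come out of the free rt extent count (frextents) while the
 * worst-case indlen blocks for the bmbt always come out of fdblocks; even in
 * the non-realtime case the two reservations are taken, and on failure
 * unwound, independently below.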
4222 */ 4223 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); 4224 ASSERT(indlen > 0); 4225 4226 if (rt) { 4227 error = xfs_mod_frextents(mp, -((int64_t)extsz)); 4228 } else { 4229 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); 4230 } 4231 4232 if (error) 4233 goto out_unreserve_quota; 4234 4235 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); 4236 if (error) 4237 goto out_unreserve_blocks; 4238 4239 4240 ip->i_delayed_blks += alen; 4241 4242 got->br_startoff = aoff; 4243 got->br_startblock = nullstartblock(indlen); 4244 got->br_blockcount = alen; 4245 got->br_state = XFS_EXT_NORM; 4246 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got); 4247 4248 /* 4249 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay 4250 * might have merged it into one of the neighbouring ones. 4251 */ 4252 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got); 4253 4254 /* 4255 * Tag the inode if blocks were preallocated. Note that COW fork 4256 * preallocation can occur at the start or end of the extent, even when 4257 * prealloc == 0, so we must also check the aligned offset and length. 4258 */ 4259 if (whichfork == XFS_DATA_FORK && prealloc) 4260 xfs_inode_set_eofblocks_tag(ip); 4261 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 4262 xfs_inode_set_cowblocks_tag(ip); 4263 4264 ASSERT(got->br_startoff <= aoff); 4265 ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen); 4266 ASSERT(isnullstartblock(got->br_startblock)); 4267 ASSERT(got->br_state == XFS_EXT_NORM); 4268 return 0; 4269 4270 out_unreserve_blocks: 4271 if (rt) 4272 xfs_mod_frextents(mp, extsz); 4273 else 4274 xfs_mod_fdblocks(mp, alen, false); 4275 out_unreserve_quota: 4276 if (XFS_IS_QUOTA_ON(mp)) 4277 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ? 4278 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4279 return error; 4280 } 4281 4282 static int 4283 xfs_bmapi_allocate( 4284 struct xfs_bmalloca *bma) 4285 { 4286 struct xfs_mount *mp = bma->ip->i_mount; 4287 int whichfork = xfs_bmapi_whichfork(bma->flags); 4288 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4289 int tmp_logflags = 0; 4290 int error; 4291 4292 ASSERT(bma->length > 0); 4293 4294 /* 4295 * For the wasdelay case, we could also just allocate the stuff asked 4296 * for in this bmap call but that wouldn't be as good. 4297 */ 4298 if (bma->wasdel) { 4299 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4300 bma->offset = bma->got.br_startoff; 4301 if (bma->idx) { 4302 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), 4303 &bma->prev); 4304 } 4305 } else { 4306 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); 4307 if (!bma->eof) 4308 bma->length = XFS_FILBLKS_MIN(bma->length, 4309 bma->got.br_startoff - bma->offset); 4310 } 4311 4312 /* 4313 * Set the data type being allocated. For the data fork, the first data 4314 * in the file is treated differently to all other allocations. For the 4315 * attribute fork, we only need to ensure the allocated range is not on 4316 * the busy list. 4317 */ 4318 if (!(bma->flags & XFS_BMAPI_METADATA)) { 4319 bma->datatype = XFS_ALLOC_NOBUSY; 4320 if (whichfork == XFS_DATA_FORK) { 4321 if (bma->offset == 0) 4322 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; 4323 else 4324 bma->datatype |= XFS_ALLOC_USERDATA; 4325 } 4326 if (bma->flags & XFS_BMAPI_ZERO) 4327 bma->datatype |= XFS_ALLOC_USERDATA_ZERO; 4328 } 4329 4330 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? 
bma->length : 1; 4331 4332 /* 4333 * Only want to do the alignment at the eof if it is userdata and 4334 * allocation length is larger than a stripe unit. 4335 */ 4336 if (mp->m_dalign && bma->length >= mp->m_dalign && 4337 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4338 error = xfs_bmap_isaeof(bma, whichfork); 4339 if (error) 4340 return error; 4341 } 4342 4343 error = xfs_bmap_alloc(bma); 4344 if (error) 4345 return error; 4346 4347 if (bma->dfops->dop_low) 4348 bma->minleft = 0; 4349 if (bma->cur) 4350 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4351 if (bma->blkno == NULLFSBLOCK) 4352 return 0; 4353 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4354 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4355 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4356 bma->cur->bc_private.b.dfops = bma->dfops; 4357 } 4358 /* 4359 * Bump the number of extents we've allocated 4360 * in this call. 4361 */ 4362 bma->nallocs++; 4363 4364 if (bma->cur) 4365 bma->cur->bc_private.b.flags = 4366 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4367 4368 bma->got.br_startoff = bma->offset; 4369 bma->got.br_startblock = bma->blkno; 4370 bma->got.br_blockcount = bma->length; 4371 bma->got.br_state = XFS_EXT_NORM; 4372 4373 /* 4374 * A wasdelay extent has been initialized, so shouldn't be flagged 4375 * as unwritten. 4376 */ 4377 if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) && 4378 xfs_sb_version_hasextflgbit(&mp->m_sb)) 4379 bma->got.br_state = XFS_EXT_UNWRITTEN; 4380 4381 if (bma->wasdel) 4382 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4383 else 4384 error = xfs_bmap_add_extent_hole_real(bma, whichfork); 4385 4386 bma->logflags |= tmp_logflags; 4387 if (error) 4388 return error; 4389 4390 /* 4391 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4392 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4393 * the neighbouring ones. 4394 */ 4395 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got); 4396 4397 ASSERT(bma->got.br_startoff <= bma->offset); 4398 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4399 bma->offset + bma->length); 4400 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4401 bma->got.br_state == XFS_EXT_UNWRITTEN); 4402 return 0; 4403 } 4404 4405 STATIC int 4406 xfs_bmapi_convert_unwritten( 4407 struct xfs_bmalloca *bma, 4408 struct xfs_bmbt_irec *mval, 4409 xfs_filblks_t len, 4410 int flags) 4411 { 4412 int whichfork = xfs_bmapi_whichfork(flags); 4413 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4414 int tmp_logflags = 0; 4415 int error; 4416 4417 /* check if we need to do unwritten->real conversion */ 4418 if (mval->br_state == XFS_EXT_UNWRITTEN && 4419 (flags & XFS_BMAPI_PREALLOC)) 4420 return 0; 4421 4422 /* check if we need to do real->unwritten conversion */ 4423 if (mval->br_state == XFS_EXT_NORM && 4424 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4425 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4426 return 0; 4427 4428 ASSERT(whichfork != XFS_COW_FORK); 4429 4430 /* 4431 * Modify (by adding) the state flag, if writing. 4432 */ 4433 ASSERT(mval->br_blockcount <= len); 4434 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4435 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4436 bma->ip, whichfork); 4437 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4438 bma->cur->bc_private.b.dfops = bma->dfops; 4439 } 4440 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4441 ? 
XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4442 4443 /* 4444 * Before insertion into the bmbt, zero the range being converted 4445 * if required. 4446 */ 4447 if (flags & XFS_BMAPI_ZERO) { 4448 error = xfs_zero_extent(bma->ip, mval->br_startblock, 4449 mval->br_blockcount); 4450 if (error) 4451 return error; 4452 } 4453 4454 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx, 4455 &bma->cur, mval, bma->firstblock, bma->dfops, 4456 &tmp_logflags); 4457 /* 4458 * Log the inode core unconditionally in the unwritten extent conversion 4459 * path because the conversion might not have done so (e.g., if the 4460 * extent count hasn't changed). We need to make sure the inode is dirty 4461 * in the transaction for the sake of fsync(), even if nothing has 4462 * changed, because fsync() will not force the log for this transaction 4463 * unless it sees the inode pinned. 4464 */ 4465 bma->logflags |= tmp_logflags | XFS_ILOG_CORE; 4466 if (error) 4467 return error; 4468 4469 /* 4470 * Update our extent pointer, given that 4471 * xfs_bmap_add_extent_unwritten_real might have merged it into one 4472 * of the neighbouring ones. 4473 */ 4474 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got); 4475 4476 /* 4477 * We may have combined previously unwritten space with written space, 4478 * so generate another request. 4479 */ 4480 if (mval->br_blockcount < len) 4481 return -EAGAIN; 4482 return 0; 4483 } 4484 4485 /* 4486 * Map file blocks to filesystem blocks, and allocate blocks or convert the 4487 * extent state if necessary. Detailed behaviour is controlled by the flags 4488 * parameter. Only allocates blocks from a single allocation group, to avoid 4489 * locking problems. 4490 * 4491 * The returned value in "firstblock" from the first call in a transaction 4492 * must be remembered and presented to subsequent calls in "firstblock". 4493 * An upper bound for the number of blocks to be allocated is supplied to 4494 * the first call in "total"; if no allocation group has that many free 4495 * blocks then the call will fail (return NULLFSBLOCK in "firstblock"). 4496 */ 4497 int 4498 xfs_bmapi_write( 4499 struct xfs_trans *tp, /* transaction pointer */ 4500 struct xfs_inode *ip, /* incore inode */ 4501 xfs_fileoff_t bno, /* starting file offs. mapped */ 4502 xfs_filblks_t len, /* length to map in file */ 4503 int flags, /* XFS_BMAPI_... */ 4504 xfs_fsblock_t *firstblock, /* first allocated block 4505 controls a.g.
for allocs */ 4506 xfs_extlen_t total, /* total blocks needed */ 4507 struct xfs_bmbt_irec *mval, /* output: map values */ 4508 int *nmap, /* i/o: mval size/count */ 4509 struct xfs_defer_ops *dfops) /* i/o: list extents to free */ 4510 { 4511 struct xfs_mount *mp = ip->i_mount; 4512 struct xfs_ifork *ifp; 4513 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */ 4514 xfs_fileoff_t end; /* end of mapped file region */ 4515 bool eof = false; /* after the end of extents */ 4516 int error; /* error return */ 4517 int n; /* current extent index */ 4518 xfs_fileoff_t obno; /* old block number (offset) */ 4519 int whichfork; /* data or attr fork */ 4520 char inhole; /* current location is hole in file */ 4521 char wasdelay; /* old extent was delayed */ 4522 4523 #ifdef DEBUG 4524 xfs_fileoff_t orig_bno; /* original block number value */ 4525 int orig_flags; /* original flags arg value */ 4526 xfs_filblks_t orig_len; /* original value of len arg */ 4527 struct xfs_bmbt_irec *orig_mval; /* original value of mval */ 4528 int orig_nmap; /* original value of *nmap */ 4529 4530 orig_bno = bno; 4531 orig_len = len; 4532 orig_flags = flags; 4533 orig_mval = mval; 4534 orig_nmap = *nmap; 4535 #endif 4536 whichfork = xfs_bmapi_whichfork(flags); 4537 4538 ASSERT(*nmap >= 1); 4539 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); 4540 ASSERT(!(flags & XFS_BMAPI_IGSTATE)); 4541 ASSERT(tp != NULL); 4542 ASSERT(len > 0); 4543 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); 4544 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4545 ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK); 4546 ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP)); 4547 ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP)); 4548 ASSERT(!(flags & XFS_BMAPI_PREALLOC) || whichfork != XFS_COW_FORK); 4549 ASSERT(!(flags & XFS_BMAPI_CONVERT) || whichfork != XFS_COW_FORK); 4550 4551 /* zeroing is currently only for data extents, not metadata */ 4552 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) != 4553 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)); 4554 /* 4555 * we can allocate unwritten extents or pre-zero allocated blocks, 4556 * but it makes no sense to do both at once. This would result in 4557 * zeroing the unwritten extent twice, while it would still be an 4558 * unwritten extent.
4559 */ 4560 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) != 4561 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)); 4562 4563 if (unlikely(XFS_TEST_ERROR( 4564 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4565 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4566 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 4567 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); 4568 return -EFSCORRUPTED; 4569 } 4570 4571 if (XFS_FORCED_SHUTDOWN(mp)) 4572 return -EIO; 4573 4574 ifp = XFS_IFORK_PTR(ip, whichfork); 4575 4576 XFS_STATS_INC(mp, xs_blk_mapw); 4577 4578 if (*firstblock == NULLFSBLOCK) { 4579 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) 4580 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1; 4581 else 4582 bma.minleft = 1; 4583 } else { 4584 bma.minleft = 0; 4585 } 4586 4587 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4588 error = xfs_iread_extents(tp, ip, whichfork); 4589 if (error) 4590 goto error0; 4591 } 4592 4593 n = 0; 4594 end = bno + len; 4595 obno = bno; 4596 4597 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got)) 4598 eof = true; 4599 if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev)) 4600 bma.prev.br_startoff = NULLFILEOFF; 4601 bma.tp = tp; 4602 bma.ip = ip; 4603 bma.total = total; 4604 bma.datatype = 0; 4605 bma.dfops = dfops; 4606 bma.firstblock = firstblock; 4607 4608 while (bno < end && n < *nmap) { 4609 inhole = eof || bma.got.br_startoff > bno; 4610 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); 4611 4612 /* 4613 * Make sure we only reflink into a hole. 4614 */ 4615 if (flags & XFS_BMAPI_REMAP) 4616 ASSERT(inhole); 4617 if (flags & XFS_BMAPI_COWFORK) 4618 ASSERT(!inhole); 4619 4620 /* 4621 * First, deal with the hole before the allocated space 4622 * that we found, if any. 4623 */ 4624 if (inhole || wasdelay) { 4625 bma.eof = eof; 4626 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4627 bma.wasdel = wasdelay; 4628 bma.offset = bno; 4629 bma.flags = flags; 4630 4631 /* 4632 * There's a 32/64 bit type mismatch between the 4633 * allocation length request (which can be 64 bits in 4634 * length) and the bma length request, which is 4635 * xfs_extlen_t and therefore 32 bits. Hence we have to 4636 * check for 32-bit overflows and handle them here. 4637 */ 4638 if (len > (xfs_filblks_t)MAXEXTLEN) 4639 bma.length = MAXEXTLEN; 4640 else 4641 bma.length = len; 4642 4643 ASSERT(len > 0); 4644 ASSERT(bma.length > 0); 4645 error = xfs_bmapi_allocate(&bma); 4646 if (error) 4647 goto error0; 4648 if (bma.blkno == NULLFSBLOCK) 4649 break; 4650 4651 /* 4652 * If this is a CoW allocation, record the data in 4653 * the refcount btree for orphan recovery. 4654 */ 4655 if (whichfork == XFS_COW_FORK) { 4656 error = xfs_refcount_alloc_cow_extent(mp, dfops, 4657 bma.blkno, bma.length); 4658 if (error) 4659 goto error0; 4660 } 4661 } 4662 4663 /* Deal with the allocated space we found. */ 4664 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4665 end, n, flags); 4666 4667 /* Execute unwritten extent conversion if necessary */ 4668 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4669 if (error == -EAGAIN) 4670 continue; 4671 if (error) 4672 goto error0; 4673 4674 /* update the extent map to return */ 4675 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4676 4677 /* 4678 * If we're done, stop now. Stop when we've allocated 4679 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4680 * the transaction may get too big. 
4681 */ 4682 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4683 break; 4684 4685 /* Else go on to the next record. */ 4686 bma.prev = bma.got; 4687 if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got)) 4688 eof = true; 4689 } 4690 *nmap = n; 4691 4692 /* 4693 * Transform from btree to extents, give it cur. 4694 */ 4695 if (xfs_bmap_wants_extents(ip, whichfork)) { 4696 int tmp_logflags = 0; 4697 4698 ASSERT(bma.cur); 4699 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, 4700 &tmp_logflags, whichfork); 4701 bma.logflags |= tmp_logflags; 4702 if (error) 4703 goto error0; 4704 } 4705 4706 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || 4707 XFS_IFORK_NEXTENTS(ip, whichfork) > 4708 XFS_IFORK_MAXEXT(ip, whichfork)); 4709 error = 0; 4710 error0: 4711 /* 4712 * Log everything. Do this after conversion, there's no point in 4713 * logging the extent records if we've converted to btree format. 4714 */ 4715 if ((bma.logflags & xfs_ilog_fext(whichfork)) && 4716 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 4717 bma.logflags &= ~xfs_ilog_fext(whichfork); 4718 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) && 4719 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 4720 bma.logflags &= ~xfs_ilog_fbroot(whichfork); 4721 /* 4722 * Log whatever the flags say, even if error. Otherwise we might miss 4723 * detecting a case where the data is changed, there's an error, 4724 * and it's not logged so we don't shutdown when we should. 4725 */ 4726 if (bma.logflags) 4727 xfs_trans_log_inode(tp, ip, bma.logflags); 4728 4729 if (bma.cur) { 4730 if (!error) { 4731 ASSERT(*firstblock == NULLFSBLOCK || 4732 XFS_FSB_TO_AGNO(mp, *firstblock) == 4733 XFS_FSB_TO_AGNO(mp, 4734 bma.cur->bc_private.b.firstblock) || 4735 (dfops->dop_low && 4736 XFS_FSB_TO_AGNO(mp, *firstblock) < 4737 XFS_FSB_TO_AGNO(mp, 4738 bma.cur->bc_private.b.firstblock))); 4739 *firstblock = bma.cur->bc_private.b.firstblock; 4740 } 4741 xfs_btree_del_cursor(bma.cur, 4742 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 4743 } 4744 if (!error) 4745 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4746 orig_nmap, *nmap); 4747 return error; 4748 } 4749 4750 /* 4751 * When a delalloc extent is split (e.g., due to a hole punch), the original 4752 * indlen reservation must be shared across the two new extents that are left 4753 * behind. 4754 * 4755 * Given the original reservation and the worst case indlen for the two new 4756 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4757 * reservation fairly across the two new extents. If necessary, steal available 4758 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4759 * ores == 1). The number of stolen blocks is returned. The availability and 4760 * subsequent accounting of stolen blocks is the responsibility of the caller. 4761 */ 4762 static xfs_filblks_t 4763 xfs_bmap_split_indlen( 4764 xfs_filblks_t ores, /* original res. */ 4765 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4766 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4767 xfs_filblks_t avail) /* stealable blocks */ 4768 { 4769 xfs_filblks_t len1 = *indlen1; 4770 xfs_filblks_t len2 = *indlen2; 4771 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4772 xfs_filblks_t stolen = 0; 4773 4774 /* 4775 * Steal as many blocks as we can to try and satisfy the worst case 4776 * indlen for both new extents. 
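 * A worked example with made-up numbers: ores = 5, *indlen1 = 4, *indlen2 = 3
 * and avail = 1. The combined worst case is 7, so one block is stolen (nres
 * drops to 6), and the skim loop below then trims one block from indlen1,
 * leaving 3 + 3 = 6 blocks backed by the 5 originally reserved plus the 1
 * stolen.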
4777 */ 4778 while (nres > ores && avail) { 4779 nres--; 4780 avail--; 4781 stolen++; 4782 } 4783 4784 /* 4785 * The only blocks available are those reserved for the original 4786 * extent and what we can steal from the extent being removed. 4787 * If this still isn't enough to satisfy the combined 4788 * requirements for the two new extents, skim blocks off of each 4789 * of the new reservations until they match what is available. 4790 */ 4791 while (nres > ores) { 4792 if (len1) { 4793 len1--; 4794 nres--; 4795 } 4796 if (nres == ores) 4797 break; 4798 if (len2) { 4799 len2--; 4800 nres--; 4801 } 4802 } 4803 4804 *indlen1 = len1; 4805 *indlen2 = len2; 4806 4807 return stolen; 4808 } 4809 4810 int 4811 xfs_bmap_del_extent_delay( 4812 struct xfs_inode *ip, 4813 int whichfork, 4814 xfs_extnum_t *idx, 4815 struct xfs_bmbt_irec *got, 4816 struct xfs_bmbt_irec *del) 4817 { 4818 struct xfs_mount *mp = ip->i_mount; 4819 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4820 struct xfs_bmbt_irec new; 4821 int64_t da_old, da_new, da_diff = 0; 4822 xfs_fileoff_t del_endoff, got_endoff; 4823 xfs_filblks_t got_indlen, new_indlen, stolen; 4824 int error = 0, state = 0; 4825 bool isrt; 4826 4827 XFS_STATS_INC(mp, xs_del_exlist); 4828 4829 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4830 del_endoff = del->br_startoff + del->br_blockcount; 4831 got_endoff = got->br_startoff + got->br_blockcount; 4832 da_old = startblockval(got->br_startblock); 4833 da_new = 0; 4834 4835 ASSERT(*idx >= 0); 4836 ASSERT(*idx <= xfs_iext_count(ifp)); 4837 ASSERT(del->br_blockcount > 0); 4838 ASSERT(got->br_startoff <= del->br_startoff); 4839 ASSERT(got_endoff >= del_endoff); 4840 4841 if (isrt) { 4842 int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4843 4844 do_div(rtexts, mp->m_sb.sb_rextsize); 4845 xfs_mod_frextents(mp, rtexts); 4846 } 4847 4848 /* 4849 * Update the inode delalloc counter now and wait to update the 4850 * sb counters as we might have to borrow some blocks for the 4851 * indirect block accounting. 4852 */ 4853 error = xfs_trans_reserve_quota_nblks(NULL, ip, 4854 -((long)del->br_blockcount), 0, 4855 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4856 if (error) 4857 return error; 4858 ip->i_delayed_blks -= del->br_blockcount; 4859 4860 if (whichfork == XFS_COW_FORK) 4861 state |= BMAP_COWFORK; 4862 4863 if (got->br_startoff == del->br_startoff) 4864 state |= BMAP_LEFT_CONTIG; 4865 if (got_endoff == del_endoff) 4866 state |= BMAP_RIGHT_CONTIG; 4867 4868 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 4869 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 4870 /* 4871 * Matches the whole extent. Delete the entry. 4872 */ 4873 xfs_iext_remove(ip, *idx, 1, state); 4874 --*idx; 4875 break; 4876 case BMAP_LEFT_CONTIG: 4877 /* 4878 * Deleting the first part of the extent. 4879 */ 4880 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4881 got->br_startoff = del_endoff; 4882 got->br_blockcount -= del->br_blockcount; 4883 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4884 got->br_blockcount), da_old); 4885 got->br_startblock = nullstartblock((int)da_new); 4886 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4887 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 4888 break; 4889 case BMAP_RIGHT_CONTIG: 4890 /* 4891 * Deleting the last part of the extent. 
4892 */ 4893 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4894 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4895 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4896 got->br_blockcount), da_old); 4897 got->br_startblock = nullstartblock((int)da_new); 4898 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4899 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 4900 break; 4901 case 0: 4902 /* 4903 * Deleting the middle of the extent. 4904 * 4905 * Distribute the original indlen reservation across the two new 4906 * extents. Steal blocks from the deleted extent if necessary. 4907 * Stealing blocks simply fudges the fdblocks accounting below. 4908 * Warn if either of the new indlen reservations is zero as this 4909 * can lead to delalloc problems. 4910 */ 4911 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4912 4913 got->br_blockcount = del->br_startoff - got->br_startoff; 4914 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4915 4916 new.br_blockcount = got_endoff - del_endoff; 4917 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4918 4919 WARN_ON_ONCE(!got_indlen || !new_indlen); 4920 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4921 del->br_blockcount); 4922 4923 got->br_startblock = nullstartblock((int)got_indlen); 4924 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4925 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_); 4926 4927 new.br_startoff = del_endoff; 4928 new.br_state = got->br_state; 4929 new.br_startblock = nullstartblock((int)new_indlen); 4930 4931 ++*idx; 4932 xfs_iext_insert(ip, *idx, 1, &new, state); 4933 4934 da_new = got_indlen + new_indlen - stolen; 4935 del->br_blockcount -= stolen; 4936 break; 4937 } 4938 4939 ASSERT(da_old >= da_new); 4940 da_diff = da_old - da_new; 4941 if (!isrt) 4942 da_diff += del->br_blockcount; 4943 if (da_diff) 4944 xfs_mod_fdblocks(mp, da_diff, false); 4945 return error; 4946 } 4947 4948 void 4949 xfs_bmap_del_extent_cow( 4950 struct xfs_inode *ip, 4951 xfs_extnum_t *idx, 4952 struct xfs_bmbt_irec *got, 4953 struct xfs_bmbt_irec *del) 4954 { 4955 struct xfs_mount *mp = ip->i_mount; 4956 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4957 struct xfs_bmbt_irec new; 4958 xfs_fileoff_t del_endoff, got_endoff; 4959 int state = BMAP_COWFORK; 4960 4961 XFS_STATS_INC(mp, xs_del_exlist); 4962 4963 del_endoff = del->br_startoff + del->br_blockcount; 4964 got_endoff = got->br_startoff + got->br_blockcount; 4965 4966 ASSERT(*idx >= 0); 4967 ASSERT(*idx <= xfs_iext_count(ifp)); 4968 ASSERT(del->br_blockcount > 0); 4969 ASSERT(got->br_startoff <= del->br_startoff); 4970 ASSERT(got_endoff >= del_endoff); 4971 ASSERT(!isnullstartblock(got->br_startblock)); 4972 4973 if (got->br_startoff == del->br_startoff) 4974 state |= BMAP_LEFT_CONTIG; 4975 if (got_endoff == del_endoff) 4976 state |= BMAP_RIGHT_CONTIG; 4977 4978 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 4979 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 4980 /* 4981 * Matches the whole extent. Delete the entry. 4982 */ 4983 xfs_iext_remove(ip, *idx, 1, state); 4984 --*idx; 4985 break; 4986 case BMAP_LEFT_CONTIG: 4987 /* 4988 * Deleting the first part of the extent. 
4989 */ 4990 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4991 got->br_startoff = del_endoff; 4992 got->br_blockcount -= del->br_blockcount; 4993 got->br_startblock = del->br_startblock + del->br_blockcount; 4994 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4995 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 4996 break; 4997 case BMAP_RIGHT_CONTIG: 4998 /* 4999 * Deleting the last part of the extent. 5000 */ 5001 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5002 got->br_blockcount -= del->br_blockcount; 5003 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 5004 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5005 break; 5006 case 0: 5007 /* 5008 * Deleting the middle of the extent. 5009 */ 5010 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5011 got->br_blockcount = del->br_startoff - got->br_startoff; 5012 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 5013 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5014 5015 new.br_startoff = del_endoff; 5016 new.br_blockcount = got_endoff - del_endoff; 5017 new.br_state = got->br_state; 5018 new.br_startblock = del->br_startblock + del->br_blockcount; 5019 5020 ++*idx; 5021 xfs_iext_insert(ip, *idx, 1, &new, state); 5022 break; 5023 } 5024 } 5025 5026 /* 5027 * Called by xfs_bmapi to update file extent records and the btree 5028 * after removing space (or undoing a delayed allocation). 5029 */ 5030 STATIC int /* error */ 5031 xfs_bmap_del_extent( 5032 xfs_inode_t *ip, /* incore inode pointer */ 5033 xfs_trans_t *tp, /* current transaction pointer */ 5034 xfs_extnum_t *idx, /* extent number to update/delete */ 5035 struct xfs_defer_ops *dfops, /* list of extents to be freed */ 5036 xfs_btree_cur_t *cur, /* if null, not a btree */ 5037 xfs_bmbt_irec_t *del, /* data to remove from extents */ 5038 int *logflagsp, /* inode logging flags */ 5039 int whichfork, /* data or attr fork */ 5040 int bflags) /* bmapi flags */ 5041 { 5042 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */ 5043 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */ 5044 xfs_fsblock_t del_endblock=0; /* first block past del */ 5045 xfs_fileoff_t del_endoff; /* first offset past del */ 5046 int delay; /* current block is delayed allocated */ 5047 int do_fx; /* free extent at end of routine */ 5048 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */ 5049 int error; /* error return value */ 5050 int flags; /* inode logging flags */ 5051 xfs_bmbt_irec_t got; /* current extent entry */ 5052 xfs_fileoff_t got_endoff; /* first offset past got */ 5053 int i; /* temp state */ 5054 xfs_ifork_t *ifp; /* inode fork pointer */ 5055 xfs_mount_t *mp; /* mount structure */ 5056 xfs_filblks_t nblks; /* quota/sb block count */ 5057 xfs_bmbt_irec_t new; /* new record to be inserted */ 5058 /* REFERENCED */ 5059 uint qfield; /* quota field to update */ 5060 xfs_filblks_t temp; /* for indirect length calculations */ 5061 xfs_filblks_t temp2; /* for indirect length calculations */ 5062 int state = 0; 5063 5064 mp = ip->i_mount; 5065 XFS_STATS_INC(mp, xs_del_exlist); 5066 5067 if (whichfork == XFS_ATTR_FORK) 5068 state |= BMAP_ATTRFORK; 5069 else if (whichfork == XFS_COW_FORK) 5070 state |= BMAP_COWFORK; 5071 5072 ifp = XFS_IFORK_PTR(ip, whichfork); 5073 ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp))); 5074 ASSERT(del->br_blockcount > 0); 5075 ep = xfs_iext_get_ext(ifp, *idx); 5076 xfs_bmbt_get_all(ep, &got); 5077 ASSERT(got.br_startoff <= del->br_startoff); 5078 del_endoff = del->br_startoff + 
del->br_blockcount; 5079 got_endoff = got.br_startoff + got.br_blockcount; 5080 ASSERT(got_endoff >= del_endoff); 5081 delay = isnullstartblock(got.br_startblock); 5082 ASSERT(isnullstartblock(del->br_startblock) == delay); 5083 flags = 0; 5084 qfield = 0; 5085 error = 0; 5086 /* 5087 * If deleting a real allocation, must free up the disk space. 5088 */ 5089 if (!delay) { 5090 flags = XFS_ILOG_CORE; 5091 /* 5092 * Realtime allocation. Free it and record di_nblocks update. 5093 */ 5094 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 5095 xfs_fsblock_t bno; 5096 xfs_filblks_t len; 5097 5098 ASSERT(do_mod(del->br_blockcount, 5099 mp->m_sb.sb_rextsize) == 0); 5100 ASSERT(do_mod(del->br_startblock, 5101 mp->m_sb.sb_rextsize) == 0); 5102 bno = del->br_startblock; 5103 len = del->br_blockcount; 5104 do_div(bno, mp->m_sb.sb_rextsize); 5105 do_div(len, mp->m_sb.sb_rextsize); 5106 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 5107 if (error) 5108 goto done; 5109 do_fx = 0; 5110 nblks = len * mp->m_sb.sb_rextsize; 5111 qfield = XFS_TRANS_DQ_RTBCOUNT; 5112 } 5113 /* 5114 * Ordinary allocation. 5115 */ 5116 else { 5117 do_fx = 1; 5118 nblks = del->br_blockcount; 5119 qfield = XFS_TRANS_DQ_BCOUNT; 5120 } 5121 /* 5122 * Set up del_endblock and cur for later. 5123 */ 5124 del_endblock = del->br_startblock + del->br_blockcount; 5125 if (cur) { 5126 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff, 5127 got.br_startblock, got.br_blockcount, 5128 &i))) 5129 goto done; 5130 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5131 } 5132 da_old = da_new = 0; 5133 } else { 5134 da_old = startblockval(got.br_startblock); 5135 da_new = 0; 5136 nblks = 0; 5137 do_fx = 0; 5138 } 5139 5140 /* 5141 * Set flag value to use in switch statement. 5142 * Left-contig is 2, right-contig is 1. 5143 */ 5144 switch (((got.br_startoff == del->br_startoff) << 1) | 5145 (got_endoff == del_endoff)) { 5146 case 3: 5147 /* 5148 * Matches the whole extent. Delete the entry. 5149 */ 5150 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5151 xfs_iext_remove(ip, *idx, 1, 5152 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); 5153 --*idx; 5154 if (delay) 5155 break; 5156 5157 XFS_IFORK_NEXT_SET(ip, whichfork, 5158 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5159 flags |= XFS_ILOG_CORE; 5160 if (!cur) { 5161 flags |= xfs_ilog_fext(whichfork); 5162 break; 5163 } 5164 if ((error = xfs_btree_delete(cur, &i))) 5165 goto done; 5166 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5167 break; 5168 5169 case 2: 5170 /* 5171 * Deleting the first part of the extent. 5172 */ 5173 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5174 xfs_bmbt_set_startoff(ep, del_endoff); 5175 temp = got.br_blockcount - del->br_blockcount; 5176 xfs_bmbt_set_blockcount(ep, temp); 5177 if (delay) { 5178 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 5179 da_old); 5180 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5181 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5182 da_new = temp; 5183 break; 5184 } 5185 xfs_bmbt_set_startblock(ep, del_endblock); 5186 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5187 if (!cur) { 5188 flags |= xfs_ilog_fext(whichfork); 5189 break; 5190 } 5191 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, 5192 got.br_blockcount - del->br_blockcount, 5193 got.br_state))) 5194 goto done; 5195 break; 5196 5197 case 1: 5198 /* 5199 * Deleting the last part of the extent. 
5200 */ 5201 temp = got.br_blockcount - del->br_blockcount; 5202 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5203 xfs_bmbt_set_blockcount(ep, temp); 5204 if (delay) { 5205 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 5206 da_old); 5207 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5208 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5209 da_new = temp; 5210 break; 5211 } 5212 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5213 if (!cur) { 5214 flags |= xfs_ilog_fext(whichfork); 5215 break; 5216 } 5217 if ((error = xfs_bmbt_update(cur, got.br_startoff, 5218 got.br_startblock, 5219 got.br_blockcount - del->br_blockcount, 5220 got.br_state))) 5221 goto done; 5222 break; 5223 5224 case 0: 5225 /* 5226 * Deleting the middle of the extent. 5227 */ 5228 temp = del->br_startoff - got.br_startoff; 5229 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5230 xfs_bmbt_set_blockcount(ep, temp); 5231 new.br_startoff = del_endoff; 5232 temp2 = got_endoff - del_endoff; 5233 new.br_blockcount = temp2; 5234 new.br_state = got.br_state; 5235 if (!delay) { 5236 new.br_startblock = del_endblock; 5237 flags |= XFS_ILOG_CORE; 5238 if (cur) { 5239 if ((error = xfs_bmbt_update(cur, 5240 got.br_startoff, 5241 got.br_startblock, temp, 5242 got.br_state))) 5243 goto done; 5244 if ((error = xfs_btree_increment(cur, 0, &i))) 5245 goto done; 5246 cur->bc_rec.b = new; 5247 error = xfs_btree_insert(cur, &i); 5248 if (error && error != -ENOSPC) 5249 goto done; 5250 /* 5251 * If get no-space back from btree insert, 5252 * it tried a split, and we have a zero 5253 * block reservation. 5254 * Fix up our state and return the error. 5255 */ 5256 if (error == -ENOSPC) { 5257 /* 5258 * Reset the cursor, don't trust 5259 * it after any insert operation. 5260 */ 5261 if ((error = xfs_bmbt_lookup_eq(cur, 5262 got.br_startoff, 5263 got.br_startblock, 5264 temp, &i))) 5265 goto done; 5266 XFS_WANT_CORRUPTED_GOTO(mp, 5267 i == 1, done); 5268 /* 5269 * Update the btree record back 5270 * to the original value. 5271 */ 5272 if ((error = xfs_bmbt_update(cur, 5273 got.br_startoff, 5274 got.br_startblock, 5275 got.br_blockcount, 5276 got.br_state))) 5277 goto done; 5278 /* 5279 * Reset the extent record back 5280 * to the original value. 5281 */ 5282 xfs_bmbt_set_blockcount(ep, 5283 got.br_blockcount); 5284 flags = 0; 5285 error = -ENOSPC; 5286 goto done; 5287 } 5288 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5289 } else 5290 flags |= xfs_ilog_fext(whichfork); 5291 XFS_IFORK_NEXT_SET(ip, whichfork, 5292 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5293 } else { 5294 xfs_filblks_t stolen; 5295 ASSERT(whichfork == XFS_DATA_FORK); 5296 5297 /* 5298 * Distribute the original indlen reservation across the 5299 * two new extents. Steal blocks from the deleted extent 5300 * if necessary. Stealing blocks simply fudges the 5301 * fdblocks accounting in xfs_bunmapi(). 5302 */ 5303 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount); 5304 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount); 5305 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2, 5306 del->br_blockcount); 5307 da_new = temp + temp2 - stolen; 5308 del->br_blockcount -= stolen; 5309 5310 /* 5311 * Set the reservation for each extent. Warn if either 5312 * is zero as this can lead to delalloc problems. 
5313 */ 5314 WARN_ON_ONCE(!temp || !temp2); 5315 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5316 new.br_startblock = nullstartblock((int)temp2); 5317 } 5318 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5319 xfs_iext_insert(ip, *idx + 1, 1, &new, state); 5320 ++*idx; 5321 break; 5322 } 5323 5324 /* remove reverse mapping */ 5325 if (!delay) { 5326 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del); 5327 if (error) 5328 goto done; 5329 } 5330 5331 /* 5332 * If we need to, add to list of extents to delete. 5333 */ 5334 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { 5335 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5336 error = xfs_refcount_decrease_extent(mp, dfops, del); 5337 if (error) 5338 goto done; 5339 } else 5340 xfs_bmap_add_free(mp, dfops, del->br_startblock, 5341 del->br_blockcount, NULL); 5342 } 5343 5344 /* 5345 * Adjust inode # blocks in the file. 5346 */ 5347 if (nblks) 5348 ip->i_d.di_nblocks -= nblks; 5349 /* 5350 * Adjust quota data. 5351 */ 5352 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5353 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5354 5355 /* 5356 * Account for change in delayed indirect blocks. 5357 * Nothing to do for disk quota accounting here. 5358 */ 5359 ASSERT(da_old >= da_new); 5360 if (da_old > da_new) 5361 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false); 5362 done: 5363 *logflagsp = flags; 5364 return error; 5365 } 5366 5367 /* 5368 * Unmap (remove) blocks from a file. 5369 * If nexts is nonzero then the number of extents to remove is limited to 5370 * that value. If not all extents in the block range can be removed then 5371 * *done is set. 5372 */ 5373 int /* error */ 5374 __xfs_bunmapi( 5375 xfs_trans_t *tp, /* transaction pointer */ 5376 struct xfs_inode *ip, /* incore inode */ 5377 xfs_fileoff_t bno, /* starting offset to unmap */ 5378 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5379 int flags, /* misc flags */ 5380 xfs_extnum_t nexts, /* number of extents max */ 5381 xfs_fsblock_t *firstblock, /* first allocated block 5382 controls a.g. 
for allocs */ 5383 struct xfs_defer_ops *dfops) /* i/o: deferred updates */ 5384 { 5385 xfs_btree_cur_t *cur; /* bmap btree cursor */ 5386 xfs_bmbt_irec_t del; /* extent being deleted */ 5387 int error; /* error return value */ 5388 xfs_extnum_t extno; /* extent number in list */ 5389 xfs_bmbt_irec_t got; /* current extent record */ 5390 xfs_ifork_t *ifp; /* inode fork pointer */ 5391 int isrt; /* freeing in rt area */ 5392 xfs_extnum_t lastx; /* last extent index used */ 5393 int logflags; /* transaction logging flags */ 5394 xfs_extlen_t mod; /* rt extent offset */ 5395 xfs_mount_t *mp; /* mount structure */ 5396 xfs_fileoff_t start; /* first file offset deleted */ 5397 int tmp_logflags; /* partial logging flags */ 5398 int wasdel; /* was a delayed alloc extent */ 5399 int whichfork; /* data or attribute fork */ 5400 xfs_fsblock_t sum; 5401 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5402 5403 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); 5404 5405 whichfork = xfs_bmapi_whichfork(flags); 5406 ASSERT(whichfork != XFS_COW_FORK); 5407 ifp = XFS_IFORK_PTR(ip, whichfork); 5408 if (unlikely( 5409 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5410 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5411 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5412 ip->i_mount); 5413 return -EFSCORRUPTED; 5414 } 5415 mp = ip->i_mount; 5416 if (XFS_FORCED_SHUTDOWN(mp)) 5417 return -EIO; 5418 5419 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5420 ASSERT(len > 0); 5421 ASSERT(nexts >= 0); 5422 5423 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5424 (error = xfs_iread_extents(tp, ip, whichfork))) 5425 return error; 5426 if (xfs_iext_count(ifp) == 0) { 5427 *rlen = 0; 5428 return 0; 5429 } 5430 XFS_STATS_INC(mp, xs_blk_unmap); 5431 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5432 start = bno; 5433 bno = start + len - 1; 5434 5435 /* 5436 * Check to see if the given block number is past the end of the 5437 * file, back up to the last block if so... 5438 */ 5439 if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) { 5440 ASSERT(lastx > 0); 5441 xfs_iext_get_extent(ifp, --lastx, &got); 5442 bno = got.br_startoff + got.br_blockcount - 1; 5443 } 5444 5445 logflags = 0; 5446 if (ifp->if_flags & XFS_IFBROOT) { 5447 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5448 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5449 cur->bc_private.b.firstblock = *firstblock; 5450 cur->bc_private.b.dfops = dfops; 5451 cur->bc_private.b.flags = 0; 5452 } else 5453 cur = NULL; 5454 5455 if (isrt) { 5456 /* 5457 * Synchronize by locking the bitmap inode. 5458 */ 5459 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5460 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5461 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5462 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5463 } 5464 5465 extno = 0; 5466 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 && 5467 (nexts == 0 || extno < nexts)) { 5468 /* 5469 * Is the found extent after a hole in which bno lives? 5470 * Just back up to the previous extent, if so. 5471 */ 5472 if (got.br_startoff > bno) { 5473 if (--lastx < 0) 5474 break; 5475 xfs_iext_get_extent(ifp, lastx, &got); 5476 } 5477 /* 5478 * Is the last block of this extent before the range 5479 * we're supposed to delete? If so, we're done. 
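 * (Remember that this loop walks the range backwards: bno starts at
 * start + len - 1 and extents are visited from the highest file offset down,
 * so an extent ending below start means every remaining record is below the
 * range as well.)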
5480 */ 5481 bno = XFS_FILEOFF_MIN(bno, 5482 got.br_startoff + got.br_blockcount - 1); 5483 if (bno < start) 5484 break; 5485 /* 5486 * Then deal with the (possibly delayed) allocated space 5487 * we found. 5488 */ 5489 del = got; 5490 wasdel = isnullstartblock(del.br_startblock); 5491 if (got.br_startoff < start) { 5492 del.br_startoff = start; 5493 del.br_blockcount -= start - got.br_startoff; 5494 if (!wasdel) 5495 del.br_startblock += start - got.br_startoff; 5496 } 5497 if (del.br_startoff + del.br_blockcount > bno + 1) 5498 del.br_blockcount = bno + 1 - del.br_startoff; 5499 sum = del.br_startblock + del.br_blockcount; 5500 if (isrt && 5501 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) { 5502 /* 5503 * Realtime extent not lined up at the end. 5504 * The extent could have been split into written 5505 * and unwritten pieces, or we could just be 5506 * unmapping part of it. But we can't really 5507 * get rid of part of a realtime extent. 5508 */ 5509 if (del.br_state == XFS_EXT_UNWRITTEN || 5510 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5511 /* 5512 * This piece is unwritten, or we're not 5513 * using unwritten extents. Skip over it. 5514 */ 5515 ASSERT(bno >= mod); 5516 bno -= mod > del.br_blockcount ? 5517 del.br_blockcount : mod; 5518 if (bno < got.br_startoff) { 5519 if (--lastx >= 0) 5520 xfs_bmbt_get_all(xfs_iext_get_ext( 5521 ifp, lastx), &got); 5522 } 5523 continue; 5524 } 5525 /* 5526 * It's written, turn it unwritten. 5527 * This is better than zeroing it. 5528 */ 5529 ASSERT(del.br_state == XFS_EXT_NORM); 5530 ASSERT(tp->t_blk_res > 0); 5531 /* 5532 * If this spans a realtime extent boundary, 5533 * chop it back to the start of the one we end at. 5534 */ 5535 if (del.br_blockcount > mod) { 5536 del.br_startoff += del.br_blockcount - mod; 5537 del.br_startblock += del.br_blockcount - mod; 5538 del.br_blockcount = mod; 5539 } 5540 del.br_state = XFS_EXT_UNWRITTEN; 5541 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5542 &lastx, &cur, &del, firstblock, dfops, 5543 &logflags); 5544 if (error) 5545 goto error0; 5546 goto nodelete; 5547 } 5548 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) { 5549 /* 5550 * Realtime extent is lined up at the end but not 5551 * at the front. We'll get rid of full extents if 5552 * we can. 5553 */ 5554 mod = mp->m_sb.sb_rextsize - mod; 5555 if (del.br_blockcount > mod) { 5556 del.br_blockcount -= mod; 5557 del.br_startoff += mod; 5558 del.br_startblock += mod; 5559 } else if ((del.br_startoff == start && 5560 (del.br_state == XFS_EXT_UNWRITTEN || 5561 tp->t_blk_res == 0)) || 5562 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5563 /* 5564 * Can't make it unwritten. There isn't 5565 * a full extent here so just skip it. 5566 */ 5567 ASSERT(bno >= del.br_blockcount); 5568 bno -= del.br_blockcount; 5569 if (got.br_startoff > bno && --lastx >= 0) 5570 xfs_iext_get_extent(ifp, lastx, &got); 5571 continue; 5572 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5573 struct xfs_bmbt_irec prev; 5574 5575 /* 5576 * This one is already unwritten. 5577 * It must have a written left neighbor. 5578 * Unwrite the killed part of that one and 5579 * try again. 
5580 */ 5581 ASSERT(lastx > 0); 5582 xfs_iext_get_extent(ifp, lastx - 1, &prev); 5583 ASSERT(prev.br_state == XFS_EXT_NORM); 5584 ASSERT(!isnullstartblock(prev.br_startblock)); 5585 ASSERT(del.br_startblock == 5586 prev.br_startblock + prev.br_blockcount); 5587 if (prev.br_startoff < start) { 5588 mod = start - prev.br_startoff; 5589 prev.br_blockcount -= mod; 5590 prev.br_startblock += mod; 5591 prev.br_startoff = start; 5592 } 5593 prev.br_state = XFS_EXT_UNWRITTEN; 5594 lastx--; 5595 error = xfs_bmap_add_extent_unwritten_real(tp, 5596 ip, &lastx, &cur, &prev, 5597 firstblock, dfops, &logflags); 5598 if (error) 5599 goto error0; 5600 goto nodelete; 5601 } else { 5602 ASSERT(del.br_state == XFS_EXT_NORM); 5603 del.br_state = XFS_EXT_UNWRITTEN; 5604 error = xfs_bmap_add_extent_unwritten_real(tp, 5605 ip, &lastx, &cur, &del, 5606 firstblock, dfops, &logflags); 5607 if (error) 5608 goto error0; 5609 goto nodelete; 5610 } 5611 } 5612 5613 /* 5614 * If it's the case where the directory code is running 5615 * with no block reservation, and the deleted block is in 5616 * the middle of its extent, and the resulting insert 5617 * of an extent would cause transformation to btree format, 5618 * then reject it. The calling code will then swap 5619 * blocks around instead. 5620 * We have to do this now, rather than waiting for the 5621 * conversion to btree format, since the transaction 5622 * will be dirty. 5623 */ 5624 if (!wasdel && tp->t_blk_res == 0 && 5625 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 5626 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */ 5627 XFS_IFORK_MAXEXT(ip, whichfork) && 5628 del.br_startoff > got.br_startoff && 5629 del.br_startoff + del.br_blockcount < 5630 got.br_startoff + got.br_blockcount) { 5631 error = -ENOSPC; 5632 goto error0; 5633 } 5634 5635 /* 5636 * Unreserve quota and update realtime free space, if 5637 * appropriate. If delayed allocation, update the inode delalloc 5638 * counter now and wait to update the sb counters as 5639 * xfs_bmap_del_extent() might need to borrow some blocks. 5640 */ 5641 if (wasdel) { 5642 ASSERT(startblockval(del.br_startblock) > 0); 5643 if (isrt) { 5644 xfs_filblks_t rtexts; 5645 5646 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); 5647 do_div(rtexts, mp->m_sb.sb_rextsize); 5648 xfs_mod_frextents(mp, (int64_t)rtexts); 5649 (void)xfs_trans_reserve_quota_nblks(NULL, 5650 ip, -((long)del.br_blockcount), 0, 5651 XFS_QMOPT_RES_RTBLKS); 5652 } else { 5653 (void)xfs_trans_reserve_quota_nblks(NULL, 5654 ip, -((long)del.br_blockcount), 0, 5655 XFS_QMOPT_RES_REGBLKS); 5656 } 5657 ip->i_delayed_blks -= del.br_blockcount; 5658 if (cur) 5659 cur->bc_private.b.flags |= 5660 XFS_BTCUR_BPRV_WASDEL; 5661 } else if (cur) 5662 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL; 5663 5664 error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del, 5665 &tmp_logflags, whichfork, flags); 5666 logflags |= tmp_logflags; 5667 if (error) 5668 goto error0; 5669 5670 if (!isrt && wasdel) 5671 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false); 5672 5673 bno = del.br_startoff - 1; 5674 nodelete: 5675 /* 5676 * If not done go on to the next (previous) record. 
5677 */ 5678 if (bno != (xfs_fileoff_t)-1 && bno >= start) { 5679 if (lastx >= 0) { 5680 xfs_iext_get_extent(ifp, lastx, &got); 5681 if (got.br_startoff > bno && --lastx >= 0) 5682 xfs_iext_get_extent(ifp, lastx, &got); 5683 } 5684 extno++; 5685 } 5686 } 5687 if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0) 5688 *rlen = 0; 5689 else 5690 *rlen = bno - start + 1; 5691 5692 /* 5693 * Convert to a btree if necessary. 5694 */ 5695 if (xfs_bmap_needs_btree(ip, whichfork)) { 5696 ASSERT(cur == NULL); 5697 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, 5698 &cur, 0, &tmp_logflags, whichfork); 5699 logflags |= tmp_logflags; 5700 if (error) 5701 goto error0; 5702 } 5703 /* 5704 * transform from btree to extents, give it cur 5705 */ 5706 else if (xfs_bmap_wants_extents(ip, whichfork)) { 5707 ASSERT(cur != NULL); 5708 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, 5709 whichfork); 5710 logflags |= tmp_logflags; 5711 if (error) 5712 goto error0; 5713 } 5714 /* 5715 * transform from extents to local? 5716 */ 5717 error = 0; 5718 error0: 5719 /* 5720 * Log everything. Do this after conversion, there's no point in 5721 * logging the extent records if we've converted to btree format. 5722 */ 5723 if ((logflags & xfs_ilog_fext(whichfork)) && 5724 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5725 logflags &= ~xfs_ilog_fext(whichfork); 5726 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5727 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5728 logflags &= ~xfs_ilog_fbroot(whichfork); 5729 /* 5730 * Log inode even in the error case, if the transaction 5731 * is dirty we'll need to shut down the filesystem. 5732 */ 5733 if (logflags) 5734 xfs_trans_log_inode(tp, ip, logflags); 5735 if (cur) { 5736 if (!error) { 5737 *firstblock = cur->bc_private.b.firstblock; 5738 cur->bc_private.b.allocated = 0; 5739 } 5740 xfs_btree_del_cursor(cur, 5741 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 5742 } 5743 return error; 5744 } 5745 5746 /* Unmap a range of a file. */ 5747 int 5748 xfs_bunmapi( 5749 xfs_trans_t *tp, 5750 struct xfs_inode *ip, 5751 xfs_fileoff_t bno, 5752 xfs_filblks_t len, 5753 int flags, 5754 xfs_extnum_t nexts, 5755 xfs_fsblock_t *firstblock, 5756 struct xfs_defer_ops *dfops, 5757 int *done) 5758 { 5759 int error; 5760 5761 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock, 5762 dfops); 5763 *done = (len == 0); 5764 return error; 5765 } 5766 5767 /* 5768 * Determine whether an extent shift can be accomplished by a merge with the 5769 * extent that precedes the target hole of the shift. 5770 */ 5771 STATIC bool 5772 xfs_bmse_can_merge( 5773 struct xfs_bmbt_irec *left, /* preceding extent */ 5774 struct xfs_bmbt_irec *got, /* current extent to shift */ 5775 xfs_fileoff_t shift) /* shift fsb */ 5776 { 5777 xfs_fileoff_t startoff; 5778 5779 startoff = got->br_startoff - shift; 5780 5781 /* 5782 * The extent, once shifted, must be adjacent in-file and on-disk with 5783 * the preceding extent. 5784 */ 5785 if ((left->br_startoff + left->br_blockcount != startoff) || 5786 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5787 (left->br_state != got->br_state) || 5788 (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) 5789 return false; 5790 5791 return true; 5792 } 5793 5794 /* 5795 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5796 * hole in the file. 
If an extent shift would result in the extent being fully 5797 * adjacent to the extent that currently precedes the hole, we can merge with 5798 * the preceding extent rather than do the shift. 5799 * 5800 * This function assumes the caller has verified a shift-by-merge is possible 5801 * with the provided extents via xfs_bmse_can_merge(). 5802 */ 5803 STATIC int 5804 xfs_bmse_merge( 5805 struct xfs_inode *ip, 5806 int whichfork, 5807 xfs_fileoff_t shift, /* shift fsb */ 5808 int current_ext, /* idx of gotp */ 5809 struct xfs_bmbt_rec_host *gotp, /* extent to shift */ 5810 struct xfs_bmbt_rec_host *leftp, /* preceding extent */ 5811 struct xfs_btree_cur *cur, 5812 int *logflags) /* output */ 5813 { 5814 struct xfs_bmbt_irec got; 5815 struct xfs_bmbt_irec left; 5816 xfs_filblks_t blockcount; 5817 int error, i; 5818 struct xfs_mount *mp = ip->i_mount; 5819 5820 xfs_bmbt_get_all(gotp, &got); 5821 xfs_bmbt_get_all(leftp, &left); 5822 blockcount = left.br_blockcount + got.br_blockcount; 5823 5824 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5825 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5826 ASSERT(xfs_bmse_can_merge(&left, &got, shift)); 5827 5828 /* 5829 * Merge the in-core extents. Note that the host record pointers and 5830 * current_ext index are invalid once the extent has been removed via 5831 * xfs_iext_remove(). 5832 */ 5833 xfs_bmbt_set_blockcount(leftp, blockcount); 5834 xfs_iext_remove(ip, current_ext, 1, 0); 5835 5836 /* 5837 * Update the on-disk extent count, the btree if necessary and log the 5838 * inode. 5839 */ 5840 XFS_IFORK_NEXT_SET(ip, whichfork, 5841 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5842 *logflags |= XFS_ILOG_CORE; 5843 if (!cur) { 5844 *logflags |= XFS_ILOG_DEXT; 5845 return 0; 5846 } 5847 5848 /* lookup and remove the extent to merge */ 5849 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock, 5850 got.br_blockcount, &i); 5851 if (error) 5852 return error; 5853 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5854 5855 error = xfs_btree_delete(cur, &i); 5856 if (error) 5857 return error; 5858 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5859 5860 /* lookup and update size of the previous extent */ 5861 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock, 5862 left.br_blockcount, &i); 5863 if (error) 5864 return error; 5865 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5866 5867 left.br_blockcount = blockcount; 5868 5869 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock, 5870 left.br_blockcount, left.br_state); 5871 } 5872 5873 /* 5874 * Shift a single extent. 
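 *
 * A left shift moves the extent's br_startoff down by offset_shift_fsb,
 * merging it into the preceding extent when xfs_bmse_can_merge() says the
 * shifted extent would be contiguous with it.  A right shift moves
 * br_startoff up, after checking that there is room before the next extent.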
5875 */ 5876 STATIC int 5877 xfs_bmse_shift_one( 5878 struct xfs_inode *ip, 5879 int whichfork, 5880 xfs_fileoff_t offset_shift_fsb, 5881 int *current_ext, 5882 struct xfs_bmbt_rec_host *gotp, 5883 struct xfs_btree_cur *cur, 5884 int *logflags, 5885 enum shift_direction direction, 5886 struct xfs_defer_ops *dfops) 5887 { 5888 struct xfs_ifork *ifp; 5889 struct xfs_mount *mp; 5890 xfs_fileoff_t startoff; 5891 struct xfs_bmbt_rec_host *adj_irecp; 5892 struct xfs_bmbt_irec got; 5893 struct xfs_bmbt_irec adj_irec; 5894 int error; 5895 int i; 5896 int total_extents; 5897 5898 mp = ip->i_mount; 5899 ifp = XFS_IFORK_PTR(ip, whichfork); 5900 total_extents = xfs_iext_count(ifp); 5901 5902 xfs_bmbt_get_all(gotp, &got); 5903 5904 /* delalloc extents should be prevented by caller */ 5905 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock)); 5906 5907 if (direction == SHIFT_LEFT) { 5908 startoff = got.br_startoff - offset_shift_fsb; 5909 5910 /* 5911 * Check for merge if we've got an extent to the left, 5912 * otherwise make sure there's enough room at the start 5913 * of the file for the shift. 5914 */ 5915 if (!*current_ext) { 5916 if (got.br_startoff < offset_shift_fsb) 5917 return -EINVAL; 5918 goto update_current_ext; 5919 } 5920 /* 5921 * grab the left extent and check for a large 5922 * enough hole. 5923 */ 5924 adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1); 5925 xfs_bmbt_get_all(adj_irecp, &adj_irec); 5926 5927 if (startoff < 5928 adj_irec.br_startoff + adj_irec.br_blockcount) 5929 return -EINVAL; 5930 5931 /* check whether to merge the extent or shift it down */ 5932 if (xfs_bmse_can_merge(&adj_irec, &got, 5933 offset_shift_fsb)) { 5934 error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb, 5935 *current_ext, gotp, adj_irecp, 5936 cur, logflags); 5937 if (error) 5938 return error; 5939 adj_irec = got; 5940 goto update_rmap; 5941 } 5942 } else { 5943 startoff = got.br_startoff + offset_shift_fsb; 5944 /* nothing to move if this is the last extent */ 5945 if (*current_ext >= (total_extents - 1)) 5946 goto update_current_ext; 5947 /* 5948 * If this is not the last extent in the file, make sure there 5949 * is enough room between current extent and next extent for 5950 * accommodating the shift. 5951 */ 5952 adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1); 5953 xfs_bmbt_get_all(adj_irecp, &adj_irec); 5954 if (startoff + got.br_blockcount > adj_irec.br_startoff) 5955 return -EINVAL; 5956 /* 5957 * Unlike a left shift (which involves a hole punch), 5958 * a right shift does not modify extent neighbors 5959 * in any way. We should never find mergeable extents 5960 * in this scenario. Check anyways and warn if we 5961 * encounter two extents that could be one. 5962 */ 5963 if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb)) 5964 WARN_ON_ONCE(1); 5965 } 5966 /* 5967 * Increment the extent index for the next iteration, update the start 5968 * offset of the in-core extent and update the btree if applicable. 
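	 * (Left shifts walk forward through the extent list and right shifts
	 * walk backward, hence the increment or decrement of *current_ext.)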
5969 */ 5970 update_current_ext: 5971 if (direction == SHIFT_LEFT) 5972 (*current_ext)++; 5973 else 5974 (*current_ext)--; 5975 xfs_bmbt_set_startoff(gotp, startoff); 5976 *logflags |= XFS_ILOG_CORE; 5977 adj_irec = got; 5978 if (!cur) { 5979 *logflags |= XFS_ILOG_DEXT; 5980 goto update_rmap; 5981 } 5982 5983 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock, 5984 got.br_blockcount, &i); 5985 if (error) 5986 return error; 5987 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5988 5989 got.br_startoff = startoff; 5990 error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock, 5991 got.br_blockcount, got.br_state); 5992 if (error) 5993 return error; 5994 5995 update_rmap: 5996 /* update reverse mapping */ 5997 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec); 5998 if (error) 5999 return error; 6000 adj_irec.br_startoff = startoff; 6001 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec); 6002 } 6003 6004 /* 6005 * Shift extent records to the left/right to cover/create a hole. 6006 * 6007 * The maximum number of extents to be shifted in a single operation is 6008 * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the 6009 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb 6010 * is the length by which each extent is shifted. If there is no hole to shift 6011 * the extents into, this will be considered invalid operation and we abort 6012 * immediately. 6013 */ 6014 int 6015 xfs_bmap_shift_extents( 6016 struct xfs_trans *tp, 6017 struct xfs_inode *ip, 6018 xfs_fileoff_t *next_fsb, 6019 xfs_fileoff_t offset_shift_fsb, 6020 int *done, 6021 xfs_fileoff_t stop_fsb, 6022 xfs_fsblock_t *firstblock, 6023 struct xfs_defer_ops *dfops, 6024 enum shift_direction direction, 6025 int num_exts) 6026 { 6027 struct xfs_btree_cur *cur = NULL; 6028 struct xfs_bmbt_rec_host *gotp; 6029 struct xfs_bmbt_irec got; 6030 struct xfs_mount *mp = ip->i_mount; 6031 struct xfs_ifork *ifp; 6032 xfs_extnum_t nexts = 0; 6033 xfs_extnum_t current_ext; 6034 xfs_extnum_t total_extents; 6035 xfs_extnum_t stop_extent; 6036 int error = 0; 6037 int whichfork = XFS_DATA_FORK; 6038 int logflags = 0; 6039 6040 if (unlikely(XFS_TEST_ERROR( 6041 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 6042 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 6043 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 6044 XFS_ERROR_REPORT("xfs_bmap_shift_extents", 6045 XFS_ERRLEVEL_LOW, mp); 6046 return -EFSCORRUPTED; 6047 } 6048 6049 if (XFS_FORCED_SHUTDOWN(mp)) 6050 return -EIO; 6051 6052 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 6053 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 6054 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT); 6055 ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT); 6056 6057 ifp = XFS_IFORK_PTR(ip, whichfork); 6058 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 6059 /* Read in all the extents */ 6060 error = xfs_iread_extents(tp, ip, whichfork); 6061 if (error) 6062 return error; 6063 } 6064 6065 if (ifp->if_flags & XFS_IFBROOT) { 6066 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 6067 cur->bc_private.b.firstblock = *firstblock; 6068 cur->bc_private.b.dfops = dfops; 6069 cur->bc_private.b.flags = 0; 6070 } 6071 6072 /* 6073 * There may be delalloc extents in the data fork before the range we 6074 * are collapsing out, so we cannot use the count of real extents here. 6075 * Instead we have to calculate it from the incore fork. 
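	 * (Delayed allocation extents are not included in the on-disk extent
	 * count, but they do occupy entries in the in-core extent list, so
	 * xfs_iext_count() is the authoritative total here.)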
 */
	total_extents = xfs_iext_count(ifp);
	if (total_extents == 0) {
		*done = 1;
		goto del_cursor;
	}

	/*
	 * In case of the first right shift, we need to initialize next_fsb.
	 */
	if (*next_fsb == NULLFSBLOCK) {
		gotp = xfs_iext_get_ext(ifp, total_extents - 1);
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
		if (stop_fsb > *next_fsb) {
			*done = 1;
			goto del_cursor;
		}
	}

	/* Lookup the extent index at which we have to stop */
	if (direction == SHIFT_RIGHT) {
		gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
		/* Make stop_extent exclusive of shift range */
		stop_extent--;
	} else
		stop_extent = total_extents;

	/*
	 * Look up the extent index for the fsb where we start shifting. We can
	 * henceforth iterate with current_ext as extent list changes are locked
	 * out via ilock.
	 *
	 * gotp can be null in 2 cases: 1) if there are no extents or 2)
	 * *next_fsb lies in a hole beyond which there are no extents. Either
	 * way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
	if (!gotp) {
		*done = 1;
		goto del_cursor;
	}

	/* some sanity checking before we finally start shifting extents */
	if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
	    (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
		error = -EIO;
		goto del_cursor;
	}

	while (nexts++ < num_exts) {
		error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
					   &current_ext, gotp, cur, &logflags,
					   direction, dfops);
		if (error)
			goto del_cursor;
		/*
		 * If there was an extent merge during the shift, the extent
		 * count can change. Update the total and grab the next record.
		 */
		if (direction == SHIFT_LEFT) {
			total_extents = xfs_iext_count(ifp);
			stop_extent = total_extents;
		}

		if (current_ext == stop_extent) {
			*done = 1;
			*next_fsb = NULLFSBLOCK;
			break;
		}
		gotp = xfs_iext_get_ext(ifp, current_ext);
	}

	if (!*done) {
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
	}

del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);

	return error;
}

/*
 * Split an extent at the block offset split_fsb, so that split_fsb becomes
 * the first block of a new extent. @current_ext is the target extent to be
 * split. If split_fsb lies in a hole or at the first block of an extent,
 * there is nothing to split and we return 0.
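 *
 * For example, with purely illustrative numbers: splitting the extent
 * [startoff 100, startblock 2000, blockcount 50] at split_fsb 120 shrinks
 * it to a blockcount of 20 and inserts a new extent
 * [startoff 120, startblock 2020, blockcount 30] immediately after it.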
 */
STATIC int
xfs_bmap_split_extent_at(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb,
	xfs_fsblock_t		*firstfsb,
	struct xfs_defer_ops	*dfops)
{
	int				whichfork = XFS_DATA_FORK;
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_rec_host	*gotp;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		new; /* split extent */
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_fsblock_t			gotblkcnt; /* new block count for got */
	xfs_extnum_t			current_ext;
	int				error = 0;
	int				logflags = 0;
	int				i = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * gotp can be null in 2 cases: 1) if there are no extents
	 * or 2) split_fsb lies in a hole beyond which there are
	 * no extents. Either way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
	if (!gotp)
		return 0;

	xfs_bmbt_get_all(gotp, &got);

	/*
	 * Check if split_fsb lies in a hole or at the start boundary
	 * of the extent.
	 */
	if (got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstfsb;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	xfs_bmbt_set_blockcount(gotp, gotblkcnt);
	got.br_blockcount = gotblkcnt;

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount,
				got.br_state);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	current_ext++;
	xfs_iext_insert(ip, current_ext, 1, &new, 0);
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
				new.br_startblock, new.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
		cur->bc_rec.b.br_state = new.br_state;

		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/*
	 * Convert to a btree if necessary.
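	 * The split added one extent to the in-core list, which may push the
	 * count past XFS_IFORK_MAXEXT() and require conversion of the extent
	 * list to btree format.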
6287 */ 6288 if (xfs_bmap_needs_btree(ip, whichfork)) { 6289 int tmp_logflags; /* partial log flag return val */ 6290 6291 ASSERT(cur == NULL); 6292 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops, 6293 &cur, 0, &tmp_logflags, whichfork); 6294 logflags |= tmp_logflags; 6295 } 6296 6297 del_cursor: 6298 if (cur) { 6299 cur->bc_private.b.allocated = 0; 6300 xfs_btree_del_cursor(cur, 6301 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 6302 } 6303 6304 if (logflags) 6305 xfs_trans_log_inode(tp, ip, logflags); 6306 return error; 6307 } 6308 6309 int 6310 xfs_bmap_split_extent( 6311 struct xfs_inode *ip, 6312 xfs_fileoff_t split_fsb) 6313 { 6314 struct xfs_mount *mp = ip->i_mount; 6315 struct xfs_trans *tp; 6316 struct xfs_defer_ops dfops; 6317 xfs_fsblock_t firstfsb; 6318 int error; 6319 6320 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 6321 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp); 6322 if (error) 6323 return error; 6324 6325 xfs_ilock(ip, XFS_ILOCK_EXCL); 6326 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 6327 6328 xfs_defer_init(&dfops, &firstfsb); 6329 6330 error = xfs_bmap_split_extent_at(tp, ip, split_fsb, 6331 &firstfsb, &dfops); 6332 if (error) 6333 goto out; 6334 6335 error = xfs_defer_finish(&tp, &dfops, NULL); 6336 if (error) 6337 goto out; 6338 6339 return xfs_trans_commit(tp); 6340 6341 out: 6342 xfs_defer_cancel(&dfops); 6343 xfs_trans_cancel(tp); 6344 return error; 6345 } 6346 6347 /* Deferred mapping is only for real extents in the data fork. */ 6348 static bool 6349 xfs_bmap_is_update_needed( 6350 struct xfs_bmbt_irec *bmap) 6351 { 6352 return bmap->br_startblock != HOLESTARTBLOCK && 6353 bmap->br_startblock != DELAYSTARTBLOCK; 6354 } 6355 6356 /* Record a bmap intent. */ 6357 static int 6358 __xfs_bmap_add( 6359 struct xfs_mount *mp, 6360 struct xfs_defer_ops *dfops, 6361 enum xfs_bmap_intent_type type, 6362 struct xfs_inode *ip, 6363 int whichfork, 6364 struct xfs_bmbt_irec *bmap) 6365 { 6366 int error; 6367 struct xfs_bmap_intent *bi; 6368 6369 trace_xfs_bmap_defer(mp, 6370 XFS_FSB_TO_AGNO(mp, bmap->br_startblock), 6371 type, 6372 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock), 6373 ip->i_ino, whichfork, 6374 bmap->br_startoff, 6375 bmap->br_blockcount, 6376 bmap->br_state); 6377 6378 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS); 6379 INIT_LIST_HEAD(&bi->bi_list); 6380 bi->bi_type = type; 6381 bi->bi_owner = ip; 6382 bi->bi_whichfork = whichfork; 6383 bi->bi_bmap = *bmap; 6384 6385 error = xfs_defer_join(dfops, bi->bi_owner); 6386 if (error) { 6387 kmem_free(bi); 6388 return error; 6389 } 6390 6391 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list); 6392 return 0; 6393 } 6394 6395 /* Map an extent into a file. */ 6396 int 6397 xfs_bmap_map_extent( 6398 struct xfs_mount *mp, 6399 struct xfs_defer_ops *dfops, 6400 struct xfs_inode *ip, 6401 struct xfs_bmbt_irec *PREV) 6402 { 6403 if (!xfs_bmap_is_update_needed(PREV)) 6404 return 0; 6405 6406 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip, 6407 XFS_DATA_FORK, PREV); 6408 } 6409 6410 /* Unmap an extent out of a file. */ 6411 int 6412 xfs_bmap_unmap_extent( 6413 struct xfs_mount *mp, 6414 struct xfs_defer_ops *dfops, 6415 struct xfs_inode *ip, 6416 struct xfs_bmbt_irec *PREV) 6417 { 6418 if (!xfs_bmap_is_update_needed(PREV)) 6419 return 0; 6420 6421 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip, 6422 XFS_DATA_FORK, PREV); 6423 } 6424 6425 /* 6426 * Process one of the deferred bmap operations. We pass back the 6427 * btree cursor to maintain our lock on the bmapbt between calls. 
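 *
 * This is typically reached when an XFS_DEFER_OPS_TYPE_BMAP intent recorded
 * by __xfs_bmap_add() is finished, either while completing deferred ops in a
 * running transaction or while recovering logged bmap intent items.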
6428 */ 6429 int 6430 xfs_bmap_finish_one( 6431 struct xfs_trans *tp, 6432 struct xfs_defer_ops *dfops, 6433 struct xfs_inode *ip, 6434 enum xfs_bmap_intent_type type, 6435 int whichfork, 6436 xfs_fileoff_t startoff, 6437 xfs_fsblock_t startblock, 6438 xfs_filblks_t blockcount, 6439 xfs_exntst_t state) 6440 { 6441 struct xfs_bmbt_irec bmap; 6442 int nimaps = 1; 6443 xfs_fsblock_t firstfsb; 6444 int flags = XFS_BMAPI_REMAP; 6445 int done; 6446 int error = 0; 6447 6448 bmap.br_startblock = startblock; 6449 bmap.br_startoff = startoff; 6450 bmap.br_blockcount = blockcount; 6451 bmap.br_state = state; 6452 6453 trace_xfs_bmap_deferred(tp->t_mountp, 6454 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type, 6455 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock), 6456 ip->i_ino, whichfork, startoff, blockcount, state); 6457 6458 if (whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) 6459 return -EFSCORRUPTED; 6460 if (whichfork == XFS_ATTR_FORK) 6461 flags |= XFS_BMAPI_ATTRFORK; 6462 6463 if (XFS_TEST_ERROR(false, tp->t_mountp, 6464 XFS_ERRTAG_BMAP_FINISH_ONE, 6465 XFS_RANDOM_BMAP_FINISH_ONE)) 6466 return -EIO; 6467 6468 switch (type) { 6469 case XFS_BMAP_MAP: 6470 firstfsb = bmap.br_startblock; 6471 error = xfs_bmapi_write(tp, ip, bmap.br_startoff, 6472 bmap.br_blockcount, flags, &firstfsb, 6473 bmap.br_blockcount, &bmap, &nimaps, 6474 dfops); 6475 break; 6476 case XFS_BMAP_UNMAP: 6477 error = xfs_bunmapi(tp, ip, bmap.br_startoff, 6478 bmap.br_blockcount, flags, 1, &firstfsb, 6479 dfops, &done); 6480 ASSERT(done); 6481 break; 6482 default: 6483 ASSERT(0); 6484 error = -EFSCORRUPTED; 6485 } 6486 6487 return error; 6488 } 6489
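
/*
 * Illustrative sketch (not a caller in this file): a typical user of the
 * deferred bmap intents above would record the intent against a
 * struct xfs_defer_ops and then finish the deferred work, e.g.:
 *
 *	xfs_defer_init(&dfops, &firstfsb);
 *	error = xfs_bmap_unmap_extent(mp, &dfops, ip, &irec);
 *	if (!error)
 *		error = xfs_defer_finish(&tp, &dfops, NULL);
 *
 * Finishing the deferred work replays each intent through
 * xfs_bmap_finish_one() above.
 */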