/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rmap_btree.h"
#include "xfs_icache.h"


kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
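	 *
	 * Given that worst-case root size, the loop below works bottom up:
	 * it starts from the number of leaf blocks needed to hold the
	 * maximum possible extent count and keeps dividing by the minimum
	 * node fan-out until what remains fits in the root, counting one
	 * btree level per pass.  (As a rough example, a 4k block filesystem
	 * holds on the order of a couple of hundred records per block, so
	 * even the 2^31 - 1 data fork extent limit needs only a handful of
	 * levels.)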
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */
	xfs_filblks_t	orig_len;

	mp = ip->i_mount;

	/* Calculate the worst-case size of the bmbt. */
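	/*
	 * Work up from the length of the delayed extent: each btree level
	 * needs enough blocks, rounded up, to index the level below it, and
	 * once a level collapses to a single block every remaining level
	 * above it costs exactly one more block.
	 */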
	orig_len = len;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1) {
			rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
			break;
		}
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}

	/* Calculate the worst-case size of the rmapbt. */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
				mp->m_rmap_maxlevels;

	return rval;
}

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
421 */ 422 423 xfs_check_block(block, mp, 0, 0); 424 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); 425 bno = be64_to_cpu(*pp); 426 XFS_WANT_CORRUPTED_GOTO(mp, 427 XFS_FSB_SANITY_CHECK(mp, bno), error0); 428 if (bp_release) { 429 bp_release = 0; 430 xfs_trans_brelse(NULL, bp); 431 } 432 } 433 434 /* 435 * Here with bp and block set to the leftmost leaf node in the tree. 436 */ 437 i = 0; 438 439 /* 440 * Loop over all leaf nodes checking that all extents are in the right order. 441 */ 442 for (;;) { 443 xfs_fsblock_t nextbno; 444 xfs_extnum_t num_recs; 445 446 447 num_recs = xfs_btree_get_numrecs(block); 448 449 /* 450 * Read-ahead the next leaf block, if any. 451 */ 452 453 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); 454 455 /* 456 * Check all the extents to make sure they are OK. 457 * If we had a previous block, the last entry should 458 * conform with the first entry in this one. 459 */ 460 461 ep = XFS_BMBT_REC_ADDR(mp, block, 1); 462 if (i) { 463 ASSERT(xfs_bmbt_disk_get_startoff(&last) + 464 xfs_bmbt_disk_get_blockcount(&last) <= 465 xfs_bmbt_disk_get_startoff(ep)); 466 } 467 for (j = 1; j < num_recs; j++) { 468 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1); 469 ASSERT(xfs_bmbt_disk_get_startoff(ep) + 470 xfs_bmbt_disk_get_blockcount(ep) <= 471 xfs_bmbt_disk_get_startoff(nextp)); 472 ep = nextp; 473 } 474 475 last = *ep; 476 i += num_recs; 477 if (bp_release) { 478 bp_release = 0; 479 xfs_trans_brelse(NULL, bp); 480 } 481 bno = nextbno; 482 /* 483 * If we've reached the end, stop. 484 */ 485 if (bno == NULLFSBLOCK) 486 break; 487 488 bp_release = 0; 489 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); 490 if (!bp) { 491 bp_release = 1; 492 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, 493 XFS_BMAP_BTREE_REF, 494 &xfs_bmbt_buf_ops); 495 if (error) 496 goto error_norelse; 497 } 498 block = XFS_BUF_TO_BLOCK(bp); 499 } 500 501 return; 502 503 error0: 504 xfs_warn(mp, "%s: at error0", __func__); 505 if (bp_release) 506 xfs_trans_brelse(NULL, bp); 507 error_norelse: 508 xfs_warn(mp, "%s: BAD after btree leaves for %d extents", 509 __func__, i); 510 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__); 511 return; 512 } 513 514 /* 515 * Add bmap trace insert entries for all the contents of the extent records. 516 */ 517 void 518 xfs_bmap_trace_exlist( 519 xfs_inode_t *ip, /* incore inode pointer */ 520 xfs_extnum_t cnt, /* count of entries in the list */ 521 int whichfork, /* data or attr or cow fork */ 522 unsigned long caller_ip) 523 { 524 xfs_extnum_t idx; /* extent record index */ 525 xfs_ifork_t *ifp; /* inode fork pointer */ 526 int state = 0; 527 528 if (whichfork == XFS_ATTR_FORK) 529 state |= BMAP_ATTRFORK; 530 else if (whichfork == XFS_COW_FORK) 531 state |= BMAP_COWFORK; 532 533 ifp = XFS_IFORK_PTR(ip, whichfork); 534 ASSERT(cnt == xfs_iext_count(ifp)); 535 for (idx = 0; idx < cnt; idx++) 536 trace_xfs_extlist(ip, idx, state, caller_ip); 537 } 538 539 /* 540 * Validate that the bmbt_irecs being returned from bmapi are valid 541 * given the caller's original parameters. Specifically check the 542 * ranges of the returned irecs to ensure that they only extend beyond 543 * the given parameters if the XFS_BMAPI_ENTIRE flag was set. 
544 */ 545 STATIC void 546 xfs_bmap_validate_ret( 547 xfs_fileoff_t bno, 548 xfs_filblks_t len, 549 int flags, 550 xfs_bmbt_irec_t *mval, 551 int nmap, 552 int ret_nmap) 553 { 554 int i; /* index to map values */ 555 556 ASSERT(ret_nmap <= nmap); 557 558 for (i = 0; i < ret_nmap; i++) { 559 ASSERT(mval[i].br_blockcount > 0); 560 if (!(flags & XFS_BMAPI_ENTIRE)) { 561 ASSERT(mval[i].br_startoff >= bno); 562 ASSERT(mval[i].br_blockcount <= len); 563 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <= 564 bno + len); 565 } else { 566 ASSERT(mval[i].br_startoff < bno + len); 567 ASSERT(mval[i].br_startoff + mval[i].br_blockcount > 568 bno); 569 } 570 ASSERT(i == 0 || 571 mval[i - 1].br_startoff + mval[i - 1].br_blockcount == 572 mval[i].br_startoff); 573 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK && 574 mval[i].br_startblock != HOLESTARTBLOCK); 575 ASSERT(mval[i].br_state == XFS_EXT_NORM || 576 mval[i].br_state == XFS_EXT_UNWRITTEN); 577 } 578 } 579 580 #else 581 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0) 582 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) 583 #endif /* DEBUG */ 584 585 /* 586 * bmap free list manipulation functions 587 */ 588 589 /* 590 * Add the extent to the list of extents to be free at transaction end. 591 * The list is maintained sorted (by block number). 592 */ 593 void 594 xfs_bmap_add_free( 595 struct xfs_mount *mp, 596 struct xfs_defer_ops *dfops, 597 xfs_fsblock_t bno, 598 xfs_filblks_t len, 599 struct xfs_owner_info *oinfo) 600 { 601 struct xfs_extent_free_item *new; /* new element */ 602 #ifdef DEBUG 603 xfs_agnumber_t agno; 604 xfs_agblock_t agbno; 605 606 ASSERT(bno != NULLFSBLOCK); 607 ASSERT(len > 0); 608 ASSERT(len <= MAXEXTLEN); 609 ASSERT(!isnullstartblock(bno)); 610 agno = XFS_FSB_TO_AGNO(mp, bno); 611 agbno = XFS_FSB_TO_AGBNO(mp, bno); 612 ASSERT(agno < mp->m_sb.sb_agcount); 613 ASSERT(agbno < mp->m_sb.sb_agblocks); 614 ASSERT(len < mp->m_sb.sb_agblocks); 615 ASSERT(agbno + len <= mp->m_sb.sb_agblocks); 616 #endif 617 ASSERT(xfs_bmap_free_item_zone != NULL); 618 619 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP); 620 new->xefi_startblock = bno; 621 new->xefi_blockcount = (xfs_extlen_t)len; 622 if (oinfo) 623 new->xefi_oinfo = *oinfo; 624 else 625 xfs_rmap_skip_owner_update(&new->xefi_oinfo); 626 trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0, 627 XFS_FSB_TO_AGBNO(mp, bno), len); 628 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list); 629 } 630 631 /* 632 * Inode fork format manipulation functions 633 */ 634 635 /* 636 * Transform a btree format file with only one leaf node, where the 637 * extents list will fit in the inode, into an extents format file. 638 * Since the file extents are already in-core, all we have to do is 639 * give up the space for the btree root and pitch the leaf block. 
640 */ 641 STATIC int /* error */ 642 xfs_bmap_btree_to_extents( 643 xfs_trans_t *tp, /* transaction pointer */ 644 xfs_inode_t *ip, /* incore inode pointer */ 645 xfs_btree_cur_t *cur, /* btree cursor */ 646 int *logflagsp, /* inode logging flags */ 647 int whichfork) /* data or attr fork */ 648 { 649 /* REFERENCED */ 650 struct xfs_btree_block *cblock;/* child btree block */ 651 xfs_fsblock_t cbno; /* child block number */ 652 xfs_buf_t *cbp; /* child block's buffer */ 653 int error; /* error return value */ 654 xfs_ifork_t *ifp; /* inode fork data */ 655 xfs_mount_t *mp; /* mount point structure */ 656 __be64 *pp; /* ptr to block address */ 657 struct xfs_btree_block *rblock;/* root btree block */ 658 struct xfs_owner_info oinfo; 659 660 mp = ip->i_mount; 661 ifp = XFS_IFORK_PTR(ip, whichfork); 662 ASSERT(whichfork != XFS_COW_FORK); 663 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 664 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 665 rblock = ifp->if_broot; 666 ASSERT(be16_to_cpu(rblock->bb_level) == 1); 667 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); 668 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1); 669 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes); 670 cbno = be64_to_cpu(*pp); 671 *logflagsp = 0; 672 #ifdef DEBUG 673 if ((error = xfs_btree_check_lptr(cur, cbno, 1))) 674 return error; 675 #endif 676 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF, 677 &xfs_bmbt_buf_ops); 678 if (error) 679 return error; 680 cblock = XFS_BUF_TO_BLOCK(cbp); 681 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) 682 return error; 683 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); 684 xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo); 685 ip->i_d.di_nblocks--; 686 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 687 xfs_trans_binval(tp, cbp); 688 if (cur->bc_bufs[0] == cbp) 689 cur->bc_bufs[0] = NULL; 690 xfs_iroot_realloc(ip, -1, whichfork); 691 ASSERT(ifp->if_broot == NULL); 692 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); 693 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 694 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 695 return 0; 696 } 697 698 /* 699 * Convert an extents-format file into a btree-format file. 700 * The new file will have a root block (in the inode) and a single child block. 
701 */ 702 STATIC int /* error */ 703 xfs_bmap_extents_to_btree( 704 xfs_trans_t *tp, /* transaction pointer */ 705 xfs_inode_t *ip, /* incore inode pointer */ 706 xfs_fsblock_t *firstblock, /* first-block-allocated */ 707 struct xfs_defer_ops *dfops, /* blocks freed in xaction */ 708 xfs_btree_cur_t **curp, /* cursor returned to caller */ 709 int wasdel, /* converting a delayed alloc */ 710 int *logflagsp, /* inode logging flags */ 711 int whichfork) /* data or attr fork */ 712 { 713 struct xfs_btree_block *ablock; /* allocated (child) bt block */ 714 xfs_buf_t *abp; /* buffer for ablock */ 715 xfs_alloc_arg_t args; /* allocation arguments */ 716 xfs_bmbt_rec_t *arp; /* child record pointer */ 717 struct xfs_btree_block *block; /* btree root block */ 718 xfs_btree_cur_t *cur; /* bmap btree cursor */ 719 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 720 int error; /* error return value */ 721 xfs_extnum_t i, cnt; /* extent record index */ 722 xfs_ifork_t *ifp; /* inode fork pointer */ 723 xfs_bmbt_key_t *kp; /* root block key pointer */ 724 xfs_mount_t *mp; /* mount structure */ 725 xfs_extnum_t nextents; /* number of file extents */ 726 xfs_bmbt_ptr_t *pp; /* root block address pointer */ 727 728 mp = ip->i_mount; 729 ASSERT(whichfork != XFS_COW_FORK); 730 ifp = XFS_IFORK_PTR(ip, whichfork); 731 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS); 732 733 /* 734 * Make space in the inode incore. 735 */ 736 xfs_iroot_realloc(ip, 1, whichfork); 737 ifp->if_flags |= XFS_IFBROOT; 738 739 /* 740 * Fill in the root. 741 */ 742 block = ifp->if_broot; 743 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL, 744 XFS_BTNUM_BMAP, 1, 1, ip->i_ino, 745 XFS_BTREE_LONG_PTRS); 746 /* 747 * Need a cursor. Can't allocate until bb_level is filled in. 748 */ 749 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 750 cur->bc_private.b.firstblock = *firstblock; 751 cur->bc_private.b.dfops = dfops; 752 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 753 /* 754 * Convert to a btree with two levels, one record in root. 755 */ 756 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE); 757 memset(&args, 0, sizeof(args)); 758 args.tp = tp; 759 args.mp = mp; 760 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork); 761 args.firstblock = *firstblock; 762 if (*firstblock == NULLFSBLOCK) { 763 args.type = XFS_ALLOCTYPE_START_BNO; 764 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); 765 } else if (dfops->dop_low) { 766 args.type = XFS_ALLOCTYPE_START_BNO; 767 args.fsbno = *firstblock; 768 } else { 769 args.type = XFS_ALLOCTYPE_NEAR_BNO; 770 args.fsbno = *firstblock; 771 } 772 args.minlen = args.maxlen = args.prod = 1; 773 args.wasdel = wasdel; 774 *logflagsp = 0; 775 if ((error = xfs_alloc_vextent(&args))) { 776 xfs_iroot_realloc(ip, -1, whichfork); 777 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 778 return error; 779 } 780 781 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { 782 xfs_iroot_realloc(ip, -1, whichfork); 783 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 784 return -ENOSPC; 785 } 786 /* 787 * Allocation can't fail, the space was reserved. 788 */ 789 ASSERT(*firstblock == NULLFSBLOCK || 790 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock)); 791 *firstblock = cur->bc_private.b.firstblock = args.fsbno; 792 cur->bc_private.b.allocated++; 793 ip->i_d.di_nblocks++; 794 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); 795 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0); 796 /* 797 * Fill in the child block. 
798 */ 799 abp->b_ops = &xfs_bmbt_buf_ops; 800 ablock = XFS_BUF_TO_BLOCK(abp); 801 xfs_btree_init_block_int(mp, ablock, abp->b_bn, 802 XFS_BTNUM_BMAP, 0, 0, ip->i_ino, 803 XFS_BTREE_LONG_PTRS); 804 805 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1); 806 nextents = xfs_iext_count(ifp); 807 for (cnt = i = 0; i < nextents; i++) { 808 ep = xfs_iext_get_ext(ifp, i); 809 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) { 810 arp->l0 = cpu_to_be64(ep->l0); 811 arp->l1 = cpu_to_be64(ep->l1); 812 arp++; cnt++; 813 } 814 } 815 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork)); 816 xfs_btree_set_numrecs(ablock, cnt); 817 818 /* 819 * Fill in the root key and pointer. 820 */ 821 kp = XFS_BMBT_KEY_ADDR(mp, block, 1); 822 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1); 823 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp)); 824 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur, 825 be16_to_cpu(block->bb_level))); 826 *pp = cpu_to_be64(args.fsbno); 827 828 /* 829 * Do all this logging at the end so that 830 * the root is at the right level. 831 */ 832 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS); 833 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); 834 ASSERT(*curp == NULL); 835 *curp = cur; 836 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork); 837 return 0; 838 } 839 840 /* 841 * Convert a local file to an extents file. 842 * This code is out of bounds for data forks of regular files, 843 * since the file data needs to get logged so things will stay consistent. 844 * (The bmap-level manipulations are ok, though). 845 */ 846 void 847 xfs_bmap_local_to_extents_empty( 848 struct xfs_inode *ip, 849 int whichfork) 850 { 851 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 852 853 ASSERT(whichfork != XFS_COW_FORK); 854 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 855 ASSERT(ifp->if_bytes == 0); 856 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); 857 858 xfs_bmap_forkoff_reset(ip, whichfork); 859 ifp->if_flags &= ~XFS_IFINLINE; 860 ifp->if_flags |= XFS_IFEXTENTS; 861 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 862 } 863 864 865 STATIC int /* error */ 866 xfs_bmap_local_to_extents( 867 xfs_trans_t *tp, /* transaction pointer */ 868 xfs_inode_t *ip, /* incore inode pointer */ 869 xfs_fsblock_t *firstblock, /* first block allocated in xaction */ 870 xfs_extlen_t total, /* total blocks needed by transaction */ 871 int *logflagsp, /* inode logging flags */ 872 int whichfork, 873 void (*init_fn)(struct xfs_trans *tp, 874 struct xfs_buf *bp, 875 struct xfs_inode *ip, 876 struct xfs_ifork *ifp)) 877 { 878 int error = 0; 879 int flags; /* logging flags returned */ 880 xfs_ifork_t *ifp; /* inode fork pointer */ 881 xfs_alloc_arg_t args; /* allocation arguments */ 882 xfs_buf_t *bp; /* buffer for extent block */ 883 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 884 885 /* 886 * We don't want to deal with the case of keeping inode data inline yet. 887 * So sending the data fork of a regular inode is invalid. 
888 */ 889 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK)); 890 ifp = XFS_IFORK_PTR(ip, whichfork); 891 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 892 893 if (!ifp->if_bytes) { 894 xfs_bmap_local_to_extents_empty(ip, whichfork); 895 flags = XFS_ILOG_CORE; 896 goto done; 897 } 898 899 flags = 0; 900 error = 0; 901 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == 902 XFS_IFINLINE); 903 memset(&args, 0, sizeof(args)); 904 args.tp = tp; 905 args.mp = ip->i_mount; 906 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0); 907 args.firstblock = *firstblock; 908 /* 909 * Allocate a block. We know we need only one, since the 910 * file currently fits in an inode. 911 */ 912 if (*firstblock == NULLFSBLOCK) { 913 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino); 914 args.type = XFS_ALLOCTYPE_START_BNO; 915 } else { 916 args.fsbno = *firstblock; 917 args.type = XFS_ALLOCTYPE_NEAR_BNO; 918 } 919 args.total = total; 920 args.minlen = args.maxlen = args.prod = 1; 921 error = xfs_alloc_vextent(&args); 922 if (error) 923 goto done; 924 925 /* Can't fail, the space was reserved. */ 926 ASSERT(args.fsbno != NULLFSBLOCK); 927 ASSERT(args.len == 1); 928 *firstblock = args.fsbno; 929 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); 930 931 /* 932 * Initialize the block, copy the data and log the remote buffer. 933 * 934 * The callout is responsible for logging because the remote format 935 * might differ from the local format and thus we don't know how much to 936 * log here. Note that init_fn must also set the buffer log item type 937 * correctly. 938 */ 939 init_fn(tp, bp, ip, ifp); 940 941 /* account for the change in fork size */ 942 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); 943 xfs_bmap_local_to_extents_empty(ip, whichfork); 944 flags |= XFS_ILOG_CORE; 945 946 xfs_iext_add(ifp, 0, 1); 947 ep = xfs_iext_get_ext(ifp, 0); 948 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); 949 trace_xfs_bmap_post_update(ip, 0, 950 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0, 951 _THIS_IP_); 952 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 953 ip->i_d.di_nblocks = 1; 954 xfs_trans_mod_dquot_byino(tp, ip, 955 XFS_TRANS_DQ_BCOUNT, 1L); 956 flags |= xfs_ilog_fext(whichfork); 957 958 done: 959 *logflagsp = flags; 960 return error; 961 } 962 963 /* 964 * Called from xfs_bmap_add_attrfork to handle btree format files. 
965 */ 966 STATIC int /* error */ 967 xfs_bmap_add_attrfork_btree( 968 xfs_trans_t *tp, /* transaction pointer */ 969 xfs_inode_t *ip, /* incore inode pointer */ 970 xfs_fsblock_t *firstblock, /* first block allocated */ 971 struct xfs_defer_ops *dfops, /* blocks to free at commit */ 972 int *flags) /* inode logging flags */ 973 { 974 xfs_btree_cur_t *cur; /* btree cursor */ 975 int error; /* error return value */ 976 xfs_mount_t *mp; /* file system mount struct */ 977 int stat; /* newroot status */ 978 979 mp = ip->i_mount; 980 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip)) 981 *flags |= XFS_ILOG_DBROOT; 982 else { 983 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); 984 cur->bc_private.b.dfops = dfops; 985 cur->bc_private.b.firstblock = *firstblock; 986 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) 987 goto error0; 988 /* must be at least one entry */ 989 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0); 990 if ((error = xfs_btree_new_iroot(cur, flags, &stat))) 991 goto error0; 992 if (stat == 0) { 993 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 994 return -ENOSPC; 995 } 996 *firstblock = cur->bc_private.b.firstblock; 997 cur->bc_private.b.allocated = 0; 998 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 999 } 1000 return 0; 1001 error0: 1002 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1003 return error; 1004 } 1005 1006 /* 1007 * Called from xfs_bmap_add_attrfork to handle extents format files. 1008 */ 1009 STATIC int /* error */ 1010 xfs_bmap_add_attrfork_extents( 1011 xfs_trans_t *tp, /* transaction pointer */ 1012 xfs_inode_t *ip, /* incore inode pointer */ 1013 xfs_fsblock_t *firstblock, /* first block allocated */ 1014 struct xfs_defer_ops *dfops, /* blocks to free at commit */ 1015 int *flags) /* inode logging flags */ 1016 { 1017 xfs_btree_cur_t *cur; /* bmap btree cursor */ 1018 int error; /* error return value */ 1019 1020 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip)) 1021 return 0; 1022 cur = NULL; 1023 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0, 1024 flags, XFS_DATA_FORK); 1025 if (cur) { 1026 cur->bc_private.b.allocated = 0; 1027 xfs_btree_del_cursor(cur, 1028 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 1029 } 1030 return error; 1031 } 1032 1033 /* 1034 * Called from xfs_bmap_add_attrfork to handle local format files. Each 1035 * different data fork content type needs a different callout to do the 1036 * conversion. Some are basic and only require special block initialisation 1037 * callouts for the data formating, others (directories) are so specialised they 1038 * handle everything themselves. 1039 * 1040 * XXX (dgc): investigate whether directory conversion can use the generic 1041 * formatting callout. It should be possible - it's just a very complex 1042 * formatter. 
1043 */ 1044 STATIC int /* error */ 1045 xfs_bmap_add_attrfork_local( 1046 xfs_trans_t *tp, /* transaction pointer */ 1047 xfs_inode_t *ip, /* incore inode pointer */ 1048 xfs_fsblock_t *firstblock, /* first block allocated */ 1049 struct xfs_defer_ops *dfops, /* blocks to free at commit */ 1050 int *flags) /* inode logging flags */ 1051 { 1052 xfs_da_args_t dargs; /* args for dir/attr code */ 1053 1054 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) 1055 return 0; 1056 1057 if (S_ISDIR(VFS_I(ip)->i_mode)) { 1058 memset(&dargs, 0, sizeof(dargs)); 1059 dargs.geo = ip->i_mount->m_dir_geo; 1060 dargs.dp = ip; 1061 dargs.firstblock = firstblock; 1062 dargs.dfops = dfops; 1063 dargs.total = dargs.geo->fsbcount; 1064 dargs.whichfork = XFS_DATA_FORK; 1065 dargs.trans = tp; 1066 return xfs_dir2_sf_to_block(&dargs); 1067 } 1068 1069 if (S_ISLNK(VFS_I(ip)->i_mode)) 1070 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, 1071 flags, XFS_DATA_FORK, 1072 xfs_symlink_local_to_remote); 1073 1074 /* should only be called for types that support local format data */ 1075 ASSERT(0); 1076 return -EFSCORRUPTED; 1077 } 1078 1079 /* 1080 * Convert inode from non-attributed to attributed. 1081 * Must not be in a transaction, ip must not be locked. 1082 */ 1083 int /* error code */ 1084 xfs_bmap_add_attrfork( 1085 xfs_inode_t *ip, /* incore inode pointer */ 1086 int size, /* space new attribute needs */ 1087 int rsvd) /* xact may use reserved blks */ 1088 { 1089 xfs_fsblock_t firstblock; /* 1st block/ag allocated */ 1090 struct xfs_defer_ops dfops; /* freed extent records */ 1091 xfs_mount_t *mp; /* mount structure */ 1092 xfs_trans_t *tp; /* transaction pointer */ 1093 int blks; /* space reservation */ 1094 int version = 1; /* superblock attr version */ 1095 int logflags; /* logging flags */ 1096 int error; /* error return value */ 1097 1098 ASSERT(XFS_IFORK_Q(ip) == 0); 1099 1100 mp = ip->i_mount; 1101 ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); 1102 1103 blks = XFS_ADDAFORK_SPACE_RES(mp); 1104 1105 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0, 1106 rsvd ? XFS_TRANS_RESERVE : 0, &tp); 1107 if (error) 1108 return error; 1109 1110 xfs_ilock(ip, XFS_ILOCK_EXCL); 1111 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ? 1112 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : 1113 XFS_QMOPT_RES_REGBLKS); 1114 if (error) 1115 goto trans_cancel; 1116 if (XFS_IFORK_Q(ip)) 1117 goto trans_cancel; 1118 if (ip->i_d.di_anextents != 0) { 1119 error = -EFSCORRUPTED; 1120 goto trans_cancel; 1121 } 1122 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { 1123 /* 1124 * For inodes coming from pre-6.2 filesystems. 
1125 */ 1126 ASSERT(ip->i_d.di_aformat == 0); 1127 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 1128 } 1129 1130 xfs_trans_ijoin(tp, ip, 0); 1131 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1132 1133 switch (ip->i_d.di_format) { 1134 case XFS_DINODE_FMT_DEV: 1135 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; 1136 break; 1137 case XFS_DINODE_FMT_UUID: 1138 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3; 1139 break; 1140 case XFS_DINODE_FMT_LOCAL: 1141 case XFS_DINODE_FMT_EXTENTS: 1142 case XFS_DINODE_FMT_BTREE: 1143 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size); 1144 if (!ip->i_d.di_forkoff) 1145 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3; 1146 else if (mp->m_flags & XFS_MOUNT_ATTR2) 1147 version = 2; 1148 break; 1149 default: 1150 ASSERT(0); 1151 error = -EINVAL; 1152 goto trans_cancel; 1153 } 1154 1155 ASSERT(ip->i_afp == NULL); 1156 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); 1157 ip->i_afp->if_flags = XFS_IFEXTENTS; 1158 logflags = 0; 1159 xfs_defer_init(&dfops, &firstblock); 1160 switch (ip->i_d.di_format) { 1161 case XFS_DINODE_FMT_LOCAL: 1162 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops, 1163 &logflags); 1164 break; 1165 case XFS_DINODE_FMT_EXTENTS: 1166 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock, 1167 &dfops, &logflags); 1168 break; 1169 case XFS_DINODE_FMT_BTREE: 1170 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops, 1171 &logflags); 1172 break; 1173 default: 1174 error = 0; 1175 break; 1176 } 1177 if (logflags) 1178 xfs_trans_log_inode(tp, ip, logflags); 1179 if (error) 1180 goto bmap_cancel; 1181 if (!xfs_sb_version_hasattr(&mp->m_sb) || 1182 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) { 1183 bool log_sb = false; 1184 1185 spin_lock(&mp->m_sb_lock); 1186 if (!xfs_sb_version_hasattr(&mp->m_sb)) { 1187 xfs_sb_version_addattr(&mp->m_sb); 1188 log_sb = true; 1189 } 1190 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) { 1191 xfs_sb_version_addattr2(&mp->m_sb); 1192 log_sb = true; 1193 } 1194 spin_unlock(&mp->m_sb_lock); 1195 if (log_sb) 1196 xfs_log_sb(tp); 1197 } 1198 1199 error = xfs_defer_finish(&tp, &dfops, NULL); 1200 if (error) 1201 goto bmap_cancel; 1202 error = xfs_trans_commit(tp); 1203 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1204 return error; 1205 1206 bmap_cancel: 1207 xfs_defer_cancel(&dfops); 1208 trans_cancel: 1209 xfs_trans_cancel(tp); 1210 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1211 return error; 1212 } 1213 1214 /* 1215 * Internal and external extent tree search functions. 1216 */ 1217 1218 /* 1219 * Read in the extents to if_extents. 1220 * All inode fields are set up by caller, we just traverse the btree 1221 * and copy the records in. If the file system cannot contain unwritten 1222 * extents, the records are checked for no "state" flags. 
1223 */ 1224 int /* error */ 1225 xfs_bmap_read_extents( 1226 xfs_trans_t *tp, /* transaction pointer */ 1227 xfs_inode_t *ip, /* incore inode */ 1228 int whichfork) /* data or attr fork */ 1229 { 1230 struct xfs_btree_block *block; /* current btree block */ 1231 xfs_fsblock_t bno; /* block # of "block" */ 1232 xfs_buf_t *bp; /* buffer for "block" */ 1233 int error; /* error return value */ 1234 xfs_extnum_t i, j; /* index into the extents list */ 1235 xfs_ifork_t *ifp; /* fork structure */ 1236 int level; /* btree level, for checking */ 1237 xfs_mount_t *mp; /* file system mount structure */ 1238 __be64 *pp; /* pointer to block address */ 1239 /* REFERENCED */ 1240 xfs_extnum_t room; /* number of entries there's room for */ 1241 1242 mp = ip->i_mount; 1243 ifp = XFS_IFORK_PTR(ip, whichfork); 1244 block = ifp->if_broot; 1245 /* 1246 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. 1247 */ 1248 level = be16_to_cpu(block->bb_level); 1249 ASSERT(level > 0); 1250 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 1251 bno = be64_to_cpu(*pp); 1252 1253 /* 1254 * Go down the tree until leaf level is reached, following the first 1255 * pointer (leftmost) at each level. 1256 */ 1257 while (level-- > 0) { 1258 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, 1259 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops); 1260 if (error) 1261 return error; 1262 block = XFS_BUF_TO_BLOCK(bp); 1263 if (level == 0) 1264 break; 1265 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); 1266 bno = be64_to_cpu(*pp); 1267 XFS_WANT_CORRUPTED_GOTO(mp, 1268 XFS_FSB_SANITY_CHECK(mp, bno), error0); 1269 xfs_trans_brelse(tp, bp); 1270 } 1271 /* 1272 * Here with bp and block set to the leftmost leaf node in the tree. 1273 */ 1274 room = xfs_iext_count(ifp); 1275 i = 0; 1276 /* 1277 * Loop over all leaf nodes. Copy information to the extent records. 1278 */ 1279 for (;;) { 1280 xfs_bmbt_rec_t *frp; 1281 xfs_fsblock_t nextbno; 1282 xfs_extnum_t num_recs; 1283 xfs_extnum_t start; 1284 1285 num_recs = xfs_btree_get_numrecs(block); 1286 if (unlikely(i + num_recs > room)) { 1287 ASSERT(i + num_recs <= room); 1288 xfs_warn(ip->i_mount, 1289 "corrupt dinode %Lu, (btree extents).", 1290 (unsigned long long) ip->i_ino); 1291 XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)", 1292 XFS_ERRLEVEL_LOW, ip->i_mount, block); 1293 goto error0; 1294 } 1295 /* 1296 * Read-ahead the next leaf block, if any. 1297 */ 1298 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); 1299 if (nextbno != NULLFSBLOCK) 1300 xfs_btree_reada_bufl(mp, nextbno, 1, 1301 &xfs_bmbt_buf_ops); 1302 /* 1303 * Copy records into the extent records. 1304 */ 1305 frp = XFS_BMBT_REC_ADDR(mp, block, 1); 1306 start = i; 1307 for (j = 0; j < num_recs; j++, i++, frp++) { 1308 xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i); 1309 trp->l0 = be64_to_cpu(frp->l0); 1310 trp->l1 = be64_to_cpu(frp->l1); 1311 if (!xfs_bmbt_validate_extent(mp, whichfork, trp)) { 1312 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)", 1313 XFS_ERRLEVEL_LOW, mp); 1314 goto error0; 1315 } 1316 } 1317 xfs_trans_brelse(tp, bp); 1318 bno = nextbno; 1319 /* 1320 * If we've reached the end, stop. 
1321 */ 1322 if (bno == NULLFSBLOCK) 1323 break; 1324 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, 1325 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops); 1326 if (error) 1327 return error; 1328 block = XFS_BUF_TO_BLOCK(bp); 1329 } 1330 if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) 1331 return -EFSCORRUPTED; 1332 ASSERT(i == xfs_iext_count(ifp)); 1333 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork); 1334 return 0; 1335 error0: 1336 xfs_trans_brelse(tp, bp); 1337 return -EFSCORRUPTED; 1338 } 1339 1340 /* 1341 * Returns the file-relative block number of the first unused block(s) 1342 * in the file with at least "len" logically contiguous blocks free. 1343 * This is the lowest-address hole if the file has holes, else the first block 1344 * past the end of file. 1345 * Return 0 if the file is currently local (in-inode). 1346 */ 1347 int /* error */ 1348 xfs_bmap_first_unused( 1349 xfs_trans_t *tp, /* transaction pointer */ 1350 xfs_inode_t *ip, /* incore inode */ 1351 xfs_extlen_t len, /* size of hole to find */ 1352 xfs_fileoff_t *first_unused, /* unused block */ 1353 int whichfork) /* data or attr fork */ 1354 { 1355 int error; /* error return value */ 1356 int idx; /* extent record index */ 1357 xfs_ifork_t *ifp; /* inode fork pointer */ 1358 xfs_fileoff_t lastaddr; /* last block number seen */ 1359 xfs_fileoff_t lowest; /* lowest useful block */ 1360 xfs_fileoff_t max; /* starting useful block */ 1361 xfs_fileoff_t off; /* offset for this block */ 1362 xfs_extnum_t nextents; /* number of extent entries */ 1363 1364 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE || 1365 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS || 1366 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 1367 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 1368 *first_unused = 0; 1369 return 0; 1370 } 1371 ifp = XFS_IFORK_PTR(ip, whichfork); 1372 if (!(ifp->if_flags & XFS_IFEXTENTS) && 1373 (error = xfs_iread_extents(tp, ip, whichfork))) 1374 return error; 1375 lowest = *first_unused; 1376 nextents = xfs_iext_count(ifp); 1377 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) { 1378 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx); 1379 off = xfs_bmbt_get_startoff(ep); 1380 /* 1381 * See if the hole before this extent will work. 1382 */ 1383 if (off >= lowest + len && off - max >= len) { 1384 *first_unused = max; 1385 return 0; 1386 } 1387 lastaddr = off + xfs_bmbt_get_blockcount(ep); 1388 max = XFS_FILEOFF_MAX(lastaddr, lowest); 1389 } 1390 *first_unused = max; 1391 return 0; 1392 } 1393 1394 /* 1395 * Returns the file-relative block number of the last block - 1 before 1396 * last_block (input value) in the file. 1397 * This is not based on i_size, it is based on the extent records. 1398 * Returns 0 for local files, as they do not have extent records. 
1399 */ 1400 int /* error */ 1401 xfs_bmap_last_before( 1402 struct xfs_trans *tp, /* transaction pointer */ 1403 struct xfs_inode *ip, /* incore inode */ 1404 xfs_fileoff_t *last_block, /* last block */ 1405 int whichfork) /* data or attr fork */ 1406 { 1407 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 1408 struct xfs_bmbt_irec got; 1409 xfs_extnum_t idx; 1410 int error; 1411 1412 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 1413 case XFS_DINODE_FMT_LOCAL: 1414 *last_block = 0; 1415 return 0; 1416 case XFS_DINODE_FMT_BTREE: 1417 case XFS_DINODE_FMT_EXTENTS: 1418 break; 1419 default: 1420 return -EIO; 1421 } 1422 1423 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 1424 error = xfs_iread_extents(tp, ip, whichfork); 1425 if (error) 1426 return error; 1427 } 1428 1429 if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) { 1430 if (got.br_startoff <= *last_block - 1) 1431 return 0; 1432 } 1433 1434 if (xfs_iext_get_extent(ifp, idx - 1, &got)) { 1435 *last_block = got.br_startoff + got.br_blockcount; 1436 return 0; 1437 } 1438 1439 *last_block = 0; 1440 return 0; 1441 } 1442 1443 int 1444 xfs_bmap_last_extent( 1445 struct xfs_trans *tp, 1446 struct xfs_inode *ip, 1447 int whichfork, 1448 struct xfs_bmbt_irec *rec, 1449 int *is_empty) 1450 { 1451 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 1452 int error; 1453 int nextents; 1454 1455 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 1456 error = xfs_iread_extents(tp, ip, whichfork); 1457 if (error) 1458 return error; 1459 } 1460 1461 nextents = xfs_iext_count(ifp); 1462 if (nextents == 0) { 1463 *is_empty = 1; 1464 return 0; 1465 } 1466 1467 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec); 1468 *is_empty = 0; 1469 return 0; 1470 } 1471 1472 /* 1473 * Check the last inode extent to determine whether this allocation will result 1474 * in blocks being allocated at the end of the file. When we allocate new data 1475 * blocks at the end of the file which do not start at the previous data block, 1476 * we will try to align the new blocks at stripe unit boundaries. 1477 * 1478 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be 1479 * at, or past the EOF. 1480 */ 1481 STATIC int 1482 xfs_bmap_isaeof( 1483 struct xfs_bmalloca *bma, 1484 int whichfork) 1485 { 1486 struct xfs_bmbt_irec rec; 1487 int is_empty; 1488 int error; 1489 1490 bma->aeof = 0; 1491 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, 1492 &is_empty); 1493 if (error) 1494 return error; 1495 1496 if (is_empty) { 1497 bma->aeof = 1; 1498 return 0; 1499 } 1500 1501 /* 1502 * Check if we are allocation or past the last extent, or at least into 1503 * the last delayed allocated extent. 1504 */ 1505 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount || 1506 (bma->offset >= rec.br_startoff && 1507 isnullstartblock(rec.br_startblock)); 1508 return 0; 1509 } 1510 1511 /* 1512 * Returns the file-relative block number of the first block past eof in 1513 * the file. This is not based on i_size, it is based on the extent records. 1514 * Returns 0 for local files, as they do not have extent records. 
1515 */ 1516 int 1517 xfs_bmap_last_offset( 1518 struct xfs_inode *ip, 1519 xfs_fileoff_t *last_block, 1520 int whichfork) 1521 { 1522 struct xfs_bmbt_irec rec; 1523 int is_empty; 1524 int error; 1525 1526 *last_block = 0; 1527 1528 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) 1529 return 0; 1530 1531 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && 1532 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 1533 return -EIO; 1534 1535 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); 1536 if (error || is_empty) 1537 return error; 1538 1539 *last_block = rec.br_startoff + rec.br_blockcount; 1540 return 0; 1541 } 1542 1543 /* 1544 * Returns whether the selected fork of the inode has exactly one 1545 * block or not. For the data fork we check this matches di_size, 1546 * implying the file's range is 0..bsize-1. 1547 */ 1548 int /* 1=>1 block, 0=>otherwise */ 1549 xfs_bmap_one_block( 1550 xfs_inode_t *ip, /* incore inode */ 1551 int whichfork) /* data or attr fork */ 1552 { 1553 xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */ 1554 xfs_ifork_t *ifp; /* inode fork pointer */ 1555 int rval; /* return value */ 1556 xfs_bmbt_irec_t s; /* internal version of extent */ 1557 1558 #ifndef DEBUG 1559 if (whichfork == XFS_DATA_FORK) 1560 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize; 1561 #endif /* !DEBUG */ 1562 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1) 1563 return 0; 1564 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 1565 return 0; 1566 ifp = XFS_IFORK_PTR(ip, whichfork); 1567 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 1568 ep = xfs_iext_get_ext(ifp, 0); 1569 xfs_bmbt_get_all(ep, &s); 1570 rval = s.br_startoff == 0 && s.br_blockcount == 1; 1571 if (rval && whichfork == XFS_DATA_FORK) 1572 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize); 1573 return rval; 1574 } 1575 1576 /* 1577 * Extent tree manipulation functions used during allocation. 1578 */ 1579 1580 /* 1581 * Convert a delayed allocation to a real allocation. 1582 */ 1583 STATIC int /* error */ 1584 xfs_bmap_add_extent_delay_real( 1585 struct xfs_bmalloca *bma, 1586 int whichfork) 1587 { 1588 struct xfs_bmbt_irec *new = &bma->got; 1589 int diff; /* temp value */ 1590 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ 1591 int error; /* error return value */ 1592 int i; /* temp state */ 1593 xfs_ifork_t *ifp; /* inode fork pointer */ 1594 xfs_fileoff_t new_endoff; /* end offset of new entry */ 1595 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 1596 /* left is 0, right is 1, prev is 2 */ 1597 int rval=0; /* return value (logging flags) */ 1598 int state = 0;/* state bits, accessed thru macros */ 1599 xfs_filblks_t da_new; /* new count del alloc blocks used */ 1600 xfs_filblks_t da_old; /* old count del alloc blocks used */ 1601 xfs_filblks_t temp=0; /* value for da_new calculations */ 1602 xfs_filblks_t temp2=0;/* value for da_new calculations */ 1603 int tmp_rval; /* partial logging flags */ 1604 struct xfs_mount *mp; 1605 xfs_extnum_t *nextents; 1606 1607 mp = bma->ip->i_mount; 1608 ifp = XFS_IFORK_PTR(bma->ip, whichfork); 1609 ASSERT(whichfork != XFS_ATTR_FORK); 1610 nextents = (whichfork == XFS_COW_FORK ? 
		&bma->ip->i_cnextents : &bma->ip->i_d.di_nextents);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	ep = xfs_iext_get_ext(ifp, bma->idx);
	xfs_bmbt_get_all(ep, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (bma->idx < xfs_iext_count(ifp) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);

		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
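		 * All three extents collapse into the single left extent: its
		 * length grows to cover the whole range, the two records to
		 * its right go away, and the bmap btree (if one exists) drops
		 * the right neighbour's record and updates the left one.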
1705 */ 1706 bma->idx--; 1707 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1708 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 1709 LEFT.br_blockcount + PREV.br_blockcount + 1710 RIGHT.br_blockcount); 1711 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1712 1713 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state); 1714 (*nextents)--; 1715 if (bma->cur == NULL) 1716 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1717 else { 1718 rval = XFS_ILOG_CORE; 1719 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 1720 RIGHT.br_startblock, 1721 RIGHT.br_blockcount, &i); 1722 if (error) 1723 goto done; 1724 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1725 error = xfs_btree_delete(bma->cur, &i); 1726 if (error) 1727 goto done; 1728 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1729 error = xfs_btree_decrement(bma->cur, 0, &i); 1730 if (error) 1731 goto done; 1732 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1733 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 1734 LEFT.br_startblock, 1735 LEFT.br_blockcount + 1736 PREV.br_blockcount + 1737 RIGHT.br_blockcount, LEFT.br_state); 1738 if (error) 1739 goto done; 1740 } 1741 break; 1742 1743 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1744 /* 1745 * Filling in all of a previously delayed allocation extent. 1746 * The left neighbor is contiguous, the right is not. 1747 */ 1748 bma->idx--; 1749 1750 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1751 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 1752 LEFT.br_blockcount + PREV.br_blockcount); 1753 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1754 1755 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); 1756 if (bma->cur == NULL) 1757 rval = XFS_ILOG_DEXT; 1758 else { 1759 rval = 0; 1760 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, 1761 LEFT.br_startblock, LEFT.br_blockcount, 1762 &i); 1763 if (error) 1764 goto done; 1765 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1766 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 1767 LEFT.br_startblock, 1768 LEFT.br_blockcount + 1769 PREV.br_blockcount, LEFT.br_state); 1770 if (error) 1771 goto done; 1772 } 1773 break; 1774 1775 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1776 /* 1777 * Filling in all of a previously delayed allocation extent. 1778 * The right neighbor is contiguous, the left is not. 1779 */ 1780 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1781 xfs_bmbt_set_startblock(ep, new->br_startblock); 1782 xfs_bmbt_set_blockcount(ep, 1783 PREV.br_blockcount + RIGHT.br_blockcount); 1784 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1785 1786 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); 1787 if (bma->cur == NULL) 1788 rval = XFS_ILOG_DEXT; 1789 else { 1790 rval = 0; 1791 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 1792 RIGHT.br_startblock, 1793 RIGHT.br_blockcount, &i); 1794 if (error) 1795 goto done; 1796 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1797 error = xfs_bmbt_update(bma->cur, PREV.br_startoff, 1798 new->br_startblock, 1799 PREV.br_blockcount + 1800 RIGHT.br_blockcount, PREV.br_state); 1801 if (error) 1802 goto done; 1803 } 1804 break; 1805 1806 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 1807 /* 1808 * Filling in all of a previously delayed allocation extent. 1809 * Neither the left nor right neighbors are contiguous with 1810 * the new one. 
1811 */ 1812 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1813 xfs_bmbt_set_startblock(ep, new->br_startblock); 1814 xfs_bmbt_set_state(ep, new->br_state); 1815 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1816 1817 (*nextents)++; 1818 if (bma->cur == NULL) 1819 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1820 else { 1821 rval = XFS_ILOG_CORE; 1822 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 1823 new->br_startblock, new->br_blockcount, 1824 &i); 1825 if (error) 1826 goto done; 1827 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1828 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 1829 error = xfs_btree_insert(bma->cur, &i); 1830 if (error) 1831 goto done; 1832 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1833 } 1834 break; 1835 1836 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 1837 /* 1838 * Filling in the first part of a previous delayed allocation. 1839 * The left neighbor is contiguous. 1840 */ 1841 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_); 1842 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1), 1843 LEFT.br_blockcount + new->br_blockcount); 1844 xfs_bmbt_set_startoff(ep, 1845 PREV.br_startoff + new->br_blockcount); 1846 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_); 1847 1848 temp = PREV.br_blockcount - new->br_blockcount; 1849 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1850 xfs_bmbt_set_blockcount(ep, temp); 1851 if (bma->cur == NULL) 1852 rval = XFS_ILOG_DEXT; 1853 else { 1854 rval = 0; 1855 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, 1856 LEFT.br_startblock, LEFT.br_blockcount, 1857 &i); 1858 if (error) 1859 goto done; 1860 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1861 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 1862 LEFT.br_startblock, 1863 LEFT.br_blockcount + 1864 new->br_blockcount, 1865 LEFT.br_state); 1866 if (error) 1867 goto done; 1868 } 1869 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1870 startblockval(PREV.br_startblock)); 1871 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 1872 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1873 1874 bma->idx--; 1875 break; 1876 1877 case BMAP_LEFT_FILLING: 1878 /* 1879 * Filling in the first part of a previous delayed allocation. 1880 * The left neighbor is not contiguous. 1881 */ 1882 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1883 xfs_bmbt_set_startoff(ep, new_endoff); 1884 temp = PREV.br_blockcount - new->br_blockcount; 1885 xfs_bmbt_set_blockcount(ep, temp); 1886 xfs_iext_insert(bma->ip, bma->idx, 1, new, state); 1887 (*nextents)++; 1888 if (bma->cur == NULL) 1889 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1890 else { 1891 rval = XFS_ILOG_CORE; 1892 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 1893 new->br_startblock, new->br_blockcount, 1894 &i); 1895 if (error) 1896 goto done; 1897 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1898 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 1899 error = xfs_btree_insert(bma->cur, &i); 1900 if (error) 1901 goto done; 1902 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1903 } 1904 1905 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1906 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1907 bma->firstblock, bma->dfops, 1908 &bma->cur, 1, &tmp_rval, whichfork); 1909 rval |= tmp_rval; 1910 if (error) 1911 goto done; 1912 } 1913 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1914 startblockval(PREV.br_startblock) - 1915 (bma->cur ? 
bma->cur->bc_private.b.allocated : 0)); 1916 ep = xfs_iext_get_ext(ifp, bma->idx + 1); 1917 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 1918 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 1919 break; 1920 1921 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1922 /* 1923 * Filling in the last part of a previous delayed allocation. 1924 * The right neighbor is contiguous with the new allocation. 1925 */ 1926 temp = PREV.br_blockcount - new->br_blockcount; 1927 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 1928 xfs_bmbt_set_blockcount(ep, temp); 1929 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1), 1930 new->br_startoff, new->br_startblock, 1931 new->br_blockcount + RIGHT.br_blockcount, 1932 RIGHT.br_state); 1933 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 1934 if (bma->cur == NULL) 1935 rval = XFS_ILOG_DEXT; 1936 else { 1937 rval = 0; 1938 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 1939 RIGHT.br_startblock, 1940 RIGHT.br_blockcount, &i); 1941 if (error) 1942 goto done; 1943 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1944 error = xfs_bmbt_update(bma->cur, new->br_startoff, 1945 new->br_startblock, 1946 new->br_blockcount + 1947 RIGHT.br_blockcount, 1948 RIGHT.br_state); 1949 if (error) 1950 goto done; 1951 } 1952 1953 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1954 startblockval(PREV.br_startblock)); 1955 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1956 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 1957 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1958 1959 bma->idx++; 1960 break; 1961 1962 case BMAP_RIGHT_FILLING: 1963 /* 1964 * Filling in the last part of a previous delayed allocation. 1965 * The right neighbor is not contiguous. 1966 */ 1967 temp = PREV.br_blockcount - new->br_blockcount; 1968 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1969 xfs_bmbt_set_blockcount(ep, temp); 1970 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state); 1971 (*nextents)++; 1972 if (bma->cur == NULL) 1973 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1974 else { 1975 rval = XFS_ILOG_CORE; 1976 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 1977 new->br_startblock, new->br_blockcount, 1978 &i); 1979 if (error) 1980 goto done; 1981 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1982 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 1983 error = xfs_btree_insert(bma->cur, &i); 1984 if (error) 1985 goto done; 1986 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1987 } 1988 1989 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1990 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1991 bma->firstblock, bma->dfops, &bma->cur, 1, 1992 &tmp_rval, whichfork); 1993 rval |= tmp_rval; 1994 if (error) 1995 goto done; 1996 } 1997 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1998 startblockval(PREV.br_startblock) - 1999 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 2000 ep = xfs_iext_get_ext(ifp, bma->idx); 2001 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 2002 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 2003 2004 bma->idx++; 2005 break; 2006 2007 case 0: 2008 /* 2009 * Filling in the middle part of a previous delayed allocation. 2010 * Contiguity is impossible here. 2011 * This case is avoided almost all the time. 
2012 * 2013 * We start with a delayed allocation: 2014 * 2015 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 2016 * PREV @ idx 2017 * 2018 * and we are allocating: 2019 * +rrrrrrrrrrrrrrrrr+ 2020 * new 2021 * 2022 * and we set it up for insertion as: 2023 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 2024 * new 2025 * PREV @ idx LEFT RIGHT 2026 * inserted at idx + 1 2027 */ 2028 temp = new->br_startoff - PREV.br_startoff; 2029 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; 2030 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_); 2031 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ 2032 LEFT = *new; 2033 RIGHT.br_state = PREV.br_state; 2034 RIGHT.br_startblock = nullstartblock( 2035 (int)xfs_bmap_worst_indlen(bma->ip, temp2)); 2036 RIGHT.br_startoff = new_endoff; 2037 RIGHT.br_blockcount = temp2; 2038 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ 2039 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state); 2040 (*nextents)++; 2041 if (bma->cur == NULL) 2042 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2043 else { 2044 rval = XFS_ILOG_CORE; 2045 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 2046 new->br_startblock, new->br_blockcount, 2047 &i); 2048 if (error) 2049 goto done; 2050 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2051 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 2052 error = xfs_btree_insert(bma->cur, &i); 2053 if (error) 2054 goto done; 2055 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2056 } 2057 2058 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 2059 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2060 bma->firstblock, bma->dfops, &bma->cur, 2061 1, &tmp_rval, whichfork); 2062 rval |= tmp_rval; 2063 if (error) 2064 goto done; 2065 } 2066 temp = xfs_bmap_worst_indlen(bma->ip, temp); 2067 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); 2068 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - 2069 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 2070 if (diff > 0) { 2071 error = xfs_mod_fdblocks(bma->ip->i_mount, 2072 -((int64_t)diff), false); 2073 ASSERT(!error); 2074 if (error) 2075 goto done; 2076 } 2077 2078 ep = xfs_iext_get_ext(ifp, bma->idx); 2079 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 2080 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 2081 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_); 2082 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2), 2083 nullstartblock((int)temp2)); 2084 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_); 2085 2086 bma->idx++; 2087 da_new = temp + temp2; 2088 break; 2089 2090 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2091 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2092 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2093 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2094 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2095 case BMAP_LEFT_CONTIG: 2096 case BMAP_RIGHT_CONTIG: 2097 /* 2098 * These cases are all impossible. 
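 * Each of them would have a CONTIG bit set without the matching FILLING bit, which cannot happen: a neighbour can only be contiguous with the new extent when the corresponding edge of the delayed extent is being filled.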
2099 */ 2100 ASSERT(0); 2101 } 2102 2103 /* add reverse mapping */ 2104 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new); 2105 if (error) 2106 goto done; 2107 2108 /* convert to a btree if necessary */ 2109 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 2110 int tmp_logflags; /* partial log flag return val */ 2111 2112 ASSERT(bma->cur == NULL); 2113 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2114 bma->firstblock, bma->dfops, &bma->cur, 2115 da_old > 0, &tmp_logflags, whichfork); 2116 bma->logflags |= tmp_logflags; 2117 if (error) 2118 goto done; 2119 } 2120 2121 /* adjust for changes in reserved delayed indirect blocks */ 2122 if (da_old || da_new) { 2123 temp = da_new; 2124 if (bma->cur) 2125 temp += bma->cur->bc_private.b.allocated; 2126 ASSERT(temp <= da_old); 2127 if (temp < da_old) 2128 xfs_mod_fdblocks(bma->ip->i_mount, 2129 (int64_t)(da_old - temp), false); 2130 } 2131 2132 /* clear out the allocated field, done with it now in any case. */ 2133 if (bma->cur) 2134 bma->cur->bc_private.b.allocated = 0; 2135 2136 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 2137 done: 2138 if (whichfork != XFS_COW_FORK) 2139 bma->logflags |= rval; 2140 return error; 2141 #undef LEFT 2142 #undef RIGHT 2143 #undef PREV 2144 } 2145 2146 /* 2147 * Convert an unwritten allocation to a real allocation or vice versa. 2148 */ 2149 STATIC int /* error */ 2150 xfs_bmap_add_extent_unwritten_real( 2151 struct xfs_trans *tp, 2152 xfs_inode_t *ip, /* incore inode pointer */ 2153 int whichfork, 2154 xfs_extnum_t *idx, /* extent number to update/insert */ 2155 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 2156 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 2157 xfs_fsblock_t *first, /* pointer to firstblock variable */ 2158 struct xfs_defer_ops *dfops, /* list of extents to be freed */ 2159 int *logflagsp) /* inode logging flags */ 2160 { 2161 xfs_btree_cur_t *cur; /* btree cursor */ 2162 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ 2163 int error; /* error return value */ 2164 int i; /* temp state */ 2165 xfs_ifork_t *ifp; /* inode fork pointer */ 2166 xfs_fileoff_t new_endoff; /* end offset of new entry */ 2167 xfs_exntst_t newext; /* new extent state */ 2168 xfs_exntst_t oldext; /* old extent state */ 2169 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 2170 /* left is 0, right is 1, prev is 2 */ 2171 int rval=0; /* return value (logging flags) */ 2172 int state = 0;/* state bits, accessed thru macros */ 2173 struct xfs_mount *mp = ip->i_mount; 2174 2175 *logflagsp = 0; 2176 2177 cur = *curp; 2178 ifp = XFS_IFORK_PTR(ip, whichfork); 2179 if (whichfork == XFS_COW_FORK) 2180 state |= BMAP_COWFORK; 2181 2182 ASSERT(*idx >= 0); 2183 ASSERT(*idx <= xfs_iext_count(ifp)); 2184 ASSERT(!isnullstartblock(new->br_startblock)); 2185 2186 XFS_STATS_INC(mp, xs_add_exlist); 2187 2188 #define LEFT r[0] 2189 #define RIGHT r[1] 2190 #define PREV r[2] 2191 2192 /* 2193 * Set up a bunch of variables to make the tests simpler. 2194 */ 2195 error = 0; 2196 ep = xfs_iext_get_ext(ifp, *idx); 2197 xfs_bmbt_get_all(ep, &PREV); 2198 newext = new->br_state; 2199 oldext = (newext == XFS_EXT_UNWRITTEN) ? 
2200 XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 2201 ASSERT(PREV.br_state == oldext); 2202 new_endoff = new->br_startoff + new->br_blockcount; 2203 ASSERT(PREV.br_startoff <= new->br_startoff); 2204 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2205 2206 /* 2207 * Set flags determining what part of the previous oldext allocation 2208 * extent is being replaced by a newext allocation. 2209 */ 2210 if (PREV.br_startoff == new->br_startoff) 2211 state |= BMAP_LEFT_FILLING; 2212 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2213 state |= BMAP_RIGHT_FILLING; 2214 2215 /* 2216 * Check and set flags if this segment has a left neighbor. 2217 * Don't set contiguous if the combined extent would be too large. 2218 */ 2219 if (*idx > 0) { 2220 state |= BMAP_LEFT_VALID; 2221 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT); 2222 2223 if (isnullstartblock(LEFT.br_startblock)) 2224 state |= BMAP_LEFT_DELAY; 2225 } 2226 2227 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2228 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2229 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2230 LEFT.br_state == newext && 2231 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2232 state |= BMAP_LEFT_CONTIG; 2233 2234 /* 2235 * Check and set flags if this segment has a right neighbor. 2236 * Don't set contiguous if the combined extent would be too large. 2237 * Also check for all-three-contiguous being too large. 2238 */ 2239 if (*idx < xfs_iext_count(ifp) - 1) { 2240 state |= BMAP_RIGHT_VALID; 2241 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT); 2242 if (isnullstartblock(RIGHT.br_startblock)) 2243 state |= BMAP_RIGHT_DELAY; 2244 } 2245 2246 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2247 new_endoff == RIGHT.br_startoff && 2248 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2249 newext == RIGHT.br_state && 2250 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 2251 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2252 BMAP_RIGHT_FILLING)) != 2253 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2254 BMAP_RIGHT_FILLING) || 2255 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2256 <= MAXEXTLEN)) 2257 state |= BMAP_RIGHT_CONTIG; 2258 2259 /* 2260 * Switch out based on the FILLING and CONTIG state bits. 2261 */ 2262 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2263 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2264 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2265 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2266 /* 2267 * Setting all of a previous oldext extent to newext. 2268 * The left and right neighbors are both contiguous with new. 
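 * Two btree records (the converted extent and the right neighbour) are deleted and the left neighbour's record is extended to cover all three ranges, so the fork's extent count drops by two.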
2269 */ 2270 --*idx; 2271 2272 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2273 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2274 LEFT.br_blockcount + PREV.br_blockcount + 2275 RIGHT.br_blockcount); 2276 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2277 2278 xfs_iext_remove(ip, *idx + 1, 2, state); 2279 XFS_IFORK_NEXT_SET(ip, whichfork, 2280 XFS_IFORK_NEXTENTS(ip, whichfork) - 2); 2281 if (cur == NULL) 2282 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2283 else { 2284 rval = XFS_ILOG_CORE; 2285 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, 2286 RIGHT.br_startblock, 2287 RIGHT.br_blockcount, &i))) 2288 goto done; 2289 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2290 if ((error = xfs_btree_delete(cur, &i))) 2291 goto done; 2292 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2293 if ((error = xfs_btree_decrement(cur, 0, &i))) 2294 goto done; 2295 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2296 if ((error = xfs_btree_delete(cur, &i))) 2297 goto done; 2298 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2299 if ((error = xfs_btree_decrement(cur, 0, &i))) 2300 goto done; 2301 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2302 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 2303 LEFT.br_startblock, 2304 LEFT.br_blockcount + PREV.br_blockcount + 2305 RIGHT.br_blockcount, LEFT.br_state))) 2306 goto done; 2307 } 2308 break; 2309 2310 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2311 /* 2312 * Setting all of a previous oldext extent to newext. 2313 * The left neighbor is contiguous, the right is not. 2314 */ 2315 --*idx; 2316 2317 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2318 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2319 LEFT.br_blockcount + PREV.br_blockcount); 2320 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2321 2322 xfs_iext_remove(ip, *idx + 1, 1, state); 2323 XFS_IFORK_NEXT_SET(ip, whichfork, 2324 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2325 if (cur == NULL) 2326 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2327 else { 2328 rval = XFS_ILOG_CORE; 2329 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2330 PREV.br_startblock, PREV.br_blockcount, 2331 &i))) 2332 goto done; 2333 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2334 if ((error = xfs_btree_delete(cur, &i))) 2335 goto done; 2336 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2337 if ((error = xfs_btree_decrement(cur, 0, &i))) 2338 goto done; 2339 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2340 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 2341 LEFT.br_startblock, 2342 LEFT.br_blockcount + PREV.br_blockcount, 2343 LEFT.br_state))) 2344 goto done; 2345 } 2346 break; 2347 2348 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2349 /* 2350 * Setting all of a previous oldext extent to newext. 2351 * The right neighbor is contiguous, the left is not. 
2352 */ 2353 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2354 xfs_bmbt_set_blockcount(ep, 2355 PREV.br_blockcount + RIGHT.br_blockcount); 2356 xfs_bmbt_set_state(ep, newext); 2357 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2358 xfs_iext_remove(ip, *idx + 1, 1, state); 2359 XFS_IFORK_NEXT_SET(ip, whichfork, 2360 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2361 if (cur == NULL) 2362 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2363 else { 2364 rval = XFS_ILOG_CORE; 2365 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, 2366 RIGHT.br_startblock, 2367 RIGHT.br_blockcount, &i))) 2368 goto done; 2369 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2370 if ((error = xfs_btree_delete(cur, &i))) 2371 goto done; 2372 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2373 if ((error = xfs_btree_decrement(cur, 0, &i))) 2374 goto done; 2375 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2376 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2377 new->br_startblock, 2378 new->br_blockcount + RIGHT.br_blockcount, 2379 newext))) 2380 goto done; 2381 } 2382 break; 2383 2384 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2385 /* 2386 * Setting all of a previous oldext extent to newext. 2387 * Neither the left nor right neighbors are contiguous with 2388 * the new one. 2389 */ 2390 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2391 xfs_bmbt_set_state(ep, newext); 2392 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2393 2394 if (cur == NULL) 2395 rval = XFS_ILOG_DEXT; 2396 else { 2397 rval = 0; 2398 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 2399 new->br_startblock, new->br_blockcount, 2400 &i))) 2401 goto done; 2402 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2403 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2404 new->br_startblock, new->br_blockcount, 2405 newext))) 2406 goto done; 2407 } 2408 break; 2409 2410 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2411 /* 2412 * Setting the first part of a previous oldext extent to newext. 2413 * The left neighbor is contiguous. 2414 */ 2415 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_); 2416 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1), 2417 LEFT.br_blockcount + new->br_blockcount); 2418 xfs_bmbt_set_startoff(ep, 2419 PREV.br_startoff + new->br_blockcount); 2420 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_); 2421 2422 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2423 xfs_bmbt_set_startblock(ep, 2424 new->br_startblock + new->br_blockcount); 2425 xfs_bmbt_set_blockcount(ep, 2426 PREV.br_blockcount - new->br_blockcount); 2427 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2428 2429 --*idx; 2430 2431 if (cur == NULL) 2432 rval = XFS_ILOG_DEXT; 2433 else { 2434 rval = 0; 2435 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2436 PREV.br_startblock, PREV.br_blockcount, 2437 &i))) 2438 goto done; 2439 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2440 if ((error = xfs_bmbt_update(cur, 2441 PREV.br_startoff + new->br_blockcount, 2442 PREV.br_startblock + new->br_blockcount, 2443 PREV.br_blockcount - new->br_blockcount, 2444 oldext))) 2445 goto done; 2446 if ((error = xfs_btree_decrement(cur, 0, &i))) 2447 goto done; 2448 error = xfs_bmbt_update(cur, LEFT.br_startoff, 2449 LEFT.br_startblock, 2450 LEFT.br_blockcount + new->br_blockcount, 2451 LEFT.br_state); 2452 if (error) 2453 goto done; 2454 } 2455 break; 2456 2457 case BMAP_LEFT_FILLING: 2458 /* 2459 * Setting the first part of a previous oldext extent to newext. 2460 * The left neighbor is not contiguous. 
2461 */ 2462 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2463 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); 2464 xfs_bmbt_set_startoff(ep, new_endoff); 2465 xfs_bmbt_set_blockcount(ep, 2466 PREV.br_blockcount - new->br_blockcount); 2467 xfs_bmbt_set_startblock(ep, 2468 new->br_startblock + new->br_blockcount); 2469 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2470 2471 xfs_iext_insert(ip, *idx, 1, new, state); 2472 XFS_IFORK_NEXT_SET(ip, whichfork, 2473 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2474 if (cur == NULL) 2475 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2476 else { 2477 rval = XFS_ILOG_CORE; 2478 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2479 PREV.br_startblock, PREV.br_blockcount, 2480 &i))) 2481 goto done; 2482 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2483 if ((error = xfs_bmbt_update(cur, 2484 PREV.br_startoff + new->br_blockcount, 2485 PREV.br_startblock + new->br_blockcount, 2486 PREV.br_blockcount - new->br_blockcount, 2487 oldext))) 2488 goto done; 2489 cur->bc_rec.b = *new; 2490 if ((error = xfs_btree_insert(cur, &i))) 2491 goto done; 2492 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2493 } 2494 break; 2495 2496 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2497 /* 2498 * Setting the last part of a previous oldext extent to newext. 2499 * The right neighbor is contiguous with the new allocation. 2500 */ 2501 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2502 xfs_bmbt_set_blockcount(ep, 2503 PREV.br_blockcount - new->br_blockcount); 2504 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2505 2506 ++*idx; 2507 2508 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2509 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), 2510 new->br_startoff, new->br_startblock, 2511 new->br_blockcount + RIGHT.br_blockcount, newext); 2512 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2513 2514 if (cur == NULL) 2515 rval = XFS_ILOG_DEXT; 2516 else { 2517 rval = 0; 2518 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2519 PREV.br_startblock, 2520 PREV.br_blockcount, &i))) 2521 goto done; 2522 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2523 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 2524 PREV.br_startblock, 2525 PREV.br_blockcount - new->br_blockcount, 2526 oldext))) 2527 goto done; 2528 if ((error = xfs_btree_increment(cur, 0, &i))) 2529 goto done; 2530 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2531 new->br_startblock, 2532 new->br_blockcount + RIGHT.br_blockcount, 2533 newext))) 2534 goto done; 2535 } 2536 break; 2537 2538 case BMAP_RIGHT_FILLING: 2539 /* 2540 * Setting the last part of a previous oldext extent to newext. 2541 * The right neighbor is not contiguous. 
2542 */ 2543 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2544 xfs_bmbt_set_blockcount(ep, 2545 PREV.br_blockcount - new->br_blockcount); 2546 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2547 2548 ++*idx; 2549 xfs_iext_insert(ip, *idx, 1, new, state); 2550 2551 XFS_IFORK_NEXT_SET(ip, whichfork, 2552 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2553 if (cur == NULL) 2554 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2555 else { 2556 rval = XFS_ILOG_CORE; 2557 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2558 PREV.br_startblock, PREV.br_blockcount, 2559 &i))) 2560 goto done; 2561 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2562 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 2563 PREV.br_startblock, 2564 PREV.br_blockcount - new->br_blockcount, 2565 oldext))) 2566 goto done; 2567 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 2568 new->br_startblock, new->br_blockcount, 2569 &i))) 2570 goto done; 2571 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2572 cur->bc_rec.b.br_state = XFS_EXT_NORM; 2573 if ((error = xfs_btree_insert(cur, &i))) 2574 goto done; 2575 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2576 } 2577 break; 2578 2579 case 0: 2580 /* 2581 * Setting the middle part of a previous oldext extent to 2582 * newext. Contiguity is impossible here. 2583 * One extent becomes three extents. 2584 */ 2585 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2586 xfs_bmbt_set_blockcount(ep, 2587 new->br_startoff - PREV.br_startoff); 2588 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2589 2590 r[0] = *new; 2591 r[1].br_startoff = new_endoff; 2592 r[1].br_blockcount = 2593 PREV.br_startoff + PREV.br_blockcount - new_endoff; 2594 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2595 r[1].br_state = oldext; 2596 2597 ++*idx; 2598 xfs_iext_insert(ip, *idx, 2, &r[0], state); 2599 2600 XFS_IFORK_NEXT_SET(ip, whichfork, 2601 XFS_IFORK_NEXTENTS(ip, whichfork) + 2); 2602 if (cur == NULL) 2603 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2604 else { 2605 rval = XFS_ILOG_CORE; 2606 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2607 PREV.br_startblock, PREV.br_blockcount, 2608 &i))) 2609 goto done; 2610 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2611 /* new right extent - oldext */ 2612 if ((error = xfs_bmbt_update(cur, r[1].br_startoff, 2613 r[1].br_startblock, r[1].br_blockcount, 2614 r[1].br_state))) 2615 goto done; 2616 /* new left extent - oldext */ 2617 cur->bc_rec.b = PREV; 2618 cur->bc_rec.b.br_blockcount = 2619 new->br_startoff - PREV.br_startoff; 2620 if ((error = xfs_btree_insert(cur, &i))) 2621 goto done; 2622 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2623 /* 2624 * Reset the cursor to the position of the new extent 2625 * we are about to insert as we can't trust it after 2626 * the previous insert. 
2627 */ 2628 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 2629 new->br_startblock, new->br_blockcount, 2630 &i))) 2631 goto done; 2632 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2633 /* new middle extent - newext */ 2634 cur->bc_rec.b.br_state = new->br_state; 2635 if ((error = xfs_btree_insert(cur, &i))) 2636 goto done; 2637 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2638 } 2639 break; 2640 2641 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2642 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2643 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2644 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2645 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2646 case BMAP_LEFT_CONTIG: 2647 case BMAP_RIGHT_CONTIG: 2648 /* 2649 * These cases are all impossible. 2650 */ 2651 ASSERT(0); 2652 } 2653 2654 /* update reverse mappings */ 2655 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new); 2656 if (error) 2657 goto done; 2658 2659 /* convert to a btree if necessary */ 2660 if (xfs_bmap_needs_btree(ip, whichfork)) { 2661 int tmp_logflags; /* partial log flag return val */ 2662 2663 ASSERT(cur == NULL); 2664 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur, 2665 0, &tmp_logflags, whichfork); 2666 *logflagsp |= tmp_logflags; 2667 if (error) 2668 goto done; 2669 } 2670 2671 /* clear out the allocated field, done with it now in any case. */ 2672 if (cur) { 2673 cur->bc_private.b.allocated = 0; 2674 *curp = cur; 2675 } 2676 2677 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2678 done: 2679 *logflagsp |= rval; 2680 return error; 2681 #undef LEFT 2682 #undef RIGHT 2683 #undef PREV 2684 } 2685 2686 /* 2687 * Convert a hole to a delayed allocation. 2688 */ 2689 STATIC void 2690 xfs_bmap_add_extent_hole_delay( 2691 xfs_inode_t *ip, /* incore inode pointer */ 2692 int whichfork, 2693 xfs_extnum_t *idx, /* extent number to update/insert */ 2694 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2695 { 2696 xfs_ifork_t *ifp; /* inode fork pointer */ 2697 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2698 xfs_filblks_t newlen=0; /* new indirect size */ 2699 xfs_filblks_t oldlen=0; /* old indirect size */ 2700 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2701 int state; /* state bits, accessed thru macros */ 2702 xfs_filblks_t temp=0; /* temp for indirect calculations */ 2703 2704 ifp = XFS_IFORK_PTR(ip, whichfork); 2705 state = 0; 2706 if (whichfork == XFS_COW_FORK) 2707 state |= BMAP_COWFORK; 2708 ASSERT(isnullstartblock(new->br_startblock)); 2709 2710 /* 2711 * Check and set flags if this segment has a left neighbor 2712 */ 2713 if (*idx > 0) { 2714 state |= BMAP_LEFT_VALID; 2715 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left); 2716 2717 if (isnullstartblock(left.br_startblock)) 2718 state |= BMAP_LEFT_DELAY; 2719 } 2720 2721 /* 2722 * Check and set flags if the current (right) segment exists. 2723 * If it doesn't exist, we're converting the hole at end-of-file. 2724 */ 2725 if (*idx < xfs_iext_count(ifp)) { 2726 state |= BMAP_RIGHT_VALID; 2727 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right); 2728 2729 if (isnullstartblock(right.br_startblock)) 2730 state |= BMAP_RIGHT_DELAY; 2731 } 2732 2733 /* 2734 * Set contiguity flags on the left and right neighbors. 2735 * Don't let extents get too large, even if the pieces are contiguous. 
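 * Note that only neighbouring delayed extents are merge candidates here; a real neighbour is never treated as contiguous with a new delalloc extent, since a delalloc record has no physical start block to be adjacent to.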
2736 */ 2737 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2738 left.br_startoff + left.br_blockcount == new->br_startoff && 2739 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2740 state |= BMAP_LEFT_CONTIG; 2741 2742 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2743 new->br_startoff + new->br_blockcount == right.br_startoff && 2744 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2745 (!(state & BMAP_LEFT_CONTIG) || 2746 (left.br_blockcount + new->br_blockcount + 2747 right.br_blockcount <= MAXEXTLEN))) 2748 state |= BMAP_RIGHT_CONTIG; 2749 2750 /* 2751 * Switch out based on the contiguity flags. 2752 */ 2753 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2754 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2755 /* 2756 * New allocation is contiguous with delayed allocations 2757 * on the left and on the right. 2758 * Merge all three into a single extent record. 2759 */ 2760 --*idx; 2761 temp = left.br_blockcount + new->br_blockcount + 2762 right.br_blockcount; 2763 2764 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2765 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp); 2766 oldlen = startblockval(left.br_startblock) + 2767 startblockval(new->br_startblock) + 2768 startblockval(right.br_startblock); 2769 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2770 oldlen); 2771 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx), 2772 nullstartblock((int)newlen)); 2773 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2774 2775 xfs_iext_remove(ip, *idx + 1, 1, state); 2776 break; 2777 2778 case BMAP_LEFT_CONTIG: 2779 /* 2780 * New allocation is contiguous with a delayed allocation 2781 * on the left. 2782 * Merge the new allocation with the left neighbor. 2783 */ 2784 --*idx; 2785 temp = left.br_blockcount + new->br_blockcount; 2786 2787 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2788 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp); 2789 oldlen = startblockval(left.br_startblock) + 2790 startblockval(new->br_startblock); 2791 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2792 oldlen); 2793 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx), 2794 nullstartblock((int)newlen)); 2795 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2796 break; 2797 2798 case BMAP_RIGHT_CONTIG: 2799 /* 2800 * New allocation is contiguous with a delayed allocation 2801 * on the right. 2802 * Merge the new allocation with the right neighbor. 2803 */ 2804 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2805 temp = new->br_blockcount + right.br_blockcount; 2806 oldlen = startblockval(new->br_startblock) + 2807 startblockval(right.br_startblock); 2808 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2809 oldlen); 2810 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), 2811 new->br_startoff, 2812 nullstartblock((int)newlen), temp, right.br_state); 2813 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2814 break; 2815 2816 case 0: 2817 /* 2818 * New allocation is not contiguous with another 2819 * delayed allocation. 2820 * Insert a new entry. 2821 */ 2822 oldlen = newlen = 0; 2823 xfs_iext_insert(ip, *idx, 1, new, state); 2824 break; 2825 } 2826 if (oldlen != newlen) { 2827 ASSERT(oldlen > newlen); 2828 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2829 false); 2830 /* 2831 * Nothing to do for disk quota accounting here. 2832 */ 2833 } 2834 } 2835 2836 /* 2837 * Convert a hole to a real allocation. 
2838 */ 2839 STATIC int /* error */ 2840 xfs_bmap_add_extent_hole_real( 2841 struct xfs_trans *tp, 2842 struct xfs_inode *ip, 2843 int whichfork, 2844 xfs_extnum_t *idx, 2845 struct xfs_btree_cur **curp, 2846 struct xfs_bmbt_irec *new, 2847 xfs_fsblock_t *first, 2848 struct xfs_defer_ops *dfops, 2849 int *logflagsp) 2850 { 2851 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 2852 struct xfs_mount *mp = ip->i_mount; 2853 struct xfs_btree_cur *cur = *curp; 2854 int error; /* error return value */ 2855 int i; /* temp state */ 2856 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2857 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2858 int rval=0; /* return value (logging flags) */ 2859 int state; /* state bits, accessed thru macros */ 2860 2861 ASSERT(*idx >= 0); 2862 ASSERT(*idx <= xfs_iext_count(ifp)); 2863 ASSERT(!isnullstartblock(new->br_startblock)); 2864 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 2865 2866 XFS_STATS_INC(mp, xs_add_exlist); 2867 2868 state = 0; 2869 if (whichfork == XFS_ATTR_FORK) 2870 state |= BMAP_ATTRFORK; 2871 if (whichfork == XFS_COW_FORK) 2872 state |= BMAP_COWFORK; 2873 2874 /* 2875 * Check and set flags if this segment has a left neighbor. 2876 */ 2877 if (*idx > 0) { 2878 state |= BMAP_LEFT_VALID; 2879 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left); 2880 if (isnullstartblock(left.br_startblock)) 2881 state |= BMAP_LEFT_DELAY; 2882 } 2883 2884 /* 2885 * Check and set flags if this segment has a current value. 2886 * Not true if we're inserting into the "hole" at eof. 2887 */ 2888 if (*idx < xfs_iext_count(ifp)) { 2889 state |= BMAP_RIGHT_VALID; 2890 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right); 2891 if (isnullstartblock(right.br_startblock)) 2892 state |= BMAP_RIGHT_DELAY; 2893 } 2894 2895 /* 2896 * We're inserting a real allocation between "left" and "right". 2897 * Set the contiguity flags. Don't let extents get too large. 2898 */ 2899 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2900 left.br_startoff + left.br_blockcount == new->br_startoff && 2901 left.br_startblock + left.br_blockcount == new->br_startblock && 2902 left.br_state == new->br_state && 2903 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2904 state |= BMAP_LEFT_CONTIG; 2905 2906 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2907 new->br_startoff + new->br_blockcount == right.br_startoff && 2908 new->br_startblock + new->br_blockcount == right.br_startblock && 2909 new->br_state == right.br_state && 2910 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2911 (!(state & BMAP_LEFT_CONTIG) || 2912 left.br_blockcount + new->br_blockcount + 2913 right.br_blockcount <= MAXEXTLEN)) 2914 state |= BMAP_RIGHT_CONTIG; 2915 2916 error = 0; 2917 /* 2918 * Select which case we're in here, and implement it. 2919 */ 2920 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2921 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2922 /* 2923 * New allocation is contiguous with real allocations on the 2924 * left and on the right. 2925 * Merge all three into a single extent record. 
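 * The right neighbour's record is removed and the left neighbour is extended to cover all three ranges, so the fork's extent count drops by one.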
2926 */ 2927 --*idx; 2928 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2929 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2930 left.br_blockcount + new->br_blockcount + 2931 right.br_blockcount); 2932 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2933 2934 xfs_iext_remove(ip, *idx + 1, 1, state); 2935 2936 XFS_IFORK_NEXT_SET(ip, whichfork, 2937 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2938 if (cur == NULL) { 2939 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2940 } else { 2941 rval = XFS_ILOG_CORE; 2942 error = xfs_bmbt_lookup_eq(cur, right.br_startoff, 2943 right.br_startblock, right.br_blockcount, 2944 &i); 2945 if (error) 2946 goto done; 2947 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2948 error = xfs_btree_delete(cur, &i); 2949 if (error) 2950 goto done; 2951 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2952 error = xfs_btree_decrement(cur, 0, &i); 2953 if (error) 2954 goto done; 2955 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2956 error = xfs_bmbt_update(cur, left.br_startoff, 2957 left.br_startblock, 2958 left.br_blockcount + 2959 new->br_blockcount + 2960 right.br_blockcount, 2961 left.br_state); 2962 if (error) 2963 goto done; 2964 } 2965 break; 2966 2967 case BMAP_LEFT_CONTIG: 2968 /* 2969 * New allocation is contiguous with a real allocation 2970 * on the left. 2971 * Merge the new allocation with the left neighbor. 2972 */ 2973 --*idx; 2974 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2975 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2976 left.br_blockcount + new->br_blockcount); 2977 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2978 2979 if (cur == NULL) { 2980 rval = xfs_ilog_fext(whichfork); 2981 } else { 2982 rval = 0; 2983 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, 2984 left.br_startblock, left.br_blockcount, 2985 &i); 2986 if (error) 2987 goto done; 2988 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2989 error = xfs_bmbt_update(cur, left.br_startoff, 2990 left.br_startblock, 2991 left.br_blockcount + 2992 new->br_blockcount, 2993 left.br_state); 2994 if (error) 2995 goto done; 2996 } 2997 break; 2998 2999 case BMAP_RIGHT_CONTIG: 3000 /* 3001 * New allocation is contiguous with a real allocation 3002 * on the right. 3003 * Merge the new allocation with the right neighbor. 3004 */ 3005 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 3006 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), 3007 new->br_startoff, new->br_startblock, 3008 new->br_blockcount + right.br_blockcount, 3009 right.br_state); 3010 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 3011 3012 if (cur == NULL) { 3013 rval = xfs_ilog_fext(whichfork); 3014 } else { 3015 rval = 0; 3016 error = xfs_bmbt_lookup_eq(cur, 3017 right.br_startoff, 3018 right.br_startblock, 3019 right.br_blockcount, &i); 3020 if (error) 3021 goto done; 3022 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 3023 error = xfs_bmbt_update(cur, new->br_startoff, 3024 new->br_startblock, 3025 new->br_blockcount + 3026 right.br_blockcount, 3027 right.br_state); 3028 if (error) 3029 goto done; 3030 } 3031 break; 3032 3033 case 0: 3034 /* 3035 * New allocation is not contiguous with another 3036 * real allocation. 3037 * Insert a new entry. 
3038 */ 3039 xfs_iext_insert(ip, *idx, 1, new, state); 3040 XFS_IFORK_NEXT_SET(ip, whichfork, 3041 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 3042 if (cur == NULL) { 3043 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 3044 } else { 3045 rval = XFS_ILOG_CORE; 3046 error = xfs_bmbt_lookup_eq(cur, 3047 new->br_startoff, 3048 new->br_startblock, 3049 new->br_blockcount, &i); 3050 if (error) 3051 goto done; 3052 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 3053 cur->bc_rec.b.br_state = new->br_state; 3054 error = xfs_btree_insert(cur, &i); 3055 if (error) 3056 goto done; 3057 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 3058 } 3059 break; 3060 } 3061 3062 /* add reverse mapping */ 3063 error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new); 3064 if (error) 3065 goto done; 3066 3067 /* convert to a btree if necessary */ 3068 if (xfs_bmap_needs_btree(ip, whichfork)) { 3069 int tmp_logflags; /* partial log flag return val */ 3070 3071 ASSERT(cur == NULL); 3072 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp, 3073 0, &tmp_logflags, whichfork); 3074 *logflagsp |= tmp_logflags; 3075 cur = *curp; 3076 if (error) 3077 goto done; 3078 } 3079 3080 /* clear out the allocated field, done with it now in any case. */ 3081 if (cur) 3082 cur->bc_private.b.allocated = 0; 3083 3084 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 3085 done: 3086 *logflagsp |= rval; 3087 return error; 3088 } 3089 3090 /* 3091 * Functions used in the extent read, allocate and remove paths 3092 */ 3093 3094 /* 3095 * Adjust the size of the new extent based on di_extsize and rt extsize. 3096 */ 3097 int 3098 xfs_bmap_extsize_align( 3099 xfs_mount_t *mp, 3100 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 3101 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 3102 xfs_extlen_t extsz, /* align to this extent size */ 3103 int rt, /* is this a realtime inode? */ 3104 int eof, /* is extent at end-of-file? */ 3105 int delay, /* creating delalloc extent? */ 3106 int convert, /* overwriting unwritten extent? */ 3107 xfs_fileoff_t *offp, /* in/out: aligned offset */ 3108 xfs_extlen_t *lenp) /* in/out: aligned length */ 3109 { 3110 xfs_fileoff_t orig_off; /* original offset */ 3111 xfs_extlen_t orig_alen; /* original length */ 3112 xfs_fileoff_t orig_end; /* original off+len */ 3113 xfs_fileoff_t nexto; /* next file offset */ 3114 xfs_fileoff_t prevo; /* previous file offset */ 3115 xfs_fileoff_t align_off; /* temp for offset */ 3116 xfs_extlen_t align_alen; /* temp for length */ 3117 xfs_extlen_t temp; /* temp for calculations */ 3118 3119 if (convert) 3120 return 0; 3121 3122 orig_off = align_off = *offp; 3123 orig_alen = align_alen = *lenp; 3124 orig_end = orig_off + orig_alen; 3125 3126 /* 3127 * If this request overlaps an existing extent, then don't 3128 * attempt to perform any additional alignment. 3129 */ 3130 if (!delay && !eof && 3131 (orig_off >= gotp->br_startoff) && 3132 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 3133 return 0; 3134 } 3135 3136 /* 3137 * If the file offset is unaligned vs. the extent size 3138 * we need to align it. This will be possible unless 3139 * the file was previously written with a kernel that didn't 3140 * perform this alignment, or if a truncate shot us in the 3141 * foot. 3142 */ 3143 temp = do_mod(orig_off, extsz); 3144 if (temp) { 3145 align_alen += temp; 3146 align_off -= temp; 3147 } 3148 3149 /* Same adjustment for the end of the requested area. 
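 * (Illustrative example: with a 16-block extent size hint, a request at offset 21 for 9 blocks is first pulled back above to offset 16, length 14, and then rounded up here to length 16, so both ends land on extent size boundaries.)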
*/ 3150 temp = (align_alen % extsz); 3151 if (temp) 3152 align_alen += extsz - temp; 3153 3154 /* 3155 * For large extent hint sizes, the aligned extent might be larger than 3156 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls 3157 * the length back under MAXEXTLEN. The outer allocation loops handle 3158 * short allocation just fine, so it is safe to do this. We only want to 3159 * do it when we are forced to, though, because it means more allocation 3160 * operations are required. 3161 */ 3162 while (align_alen > MAXEXTLEN) 3163 align_alen -= extsz; 3164 ASSERT(align_alen <= MAXEXTLEN); 3165 3166 /* 3167 * If the previous block overlaps with this proposed allocation 3168 * then move the start forward without adjusting the length. 3169 */ 3170 if (prevp->br_startoff != NULLFILEOFF) { 3171 if (prevp->br_startblock == HOLESTARTBLOCK) 3172 prevo = prevp->br_startoff; 3173 else 3174 prevo = prevp->br_startoff + prevp->br_blockcount; 3175 } else 3176 prevo = 0; 3177 if (align_off != orig_off && align_off < prevo) 3178 align_off = prevo; 3179 /* 3180 * If the next block overlaps with this proposed allocation 3181 * then move the start back without adjusting the length, 3182 * but not before offset 0. 3183 * This may of course make the start overlap previous block, 3184 * and if we hit the offset 0 limit then the next block 3185 * can still overlap too. 3186 */ 3187 if (!eof && gotp->br_startoff != NULLFILEOFF) { 3188 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 3189 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 3190 nexto = gotp->br_startoff + gotp->br_blockcount; 3191 else 3192 nexto = gotp->br_startoff; 3193 } else 3194 nexto = NULLFILEOFF; 3195 if (!eof && 3196 align_off + align_alen != orig_end && 3197 align_off + align_alen > nexto) 3198 align_off = nexto > align_alen ? nexto - align_alen : 0; 3199 /* 3200 * If we're now overlapping the next or previous extent that 3201 * means we can't fit an extsz piece in this hole. Just move 3202 * the start forward to the first valid spot and set 3203 * the length so we hit the end. 3204 */ 3205 if (align_off != orig_off && align_off < prevo) 3206 align_off = prevo; 3207 if (align_off + align_alen != orig_end && 3208 align_off + align_alen > nexto && 3209 nexto != NULLFILEOFF) { 3210 ASSERT(nexto > prevo); 3211 align_alen = nexto - align_off; 3212 } 3213 3214 /* 3215 * If realtime, and the result isn't a multiple of the realtime 3216 * extent size we need to remove blocks until it is. 3217 */ 3218 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 3219 /* 3220 * We're not covering the original request, or 3221 * we won't be able to once we fix the length. 3222 */ 3223 if (orig_off < align_off || 3224 orig_end > align_off + align_alen || 3225 align_alen - temp < orig_alen) 3226 return -EINVAL; 3227 /* 3228 * Try to fix it by moving the start up. 3229 */ 3230 if (align_off + temp <= orig_off) { 3231 align_alen -= temp; 3232 align_off += temp; 3233 } 3234 /* 3235 * Try to fix it by moving the end in. 3236 */ 3237 else if (align_off + align_alen - temp >= orig_end) 3238 align_alen -= temp; 3239 /* 3240 * Set the start to the minimum then trim the length. 3241 */ 3242 else { 3243 align_alen -= orig_off - align_off; 3244 align_off = orig_off; 3245 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3246 } 3247 /* 3248 * Result doesn't cover the request, fail it. 
3249 */ 3250 if (orig_off < align_off || orig_end > align_off + align_alen) 3251 return -EINVAL; 3252 } else { 3253 ASSERT(orig_off >= align_off); 3254 /* see MAXEXTLEN handling above */ 3255 ASSERT(orig_end <= align_off + align_alen || 3256 align_alen + extsz > MAXEXTLEN); 3257 } 3258 3259 #ifdef DEBUG 3260 if (!eof && gotp->br_startoff != NULLFILEOFF) 3261 ASSERT(align_off + align_alen <= gotp->br_startoff); 3262 if (prevp->br_startoff != NULLFILEOFF) 3263 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3264 #endif 3265 3266 *lenp = align_alen; 3267 *offp = align_off; 3268 return 0; 3269 } 3270 3271 #define XFS_ALLOC_GAP_UNITS 4 3272 3273 void 3274 xfs_bmap_adjacent( 3275 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3276 { 3277 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3278 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3279 xfs_mount_t *mp; /* mount point structure */ 3280 int nullfb; /* true if ap->firstblock isn't set */ 3281 int rt; /* true if inode is realtime */ 3282 3283 #define ISVALID(x,y) \ 3284 (rt ? \ 3285 (x) < mp->m_sb.sb_rblocks : \ 3286 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3287 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3288 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3289 3290 mp = ap->ip->i_mount; 3291 nullfb = *ap->firstblock == NULLFSBLOCK; 3292 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3293 xfs_alloc_is_userdata(ap->datatype); 3294 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); 3295 /* 3296 * If allocating at eof, and there's a previous real block, 3297 * try to use its last block as our starting point. 3298 */ 3299 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3300 !isnullstartblock(ap->prev.br_startblock) && 3301 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3302 ap->prev.br_startblock)) { 3303 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3304 /* 3305 * Adjust for the gap between prevp and us. 3306 */ 3307 adjust = ap->offset - 3308 (ap->prev.br_startoff + ap->prev.br_blockcount); 3309 if (adjust && 3310 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3311 ap->blkno += adjust; 3312 } 3313 /* 3314 * If not at eof, then compare the two neighbor blocks. 3315 * Figure out whether either one gives us a good starting point, 3316 * and pick the better one. 3317 */ 3318 else if (!ap->eof) { 3319 xfs_fsblock_t gotbno; /* right side block number */ 3320 xfs_fsblock_t gotdiff=0; /* right side difference */ 3321 xfs_fsblock_t prevbno; /* left side block number */ 3322 xfs_fsblock_t prevdiff=0; /* left side difference */ 3323 3324 /* 3325 * If there's a previous (left) block, select a requested 3326 * start block based on it. 3327 */ 3328 if (ap->prev.br_startoff != NULLFILEOFF && 3329 !isnullstartblock(ap->prev.br_startblock) && 3330 (prevbno = ap->prev.br_startblock + 3331 ap->prev.br_blockcount) && 3332 ISVALID(prevbno, ap->prev.br_startblock)) { 3333 /* 3334 * Calculate gap to end of previous block. 3335 */ 3336 adjust = prevdiff = ap->offset - 3337 (ap->prev.br_startoff + 3338 ap->prev.br_blockcount); 3339 /* 3340 * Figure the startblock based on the previous block's 3341 * end and the gap size. 3342 * Heuristic! 3343 * If the gap is large relative to the piece we're 3344 * allocating, or using it gives us an invalid block 3345 * number, then just use the end of the previous block. 
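 * "Large" here means more than XFS_ALLOC_GAP_UNITS (4) times the length being allocated.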
3346 */ 3347 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3348 ISVALID(prevbno + prevdiff, 3349 ap->prev.br_startblock)) 3350 prevbno += adjust; 3351 else 3352 prevdiff += adjust; 3353 /* 3354 * If the firstblock forbids it, can't use it, 3355 * must use default. 3356 */ 3357 if (!rt && !nullfb && 3358 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) 3359 prevbno = NULLFSBLOCK; 3360 } 3361 /* 3362 * No previous block or can't follow it, just default. 3363 */ 3364 else 3365 prevbno = NULLFSBLOCK; 3366 /* 3367 * If there's a following (right) block, select a requested 3368 * start block based on it. 3369 */ 3370 if (!isnullstartblock(ap->got.br_startblock)) { 3371 /* 3372 * Calculate gap to start of next block. 3373 */ 3374 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3375 /* 3376 * Figure the startblock based on the next block's 3377 * start and the gap size. 3378 */ 3379 gotbno = ap->got.br_startblock; 3380 /* 3381 * Heuristic! 3382 * If the gap is large relative to the piece we're 3383 * allocating, or using it gives us an invalid block 3384 * number, then just use the start of the next block 3385 * offset by our length. 3386 */ 3387 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3388 ISVALID(gotbno - gotdiff, gotbno)) 3389 gotbno -= adjust; 3390 else if (ISVALID(gotbno - ap->length, gotbno)) { 3391 gotbno -= ap->length; 3392 gotdiff += adjust - ap->length; 3393 } else 3394 gotdiff += adjust; 3395 /* 3396 * If the firstblock forbids it, can't use it, 3397 * must use default. 3398 */ 3399 if (!rt && !nullfb && 3400 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) 3401 gotbno = NULLFSBLOCK; 3402 } 3403 /* 3404 * No next block, just default. 3405 */ 3406 else 3407 gotbno = NULLFSBLOCK; 3408 /* 3409 * If both valid, pick the better one, else the only good 3410 * one, else ap->blkno is already set (to 0 or the inode block). 3411 */ 3412 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3413 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno; 3414 else if (prevbno != NULLFSBLOCK) 3415 ap->blkno = prevbno; 3416 else if (gotbno != NULLFSBLOCK) 3417 ap->blkno = gotbno; 3418 } 3419 #undef ISVALID 3420 } 3421 3422 static int 3423 xfs_bmap_longest_free_extent( 3424 struct xfs_trans *tp, 3425 xfs_agnumber_t ag, 3426 xfs_extlen_t *blen, 3427 int *notinit) 3428 { 3429 struct xfs_mount *mp = tp->t_mountp; 3430 struct xfs_perag *pag; 3431 xfs_extlen_t longest; 3432 int error = 0; 3433 3434 pag = xfs_perag_get(mp, ag); 3435 if (!pag->pagf_init) { 3436 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK); 3437 if (error) 3438 goto out; 3439 3440 if (!pag->pagf_init) { 3441 *notinit = 1; 3442 goto out; 3443 } 3444 } 3445 3446 longest = xfs_alloc_longest_free_extent(mp, pag, 3447 xfs_alloc_min_freelist(mp, pag), 3448 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3449 if (*blen < longest) 3450 *blen = longest; 3451 3452 out: 3453 xfs_perag_put(pag); 3454 return error; 3455 } 3456 3457 static void 3458 xfs_bmap_select_minlen( 3459 struct xfs_bmalloca *ap, 3460 struct xfs_alloc_arg *args, 3461 xfs_extlen_t *blen, 3462 int notinit) 3463 { 3464 if (notinit || *blen < ap->minlen) { 3465 /* 3466 * Since we did a BUF_TRYLOCK above, it is possible that 3467 * there is space for this request. 3468 */ 3469 args->minlen = ap->minlen; 3470 } else if (*blen < args->maxlen) { 3471 /* 3472 * If the best seen length is less than the request length, 3473 * use the best as the minimum. 
3474 */ 3475 args->minlen = *blen; 3476 } else { 3477 /* 3478 * Otherwise we've seen an extent as big as maxlen, use that 3479 * as the minimum. 3480 */ 3481 args->minlen = args->maxlen; 3482 } 3483 } 3484 3485 STATIC int 3486 xfs_bmap_btalloc_nullfb( 3487 struct xfs_bmalloca *ap, 3488 struct xfs_alloc_arg *args, 3489 xfs_extlen_t *blen) 3490 { 3491 struct xfs_mount *mp = ap->ip->i_mount; 3492 xfs_agnumber_t ag, startag; 3493 int notinit = 0; 3494 int error; 3495 3496 args->type = XFS_ALLOCTYPE_START_BNO; 3497 args->total = ap->total; 3498 3499 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3500 if (startag == NULLAGNUMBER) 3501 startag = ag = 0; 3502 3503 while (*blen < args->maxlen) { 3504 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3505 &notinit); 3506 if (error) 3507 return error; 3508 3509 if (++ag == mp->m_sb.sb_agcount) 3510 ag = 0; 3511 if (ag == startag) 3512 break; 3513 } 3514 3515 xfs_bmap_select_minlen(ap, args, blen, notinit); 3516 return 0; 3517 } 3518 3519 STATIC int 3520 xfs_bmap_btalloc_filestreams( 3521 struct xfs_bmalloca *ap, 3522 struct xfs_alloc_arg *args, 3523 xfs_extlen_t *blen) 3524 { 3525 struct xfs_mount *mp = ap->ip->i_mount; 3526 xfs_agnumber_t ag; 3527 int notinit = 0; 3528 int error; 3529 3530 args->type = XFS_ALLOCTYPE_NEAR_BNO; 3531 args->total = ap->total; 3532 3533 ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3534 if (ag == NULLAGNUMBER) 3535 ag = 0; 3536 3537 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit); 3538 if (error) 3539 return error; 3540 3541 if (*blen < args->maxlen) { 3542 error = xfs_filestream_new_ag(ap, &ag); 3543 if (error) 3544 return error; 3545 3546 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3547 &notinit); 3548 if (error) 3549 return error; 3550 3551 } 3552 3553 xfs_bmap_select_minlen(ap, args, blen, notinit); 3554 3555 /* 3556 * Set the failure fallback case to look in the selected AG as stream 3557 * may have moved. 3558 */ 3559 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 3560 return 0; 3561 } 3562 3563 STATIC int 3564 xfs_bmap_btalloc( 3565 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3566 { 3567 xfs_mount_t *mp; /* mount point structure */ 3568 xfs_alloctype_t atype = 0; /* type for allocation routines */ 3569 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3570 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3571 xfs_agnumber_t ag; 3572 xfs_alloc_arg_t args; 3573 xfs_extlen_t blen; 3574 xfs_extlen_t nextminlen = 0; 3575 int nullfb; /* true if ap->firstblock isn't set */ 3576 int isaligned; 3577 int tryagain; 3578 int error; 3579 int stripe_align; 3580 3581 ASSERT(ap->length); 3582 3583 mp = ap->ip->i_mount; 3584 3585 /* stripe alignment for allocation is determined by mount parameters */ 3586 stripe_align = 0; 3587 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 3588 stripe_align = mp->m_swidth; 3589 else if (mp->m_dalign) 3590 stripe_align = mp->m_dalign; 3591 3592 if (ap->flags & XFS_BMAPI_COWFORK) 3593 align = xfs_get_cowextsz_hint(ap->ip); 3594 else if (xfs_alloc_is_userdata(ap->datatype)) 3595 align = xfs_get_extsz_hint(ap->ip); 3596 if (align) { 3597 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3598 align, 0, ap->eof, 0, ap->conv, 3599 &ap->offset, &ap->length); 3600 ASSERT(!error); 3601 ASSERT(ap->length); 3602 } 3603 3604 3605 nullfb = *ap->firstblock == NULLFSBLOCK; 3606 fb_agno = nullfb ?
NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); 3607 if (nullfb) { 3608 if (xfs_alloc_is_userdata(ap->datatype) && 3609 xfs_inode_is_filestream(ap->ip)) { 3610 ag = xfs_filestream_lookup_ag(ap->ip); 3611 ag = (ag != NULLAGNUMBER) ? ag : 0; 3612 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 3613 } else { 3614 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 3615 } 3616 } else 3617 ap->blkno = *ap->firstblock; 3618 3619 xfs_bmap_adjacent(ap); 3620 3621 /* 3622 * If allowed, use ap->blkno; otherwise must use firstblock since 3623 * it's in the right allocation group. 3624 */ 3625 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 3626 ; 3627 else 3628 ap->blkno = *ap->firstblock; 3629 /* 3630 * Normal allocation, done through xfs_alloc_vextent. 3631 */ 3632 tryagain = isaligned = 0; 3633 memset(&args, 0, sizeof(args)); 3634 args.tp = ap->tp; 3635 args.mp = mp; 3636 args.fsbno = ap->blkno; 3637 xfs_rmap_skip_owner_update(&args.oinfo); 3638 3639 /* Trim the allocation back to the maximum an AG can fit. */ 3640 args.maxlen = MIN(ap->length, mp->m_ag_max_usable); 3641 args.firstblock = *ap->firstblock; 3642 blen = 0; 3643 if (nullfb) { 3644 /* 3645 * Search for an allocation group with a single extent large 3646 * enough for the request. If one isn't found, then adjust 3647 * the minimum allocation size to the largest space found. 3648 */ 3649 if (xfs_alloc_is_userdata(ap->datatype) && 3650 xfs_inode_is_filestream(ap->ip)) 3651 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); 3652 else 3653 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 3654 if (error) 3655 return error; 3656 } else if (ap->dfops->dop_low) { 3657 if (xfs_inode_is_filestream(ap->ip)) 3658 args.type = XFS_ALLOCTYPE_FIRST_AG; 3659 else 3660 args.type = XFS_ALLOCTYPE_START_BNO; 3661 args.total = args.minlen = ap->minlen; 3662 } else { 3663 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3664 args.total = ap->total; 3665 args.minlen = ap->minlen; 3666 } 3667 /* apply extent size hints if obtained earlier */ 3668 if (align) { 3669 args.prod = align; 3670 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) 3671 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3672 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3673 args.prod = 1; 3674 args.mod = 0; 3675 } else { 3676 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3677 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) 3678 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3679 } 3680 /* 3681 * If we are not low on available data blocks, and the 3682 * underlying logical volume manager is a stripe, and 3683 * the file offset is zero then try to allocate data 3684 * blocks on stripe unit boundary. 3685 * NOTE: ap->aeof is only set if the allocation length 3686 * is >= the stripe unit and the allocation offset is 3687 * at the end of file. 3688 */ 3689 if (!ap->dfops->dop_low && ap->aeof) { 3690 if (!ap->offset) { 3691 args.alignment = stripe_align; 3692 atype = args.type; 3693 isaligned = 1; 3694 /* 3695 * Adjust for alignment 3696 */ 3697 if (blen > args.alignment && blen <= args.maxlen) 3698 args.minlen = blen - args.alignment; 3699 args.minalignslop = 0; 3700 } else { 3701 /* 3702 * First try an exact bno allocation. 3703 * If it fails then do a near or start bno 3704 * allocation with alignment turned on. 3705 */ 3706 atype = args.type; 3707 tryagain = 1; 3708 args.type = XFS_ALLOCTYPE_THIS_BNO; 3709 args.alignment = 1; 3710 /* 3711 * Compute the minlen+alignment for the 3712 * next case. 
Set slop so that the value 3713 * of minlen+alignment+slop doesn't go up 3714 * between the calls. 3715 */ 3716 if (blen > stripe_align && blen <= args.maxlen) 3717 nextminlen = blen - stripe_align; 3718 else 3719 nextminlen = args.minlen; 3720 if (nextminlen + stripe_align > args.minlen + 1) 3721 args.minalignslop = 3722 nextminlen + stripe_align - 3723 args.minlen - 1; 3724 else 3725 args.minalignslop = 0; 3726 } 3727 } else { 3728 args.alignment = 1; 3729 args.minalignslop = 0; 3730 } 3731 args.minleft = ap->minleft; 3732 args.wasdel = ap->wasdel; 3733 args.resv = XFS_AG_RESV_NONE; 3734 args.datatype = ap->datatype; 3735 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) 3736 args.ip = ap->ip; 3737 3738 error = xfs_alloc_vextent(&args); 3739 if (error) 3740 return error; 3741 3742 if (tryagain && args.fsbno == NULLFSBLOCK) { 3743 /* 3744 * Exact allocation failed. Now try with alignment 3745 * turned on. 3746 */ 3747 args.type = atype; 3748 args.fsbno = ap->blkno; 3749 args.alignment = stripe_align; 3750 args.minlen = nextminlen; 3751 args.minalignslop = 0; 3752 isaligned = 1; 3753 if ((error = xfs_alloc_vextent(&args))) 3754 return error; 3755 } 3756 if (isaligned && args.fsbno == NULLFSBLOCK) { 3757 /* 3758 * allocation failed, so turn off alignment and 3759 * try again. 3760 */ 3761 args.type = atype; 3762 args.fsbno = ap->blkno; 3763 args.alignment = 0; 3764 if ((error = xfs_alloc_vextent(&args))) 3765 return error; 3766 } 3767 if (args.fsbno == NULLFSBLOCK && nullfb && 3768 args.minlen > ap->minlen) { 3769 args.minlen = ap->minlen; 3770 args.type = XFS_ALLOCTYPE_START_BNO; 3771 args.fsbno = ap->blkno; 3772 if ((error = xfs_alloc_vextent(&args))) 3773 return error; 3774 } 3775 if (args.fsbno == NULLFSBLOCK && nullfb) { 3776 args.fsbno = 0; 3777 args.type = XFS_ALLOCTYPE_FIRST_AG; 3778 args.total = ap->minlen; 3779 if ((error = xfs_alloc_vextent(&args))) 3780 return error; 3781 ap->dfops->dop_low = true; 3782 } 3783 if (args.fsbno != NULLFSBLOCK) { 3784 /* 3785 * check the allocation happened at the same or higher AG than 3786 * the first block that was allocated. 3787 */ 3788 ASSERT(*ap->firstblock == NULLFSBLOCK || 3789 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <= 3790 XFS_FSB_TO_AGNO(mp, args.fsbno)); 3791 3792 ap->blkno = args.fsbno; 3793 if (*ap->firstblock == NULLFSBLOCK) 3794 *ap->firstblock = args.fsbno; 3795 ASSERT(nullfb || fb_agno <= args.agno); 3796 ap->length = args.len; 3797 if (!(ap->flags & XFS_BMAPI_COWFORK)) 3798 ap->ip->i_d.di_nblocks += args.len; 3799 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3800 if (ap->wasdel) 3801 ap->ip->i_delayed_blks -= args.len; 3802 /* 3803 * Adjust the disk quota also. This was reserved 3804 * earlier. 3805 */ 3806 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3807 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : 3808 XFS_TRANS_DQ_BCOUNT, 3809 (long) args.len); 3810 } else { 3811 ap->blkno = NULLFSBLOCK; 3812 ap->length = 0; 3813 } 3814 return 0; 3815 } 3816 3817 /* 3818 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 3819 * It figures out where to ask the underlying allocator to put the new extent. 3820 */ 3821 STATIC int 3822 xfs_bmap_alloc( 3823 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3824 { 3825 if (XFS_IS_REALTIME_INODE(ap->ip) && 3826 xfs_alloc_is_userdata(ap->datatype)) 3827 return xfs_bmap_rtalloc(ap); 3828 return xfs_bmap_btalloc(ap); 3829 } 3830 3831 /* Trim extent to fit a logical block range. 
*/ 3832 void 3833 xfs_trim_extent( 3834 struct xfs_bmbt_irec *irec, 3835 xfs_fileoff_t bno, 3836 xfs_filblks_t len) 3837 { 3838 xfs_fileoff_t distance; 3839 xfs_fileoff_t end = bno + len; 3840 3841 if (irec->br_startoff + irec->br_blockcount <= bno || 3842 irec->br_startoff >= end) { 3843 irec->br_blockcount = 0; 3844 return; 3845 } 3846 3847 if (irec->br_startoff < bno) { 3848 distance = bno - irec->br_startoff; 3849 if (isnullstartblock(irec->br_startblock)) 3850 irec->br_startblock = DELAYSTARTBLOCK; 3851 if (irec->br_startblock != DELAYSTARTBLOCK && 3852 irec->br_startblock != HOLESTARTBLOCK) 3853 irec->br_startblock += distance; 3854 irec->br_startoff += distance; 3855 irec->br_blockcount -= distance; 3856 } 3857 3858 if (end < irec->br_startoff + irec->br_blockcount) { 3859 distance = irec->br_startoff + irec->br_blockcount - end; 3860 irec->br_blockcount -= distance; 3861 } 3862 } 3863 3864 /* 3865 * Trim the returned map to the required bounds 3866 */ 3867 STATIC void 3868 xfs_bmapi_trim_map( 3869 struct xfs_bmbt_irec *mval, 3870 struct xfs_bmbt_irec *got, 3871 xfs_fileoff_t *bno, 3872 xfs_filblks_t len, 3873 xfs_fileoff_t obno, 3874 xfs_fileoff_t end, 3875 int n, 3876 int flags) 3877 { 3878 if ((flags & XFS_BMAPI_ENTIRE) || 3879 got->br_startoff + got->br_blockcount <= obno) { 3880 *mval = *got; 3881 if (isnullstartblock(got->br_startblock)) 3882 mval->br_startblock = DELAYSTARTBLOCK; 3883 return; 3884 } 3885 3886 if (obno > *bno) 3887 *bno = obno; 3888 ASSERT((*bno >= obno) || (n == 0)); 3889 ASSERT(*bno < end); 3890 mval->br_startoff = *bno; 3891 if (isnullstartblock(got->br_startblock)) 3892 mval->br_startblock = DELAYSTARTBLOCK; 3893 else 3894 mval->br_startblock = got->br_startblock + 3895 (*bno - got->br_startoff); 3896 /* 3897 * Return the minimum of what we got and what we asked for for 3898 * the length. We can use the len variable here because it is 3899 * modified below and we could have been there before coming 3900 * here if the first part of the allocation didn't overlap what 3901 * was asked for. 
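 * For example, if the extent found covers file blocks 10-29 and the request starts at block 15 for 25 blocks (end = 40), the trimmed map starts at 15 and covers min(40 - 15, 20 - 5) = 15 blocks.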
3902 */ 3903 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3904 got->br_blockcount - (*bno - got->br_startoff)); 3905 mval->br_state = got->br_state; 3906 ASSERT(mval->br_blockcount <= len); 3907 return; 3908 } 3909 3910 /* 3911 * Update and validate the extent map to return 3912 */ 3913 STATIC void 3914 xfs_bmapi_update_map( 3915 struct xfs_bmbt_irec **map, 3916 xfs_fileoff_t *bno, 3917 xfs_filblks_t *len, 3918 xfs_fileoff_t obno, 3919 xfs_fileoff_t end, 3920 int *n, 3921 int flags) 3922 { 3923 xfs_bmbt_irec_t *mval = *map; 3924 3925 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3926 ((mval->br_startoff + mval->br_blockcount) <= end)); 3927 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3928 (mval->br_startoff < obno)); 3929 3930 *bno = mval->br_startoff + mval->br_blockcount; 3931 *len = end - *bno; 3932 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3933 /* update previous map with new information */ 3934 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3935 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3936 ASSERT(mval->br_state == mval[-1].br_state); 3937 mval[-1].br_blockcount = mval->br_blockcount; 3938 mval[-1].br_state = mval->br_state; 3939 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3940 mval[-1].br_startblock != DELAYSTARTBLOCK && 3941 mval[-1].br_startblock != HOLESTARTBLOCK && 3942 mval->br_startblock == mval[-1].br_startblock + 3943 mval[-1].br_blockcount && 3944 ((flags & XFS_BMAPI_IGSTATE) || 3945 mval[-1].br_state == mval->br_state)) { 3946 ASSERT(mval->br_startoff == 3947 mval[-1].br_startoff + mval[-1].br_blockcount); 3948 mval[-1].br_blockcount += mval->br_blockcount; 3949 } else if (*n > 0 && 3950 mval->br_startblock == DELAYSTARTBLOCK && 3951 mval[-1].br_startblock == DELAYSTARTBLOCK && 3952 mval->br_startoff == 3953 mval[-1].br_startoff + mval[-1].br_blockcount) { 3954 mval[-1].br_blockcount += mval->br_blockcount; 3955 mval[-1].br_state = mval->br_state; 3956 } else if (!((*n == 0) && 3957 ((mval->br_startoff + mval->br_blockcount) <= 3958 obno))) { 3959 mval++; 3960 (*n)++; 3961 } 3962 *map = mval; 3963 } 3964 3965 /* 3966 * Map file blocks to filesystem blocks without allocation. 3967 */ 3968 int 3969 xfs_bmapi_read( 3970 struct xfs_inode *ip, 3971 xfs_fileoff_t bno, 3972 xfs_filblks_t len, 3973 struct xfs_bmbt_irec *mval, 3974 int *nmap, 3975 int flags) 3976 { 3977 struct xfs_mount *mp = ip->i_mount; 3978 struct xfs_ifork *ifp; 3979 struct xfs_bmbt_irec got; 3980 xfs_fileoff_t obno; 3981 xfs_fileoff_t end; 3982 xfs_extnum_t idx; 3983 int error; 3984 bool eof = false; 3985 int n = 0; 3986 int whichfork = xfs_bmapi_whichfork(flags); 3987 3988 ASSERT(*nmap >= 1); 3989 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| 3990 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK))); 3991 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3992 3993 if (unlikely(XFS_TEST_ERROR( 3994 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 3995 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 3996 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 3997 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 3998 return -EFSCORRUPTED; 3999 } 4000 4001 if (XFS_FORCED_SHUTDOWN(mp)) 4002 return -EIO; 4003 4004 XFS_STATS_INC(mp, xs_blk_mapr); 4005 4006 ifp = XFS_IFORK_PTR(ip, whichfork); 4007 4008 /* No CoW fork? Return a hole. 
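 * If the CoW fork has never been allocated for this inode there is nothing mapped in it, so the whole requested range reads back as a hole.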
*/ 4009 if (whichfork == XFS_COW_FORK && !ifp) { 4010 mval->br_startoff = bno; 4011 mval->br_startblock = HOLESTARTBLOCK; 4012 mval->br_blockcount = len; 4013 mval->br_state = XFS_EXT_NORM; 4014 *nmap = 1; 4015 return 0; 4016 } 4017 4018 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4019 error = xfs_iread_extents(NULL, ip, whichfork); 4020 if (error) 4021 return error; 4022 } 4023 4024 if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) 4025 eof = true; 4026 end = bno + len; 4027 obno = bno; 4028 4029 while (bno < end && n < *nmap) { 4030 /* Reading past eof, act as though there's a hole up to end. */ 4031 if (eof) 4032 got.br_startoff = end; 4033 if (got.br_startoff > bno) { 4034 /* Reading in a hole. */ 4035 mval->br_startoff = bno; 4036 mval->br_startblock = HOLESTARTBLOCK; 4037 mval->br_blockcount = 4038 XFS_FILBLKS_MIN(len, got.br_startoff - bno); 4039 mval->br_state = XFS_EXT_NORM; 4040 bno += mval->br_blockcount; 4041 len -= mval->br_blockcount; 4042 mval++; 4043 n++; 4044 continue; 4045 } 4046 4047 /* set up the extent map to return. */ 4048 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); 4049 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4050 4051 /* If we're done, stop now. */ 4052 if (bno >= end || n >= *nmap) 4053 break; 4054 4055 /* Else go on to the next record. */ 4056 if (!xfs_iext_get_extent(ifp, ++idx, &got)) 4057 eof = true; 4058 } 4059 *nmap = n; 4060 return 0; 4061 } 4062 4063 /* 4064 * Add a delayed allocation extent to an inode. Blocks are reserved from the 4065 * global pool and the extent inserted into the inode in-core extent tree. 4066 * 4067 * On entry, got refers to the first extent beyond the offset of the extent to 4068 * allocate or eof is specified if no such extent exists. On return, got refers 4069 * to the extent record that was inserted to the inode fork. 4070 * 4071 * Note that the allocated extent may have been merged with contiguous extents 4072 * during insertion into the inode fork. Thus, got does not reflect the current 4073 * state of the inode fork on return. If necessary, the caller can use lastx to 4074 * look up the updated record in the inode fork. 4075 */ 4076 int 4077 xfs_bmapi_reserve_delalloc( 4078 struct xfs_inode *ip, 4079 int whichfork, 4080 xfs_fileoff_t off, 4081 xfs_filblks_t len, 4082 xfs_filblks_t prealloc, 4083 struct xfs_bmbt_irec *got, 4084 xfs_extnum_t *lastx, 4085 int eof) 4086 { 4087 struct xfs_mount *mp = ip->i_mount; 4088 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4089 xfs_extlen_t alen; 4090 xfs_extlen_t indlen; 4091 char rt = XFS_IS_REALTIME_INODE(ip); 4092 xfs_extlen_t extsz; 4093 int error; 4094 xfs_fileoff_t aoff = off; 4095 4096 /* 4097 * Cap the alloc length. Keep track of prealloc so we know whether to 4098 * tag the inode before we return. 
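 * Illustrative numbers: a request of len = 8 with prealloc = 8 starts with alen = 16; if the next extent begins 12 blocks past the offset, alen is clamped to 12 and prealloc is trimmed to 12 - 8 = 4, so the inode still gets tagged for preallocated blocks below.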
4099 */ 4100 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN); 4101 if (!eof) 4102 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); 4103 if (prealloc && alen >= len) 4104 prealloc = alen - len; 4105 4106 /* Figure out the extent size, adjust alen */ 4107 if (whichfork == XFS_COW_FORK) 4108 extsz = xfs_get_cowextsz_hint(ip); 4109 else 4110 extsz = xfs_get_extsz_hint(ip); 4111 if (extsz) { 4112 struct xfs_bmbt_irec prev; 4113 4114 if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev)) 4115 prev.br_startoff = NULLFILEOFF; 4116 4117 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof, 4118 1, 0, &aoff, &alen); 4119 ASSERT(!error); 4120 } 4121 4122 if (rt) 4123 extsz = alen / mp->m_sb.sb_rextsize; 4124 4125 /* 4126 * Make a transaction-less quota reservation for delayed allocation 4127 * blocks. This number gets adjusted later. We return if we haven't 4128 * allocated blocks already inside this loop. 4129 */ 4130 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0, 4131 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4132 if (error) 4133 return error; 4134 4135 /* 4136 * Split changing sb for alen and indlen since they could be coming 4137 * from different places. 4138 */ 4139 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); 4140 ASSERT(indlen > 0); 4141 4142 if (rt) { 4143 error = xfs_mod_frextents(mp, -((int64_t)extsz)); 4144 } else { 4145 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); 4146 } 4147 4148 if (error) 4149 goto out_unreserve_quota; 4150 4151 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); 4152 if (error) 4153 goto out_unreserve_blocks; 4154 4155 4156 ip->i_delayed_blks += alen; 4157 4158 got->br_startoff = aoff; 4159 got->br_startblock = nullstartblock(indlen); 4160 got->br_blockcount = alen; 4161 got->br_state = XFS_EXT_NORM; 4162 4163 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got); 4164 4165 /* 4166 * Tag the inode if blocks were preallocated. Note that COW fork 4167 * preallocation can occur at the start or end of the extent, even when 4168 * prealloc == 0, so we must also check the aligned offset and length. 4169 */ 4170 if (whichfork == XFS_DATA_FORK && prealloc) 4171 xfs_inode_set_eofblocks_tag(ip); 4172 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 4173 xfs_inode_set_cowblocks_tag(ip); 4174 4175 return 0; 4176 4177 out_unreserve_blocks: 4178 if (rt) 4179 xfs_mod_frextents(mp, extsz); 4180 else 4181 xfs_mod_fdblocks(mp, alen, false); 4182 out_unreserve_quota: 4183 if (XFS_IS_QUOTA_ON(mp)) 4184 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ? 4185 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4186 return error; 4187 } 4188 4189 static int 4190 xfs_bmapi_allocate( 4191 struct xfs_bmalloca *bma) 4192 { 4193 struct xfs_mount *mp = bma->ip->i_mount; 4194 int whichfork = xfs_bmapi_whichfork(bma->flags); 4195 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4196 int tmp_logflags = 0; 4197 int error; 4198 4199 ASSERT(bma->length > 0); 4200 4201 /* 4202 * For the wasdelay case, we could also just allocate the stuff asked 4203 * for in this bmap call but that wouldn't be as good. 
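 * Instead we allocate the whole delayed extent (bma->got) in one go below, rather than only the range this call asked for, so the delalloc reservation is converted by a single real allocation.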
4204 */ 4205 if (bma->wasdel) { 4206 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4207 bma->offset = bma->got.br_startoff; 4208 if (bma->idx) { 4209 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), 4210 &bma->prev); 4211 } 4212 } else { 4213 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); 4214 if (!bma->eof) 4215 bma->length = XFS_FILBLKS_MIN(bma->length, 4216 bma->got.br_startoff - bma->offset); 4217 } 4218 4219 /* 4220 * Set the data type being allocated. For the data fork, the first data 4221 * in the file is treated differently to all other allocations. For the 4222 * attribute fork, we only need to ensure the allocated range is not on 4223 * the busy list. 4224 */ 4225 if (!(bma->flags & XFS_BMAPI_METADATA)) { 4226 bma->datatype = XFS_ALLOC_NOBUSY; 4227 if (whichfork == XFS_DATA_FORK) { 4228 if (bma->offset == 0) 4229 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; 4230 else 4231 bma->datatype |= XFS_ALLOC_USERDATA; 4232 } 4233 if (bma->flags & XFS_BMAPI_ZERO) 4234 bma->datatype |= XFS_ALLOC_USERDATA_ZERO; 4235 } 4236 4237 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1; 4238 4239 /* 4240 * Only want to do the alignment at the eof if it is userdata and 4241 * allocation length is larger than a stripe unit. 4242 */ 4243 if (mp->m_dalign && bma->length >= mp->m_dalign && 4244 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4245 error = xfs_bmap_isaeof(bma, whichfork); 4246 if (error) 4247 return error; 4248 } 4249 4250 error = xfs_bmap_alloc(bma); 4251 if (error) 4252 return error; 4253 4254 if (bma->cur) 4255 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4256 if (bma->blkno == NULLFSBLOCK) 4257 return 0; 4258 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4259 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4260 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4261 bma->cur->bc_private.b.dfops = bma->dfops; 4262 } 4263 /* 4264 * Bump the number of extents we've allocated 4265 * in this call. 4266 */ 4267 bma->nallocs++; 4268 4269 if (bma->cur) 4270 bma->cur->bc_private.b.flags = 4271 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4272 4273 bma->got.br_startoff = bma->offset; 4274 bma->got.br_startblock = bma->blkno; 4275 bma->got.br_blockcount = bma->length; 4276 bma->got.br_state = XFS_EXT_NORM; 4277 4278 /* 4279 * In the data fork, a wasdelay extent has been initialized, so 4280 * shouldn't be flagged as unwritten. 4281 * 4282 * For the cow fork, however, we convert delalloc reservations 4283 * (extents allocated for speculative preallocation) to 4284 * allocated unwritten extents, and only convert the unwritten 4285 * extents to real extents when we're about to write the data. 4286 */ 4287 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && 4288 (bma->flags & XFS_BMAPI_PREALLOC) && 4289 xfs_sb_version_hasextflgbit(&mp->m_sb)) 4290 bma->got.br_state = XFS_EXT_UNWRITTEN; 4291 4292 if (bma->wasdel) 4293 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4294 else 4295 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4296 whichfork, &bma->idx, &bma->cur, &bma->got, 4297 bma->firstblock, bma->dfops, &bma->logflags); 4298 4299 bma->logflags |= tmp_logflags; 4300 if (error) 4301 return error; 4302 4303 /* 4304 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4305 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4306 * the neighbouring ones. 
4307 */ 4308 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got); 4309 4310 ASSERT(bma->got.br_startoff <= bma->offset); 4311 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4312 bma->offset + bma->length); 4313 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4314 bma->got.br_state == XFS_EXT_UNWRITTEN); 4315 return 0; 4316 } 4317 4318 STATIC int 4319 xfs_bmapi_convert_unwritten( 4320 struct xfs_bmalloca *bma, 4321 struct xfs_bmbt_irec *mval, 4322 xfs_filblks_t len, 4323 int flags) 4324 { 4325 int whichfork = xfs_bmapi_whichfork(flags); 4326 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4327 int tmp_logflags = 0; 4328 int error; 4329 4330 /* check if we need to do unwritten->real conversion */ 4331 if (mval->br_state == XFS_EXT_UNWRITTEN && 4332 (flags & XFS_BMAPI_PREALLOC)) 4333 return 0; 4334 4335 /* check if we need to do real->unwritten conversion */ 4336 if (mval->br_state == XFS_EXT_NORM && 4337 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4338 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4339 return 0; 4340 4341 /* 4342 * Modify (by adding) the state flag, if writing. 4343 */ 4344 ASSERT(mval->br_blockcount <= len); 4345 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4346 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4347 bma->ip, whichfork); 4348 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4349 bma->cur->bc_private.b.dfops = bma->dfops; 4350 } 4351 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4352 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4353 4354 /* 4355 * Before insertion into the bmbt, zero the range being converted 4356 * if required. 4357 */ 4358 if (flags & XFS_BMAPI_ZERO) { 4359 error = xfs_zero_extent(bma->ip, mval->br_startblock, 4360 mval->br_blockcount); 4361 if (error) 4362 return error; 4363 } 4364 4365 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork, 4366 &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops, 4367 &tmp_logflags); 4368 /* 4369 * Log the inode core unconditionally in the unwritten extent conversion 4370 * path because the conversion might not have done so (e.g., if the 4371 * extent count hasn't changed). We need to make sure the inode is dirty 4372 * in the transaction for the sake of fsync(), even if nothing has 4373 * changed, because fsync() will not force the log for this transaction 4374 * unless it sees the inode pinned. 4375 * 4376 * Note: If we're only converting cow fork extents, there aren't 4377 * any on-disk updates to make, so we don't need to log anything. 4378 */ 4379 if (whichfork != XFS_COW_FORK) 4380 bma->logflags |= tmp_logflags | XFS_ILOG_CORE; 4381 if (error) 4382 return error; 4383 4384 /* 4385 * Update our extent pointer, given that 4386 * xfs_bmap_add_extent_unwritten_real might have merged it into one 4387 * of the neighbouring ones. 4388 */ 4389 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got); 4390 4391 /* 4392 * We may have combined previously unwritten space with written space, 4393 * so generate another request. 4394 */ 4395 if (mval->br_blockcount < len) 4396 return -EAGAIN; 4397 return 0; 4398 } 4399 4400 /* 4401 * Map file blocks to filesystem blocks, and allocate blocks or convert the 4402 * extent state if necessary. Details behaviour is controlled by the flags 4403 * parameter. Only allocates blocks from a single allocation group, to avoid 4404 * locking problems. 
4405 * 4406 * The returned value in "firstblock" from the first call in a transaction 4407 * must be remembered and presented to subsequent calls in "firstblock". 4408 * An upper bound for the number of blocks to be allocated is supplied to 4409 * the first call in "total"; if no allocation group has that many free 4410 * blocks then the call will fail (return NULLFSBLOCK in "firstblock"). 4411 */ 4412 int 4413 xfs_bmapi_write( 4414 struct xfs_trans *tp, /* transaction pointer */ 4415 struct xfs_inode *ip, /* incore inode */ 4416 xfs_fileoff_t bno, /* starting file offs. mapped */ 4417 xfs_filblks_t len, /* length to map in file */ 4418 int flags, /* XFS_BMAPI_... */ 4419 xfs_fsblock_t *firstblock, /* first allocated block 4420 controls a.g. for allocs */ 4421 xfs_extlen_t total, /* total blocks needed */ 4422 struct xfs_bmbt_irec *mval, /* output: map values */ 4423 int *nmap, /* i/o: mval size/count */ 4424 struct xfs_defer_ops *dfops) /* i/o: list extents to free */ 4425 { 4426 struct xfs_mount *mp = ip->i_mount; 4427 struct xfs_ifork *ifp; 4428 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */ 4429 xfs_fileoff_t end; /* end of mapped file region */ 4430 bool eof = false; /* after the end of extents */ 4431 int error; /* error return */ 4432 int n; /* current extent index */ 4433 xfs_fileoff_t obno; /* old block number (offset) */ 4434 int whichfork; /* data or attr fork */ 4435 4436 #ifdef DEBUG 4437 xfs_fileoff_t orig_bno; /* original block number value */ 4438 int orig_flags; /* original flags arg value */ 4439 xfs_filblks_t orig_len; /* original value of len arg */ 4440 struct xfs_bmbt_irec *orig_mval; /* original value of mval */ 4441 int orig_nmap; /* original value of *nmap */ 4442 4443 orig_bno = bno; 4444 orig_len = len; 4445 orig_flags = flags; 4446 orig_mval = mval; 4447 orig_nmap = *nmap; 4448 #endif 4449 whichfork = xfs_bmapi_whichfork(flags); 4450 4451 ASSERT(*nmap >= 1); 4452 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); 4453 ASSERT(!(flags & XFS_BMAPI_IGSTATE)); 4454 ASSERT(tp != NULL || 4455 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) == 4456 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)); 4457 ASSERT(len > 0); 4458 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); 4459 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4460 ASSERT(!(flags & XFS_BMAPI_REMAP)); 4461 4462 /* zeroing is for currently only for data extents, not metadata */ 4463 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) != 4464 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)); 4465 /* 4466 * we can allocate unwritten extents or pre-zero allocated blocks, 4467 * but it makes no sense to do both at once. This would result in 4468 * zeroing the unwritten extent twice, but it still being an 4469 * unwritten extent.... 
4470 */ 4471 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) != 4472 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)); 4473 4474 if (unlikely(XFS_TEST_ERROR( 4475 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4476 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4477 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 4478 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); 4479 return -EFSCORRUPTED; 4480 } 4481 4482 if (XFS_FORCED_SHUTDOWN(mp)) 4483 return -EIO; 4484 4485 ifp = XFS_IFORK_PTR(ip, whichfork); 4486 4487 XFS_STATS_INC(mp, xs_blk_mapw); 4488 4489 if (*firstblock == NULLFSBLOCK) { 4490 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) 4491 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1; 4492 else 4493 bma.minleft = 1; 4494 } else { 4495 bma.minleft = 0; 4496 } 4497 4498 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4499 error = xfs_iread_extents(tp, ip, whichfork); 4500 if (error) 4501 goto error0; 4502 } 4503 4504 n = 0; 4505 end = bno + len; 4506 obno = bno; 4507 4508 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got)) 4509 eof = true; 4510 if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev)) 4511 bma.prev.br_startoff = NULLFILEOFF; 4512 bma.tp = tp; 4513 bma.ip = ip; 4514 bma.total = total; 4515 bma.datatype = 0; 4516 bma.dfops = dfops; 4517 bma.firstblock = firstblock; 4518 4519 while (bno < end && n < *nmap) { 4520 bool need_alloc = false, wasdelay = false; 4521 4522 /* in hole or beyoned EOF? */ 4523 if (eof || bma.got.br_startoff > bno) { 4524 if (flags & XFS_BMAPI_DELALLOC) { 4525 /* 4526 * For the COW fork we can reasonably get a 4527 * request for converting an extent that races 4528 * with other threads already having converted 4529 * part of it, as there converting COW to 4530 * regular blocks is not protected using the 4531 * IOLOCK. 4532 */ 4533 ASSERT(flags & XFS_BMAPI_COWFORK); 4534 if (!(flags & XFS_BMAPI_COWFORK)) { 4535 error = -EIO; 4536 goto error0; 4537 } 4538 4539 if (eof || bno >= end) 4540 break; 4541 } else { 4542 need_alloc = true; 4543 } 4544 } else if (isnullstartblock(bma.got.br_startblock)) { 4545 wasdelay = true; 4546 } 4547 4548 /* 4549 * First, deal with the hole before the allocated space 4550 * that we found, if any. 4551 */ 4552 if (need_alloc || wasdelay) { 4553 bma.eof = eof; 4554 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4555 bma.wasdel = wasdelay; 4556 bma.offset = bno; 4557 bma.flags = flags; 4558 4559 /* 4560 * There's a 32/64 bit type mismatch between the 4561 * allocation length request (which can be 64 bits in 4562 * length) and the bma length request, which is 4563 * xfs_extlen_t and therefore 32 bits. Hence we have to 4564 * check for 32-bit overflows and handle them here. 4565 */ 4566 if (len > (xfs_filblks_t)MAXEXTLEN) 4567 bma.length = MAXEXTLEN; 4568 else 4569 bma.length = len; 4570 4571 ASSERT(len > 0); 4572 ASSERT(bma.length > 0); 4573 error = xfs_bmapi_allocate(&bma); 4574 if (error) 4575 goto error0; 4576 if (bma.blkno == NULLFSBLOCK) 4577 break; 4578 4579 /* 4580 * If this is a CoW allocation, record the data in 4581 * the refcount btree for orphan recovery. 4582 */ 4583 if (whichfork == XFS_COW_FORK) { 4584 error = xfs_refcount_alloc_cow_extent(mp, dfops, 4585 bma.blkno, bma.length); 4586 if (error) 4587 goto error0; 4588 } 4589 } 4590 4591 /* Deal with the allocated space we found. 
*/ 4592 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4593 end, n, flags); 4594 4595 /* Execute unwritten extent conversion if necessary */ 4596 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4597 if (error == -EAGAIN) 4598 continue; 4599 if (error) 4600 goto error0; 4601 4602 /* update the extent map to return */ 4603 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4604 4605 /* 4606 * If we're done, stop now. Stop when we've allocated 4607 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4608 * the transaction may get too big. 4609 */ 4610 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4611 break; 4612 4613 /* Else go on to the next record. */ 4614 bma.prev = bma.got; 4615 if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got)) 4616 eof = true; 4617 } 4618 *nmap = n; 4619 4620 /* 4621 * Transform from btree to extents, give it cur. 4622 */ 4623 if (xfs_bmap_wants_extents(ip, whichfork)) { 4624 int tmp_logflags = 0; 4625 4626 ASSERT(bma.cur); 4627 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, 4628 &tmp_logflags, whichfork); 4629 bma.logflags |= tmp_logflags; 4630 if (error) 4631 goto error0; 4632 } 4633 4634 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || 4635 XFS_IFORK_NEXTENTS(ip, whichfork) > 4636 XFS_IFORK_MAXEXT(ip, whichfork)); 4637 error = 0; 4638 error0: 4639 /* 4640 * Log everything. Do this after conversion, there's no point in 4641 * logging the extent records if we've converted to btree format. 4642 */ 4643 if ((bma.logflags & xfs_ilog_fext(whichfork)) && 4644 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 4645 bma.logflags &= ~xfs_ilog_fext(whichfork); 4646 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) && 4647 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 4648 bma.logflags &= ~xfs_ilog_fbroot(whichfork); 4649 /* 4650 * Log whatever the flags say, even if error. Otherwise we might miss 4651 * detecting a case where the data is changed, there's an error, 4652 * and it's not logged so we don't shutdown when we should. 4653 */ 4654 if (bma.logflags) 4655 xfs_trans_log_inode(tp, ip, bma.logflags); 4656 4657 if (bma.cur) { 4658 if (!error) { 4659 ASSERT(*firstblock == NULLFSBLOCK || 4660 XFS_FSB_TO_AGNO(mp, *firstblock) <= 4661 XFS_FSB_TO_AGNO(mp, 4662 bma.cur->bc_private.b.firstblock)); 4663 *firstblock = bma.cur->bc_private.b.firstblock; 4664 } 4665 xfs_btree_del_cursor(bma.cur, 4666 error ? 
XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 4667 } 4668 if (!error) 4669 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4670 orig_nmap, *nmap); 4671 return error; 4672 } 4673 4674 static int 4675 xfs_bmapi_remap( 4676 struct xfs_trans *tp, 4677 struct xfs_inode *ip, 4678 xfs_fileoff_t bno, 4679 xfs_filblks_t len, 4680 xfs_fsblock_t startblock, 4681 struct xfs_defer_ops *dfops) 4682 { 4683 struct xfs_mount *mp = ip->i_mount; 4684 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 4685 struct xfs_btree_cur *cur = NULL; 4686 xfs_fsblock_t firstblock = NULLFSBLOCK; 4687 struct xfs_bmbt_irec got; 4688 xfs_extnum_t idx; 4689 int logflags = 0, error; 4690 4691 ASSERT(len > 0); 4692 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN); 4693 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4694 4695 if (unlikely(XFS_TEST_ERROR( 4696 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS && 4697 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE), 4698 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 4699 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp); 4700 return -EFSCORRUPTED; 4701 } 4702 4703 if (XFS_FORCED_SHUTDOWN(mp)) 4704 return -EIO; 4705 4706 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4707 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); 4708 if (error) 4709 return error; 4710 } 4711 4712 if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) { 4713 /* make sure we only reflink into a hole. */ 4714 ASSERT(got.br_startoff > bno); 4715 ASSERT(got.br_startoff - bno >= len); 4716 } 4717 4718 ip->i_d.di_nblocks += len; 4719 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4720 4721 if (ifp->if_flags & XFS_IFBROOT) { 4722 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); 4723 cur->bc_private.b.firstblock = firstblock; 4724 cur->bc_private.b.dfops = dfops; 4725 cur->bc_private.b.flags = 0; 4726 } 4727 4728 got.br_startoff = bno; 4729 got.br_startblock = startblock; 4730 got.br_blockcount = len; 4731 got.br_state = XFS_EXT_NORM; 4732 4733 error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur, 4734 &got, &firstblock, dfops, &logflags); 4735 if (error) 4736 goto error0; 4737 4738 if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) { 4739 int tmp_logflags = 0; 4740 4741 error = xfs_bmap_btree_to_extents(tp, ip, cur, 4742 &tmp_logflags, XFS_DATA_FORK); 4743 logflags |= tmp_logflags; 4744 } 4745 4746 error0: 4747 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) 4748 logflags &= ~XFS_ILOG_DEXT; 4749 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) 4750 logflags &= ~XFS_ILOG_DBROOT; 4751 4752 if (logflags) 4753 xfs_trans_log_inode(tp, ip, logflags); 4754 if (cur) { 4755 xfs_btree_del_cursor(cur, 4756 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 4757 } 4758 return error; 4759 } 4760 4761 /* 4762 * When a delalloc extent is split (e.g., due to a hole punch), the original 4763 * indlen reservation must be shared across the two new extents that are left 4764 * behind. 4765 * 4766 * Given the original reservation and the worst case indlen for the two new 4767 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4768 * reservation fairly across the two new extents. If necessary, steal available 4769 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4770 * ores == 1). The number of stolen blocks is returned. The availability and 4771 * subsequent accounting of stolen blocks is the responsibility of the caller. 4772 */ 4773 static xfs_filblks_t 4774 xfs_bmap_split_indlen( 4775 xfs_filblks_t ores, /* original res. 
*/ 4776 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4777 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4778 xfs_filblks_t avail) /* stealable blocks */ 4779 { 4780 xfs_filblks_t len1 = *indlen1; 4781 xfs_filblks_t len2 = *indlen2; 4782 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4783 xfs_filblks_t stolen = 0; 4784 xfs_filblks_t resfactor; 4785 4786 /* 4787 * Steal as many blocks as we can to try and satisfy the worst case 4788 * indlen for both new extents. 4789 */ 4790 if (ores < nres && avail) 4791 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4792 ores += stolen; 4793 4794 /* nothing else to do if we've satisfied the new reservation */ 4795 if (ores >= nres) 4796 return stolen; 4797 4798 /* 4799 * We can't meet the total required reservation for the two extents. 4800 * Calculate the percent of the overall shortage between both extents 4801 * and apply this percentage to each of the requested indlen values. 4802 * This distributes the shortage fairly and reduces the chances that one 4803 * of the two extents is left with nothing when extents are repeatedly 4804 * split. 4805 */ 4806 resfactor = (ores * 100); 4807 do_div(resfactor, nres); 4808 len1 *= resfactor; 4809 do_div(len1, 100); 4810 len2 *= resfactor; 4811 do_div(len2, 100); 4812 ASSERT(len1 + len2 <= ores); 4813 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4814 4815 /* 4816 * Hand out the remainder to each extent. If one of the two reservations 4817 * is zero, we want to make sure that one gets a block first. The loop 4818 * below starts with len1, so hand len2 a block right off the bat if it 4819 * is zero. 4820 */ 4821 ores -= (len1 + len2); 4822 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4823 if (ores && !len2 && *indlen2) { 4824 len2++; 4825 ores--; 4826 } 4827 while (ores) { 4828 if (len1 < *indlen1) { 4829 len1++; 4830 ores--; 4831 } 4832 if (!ores) 4833 break; 4834 if (len2 < *indlen2) { 4835 len2++; 4836 ores--; 4837 } 4838 } 4839 4840 *indlen1 = len1; 4841 *indlen2 = len2; 4842 4843 return stolen; 4844 } 4845 4846 int 4847 xfs_bmap_del_extent_delay( 4848 struct xfs_inode *ip, 4849 int whichfork, 4850 xfs_extnum_t *idx, 4851 struct xfs_bmbt_irec *got, 4852 struct xfs_bmbt_irec *del) 4853 { 4854 struct xfs_mount *mp = ip->i_mount; 4855 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4856 struct xfs_bmbt_irec new; 4857 int64_t da_old, da_new, da_diff = 0; 4858 xfs_fileoff_t del_endoff, got_endoff; 4859 xfs_filblks_t got_indlen, new_indlen, stolen; 4860 int error = 0, state = 0; 4861 bool isrt; 4862 4863 XFS_STATS_INC(mp, xs_del_exlist); 4864 4865 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4866 del_endoff = del->br_startoff + del->br_blockcount; 4867 got_endoff = got->br_startoff + got->br_blockcount; 4868 da_old = startblockval(got->br_startblock); 4869 da_new = 0; 4870 4871 ASSERT(*idx >= 0); 4872 ASSERT(*idx <= xfs_iext_count(ifp)); 4873 ASSERT(del->br_blockcount > 0); 4874 ASSERT(got->br_startoff <= del->br_startoff); 4875 ASSERT(got_endoff >= del_endoff); 4876 4877 if (isrt) { 4878 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4879 4880 do_div(rtexts, mp->m_sb.sb_rextsize); 4881 xfs_mod_frextents(mp, rtexts); 4882 } 4883 4884 /* 4885 * Update the inode delalloc counter now and wait to update the 4886 * sb counters as we might have to borrow some blocks for the 4887 * indirect block accounting. 4888 */ 4889 error = xfs_trans_reserve_quota_nblks(NULL, ip, 4890 -((long)del->br_blockcount), 0, 4891 isrt ? 
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4892 if (error) 4893 return error; 4894 ip->i_delayed_blks -= del->br_blockcount; 4895 4896 if (whichfork == XFS_COW_FORK) 4897 state |= BMAP_COWFORK; 4898 4899 if (got->br_startoff == del->br_startoff) 4900 state |= BMAP_LEFT_CONTIG; 4901 if (got_endoff == del_endoff) 4902 state |= BMAP_RIGHT_CONTIG; 4903 4904 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 4905 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 4906 /* 4907 * Matches the whole extent. Delete the entry. 4908 */ 4909 xfs_iext_remove(ip, *idx, 1, state); 4910 --*idx; 4911 break; 4912 case BMAP_LEFT_CONTIG: 4913 /* 4914 * Deleting the first part of the extent. 4915 */ 4916 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4917 got->br_startoff = del_endoff; 4918 got->br_blockcount -= del->br_blockcount; 4919 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4920 got->br_blockcount), da_old); 4921 got->br_startblock = nullstartblock((int)da_new); 4922 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4923 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 4924 break; 4925 case BMAP_RIGHT_CONTIG: 4926 /* 4927 * Deleting the last part of the extent. 4928 */ 4929 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4930 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4931 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4932 got->br_blockcount), da_old); 4933 got->br_startblock = nullstartblock((int)da_new); 4934 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4935 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 4936 break; 4937 case 0: 4938 /* 4939 * Deleting the middle of the extent. 4940 * 4941 * Distribute the original indlen reservation across the two new 4942 * extents. Steal blocks from the deleted extent if necessary. 4943 * Stealing blocks simply fudges the fdblocks accounting below. 4944 * Warn if either of the new indlen reservations is zero as this 4945 * can lead to delalloc problems. 
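 * Rough example: with an original reservation of 4 blocks and a worst-case need of 3 + 3 blocks for the two remaining pieces, xfs_bmap_split_indlen() steals up to 2 blocks from the freed range, and the fdblocks adjustment at the end of this function accounts for the difference.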
4946 */ 4947 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4948 4949 got->br_blockcount = del->br_startoff - got->br_startoff; 4950 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4951 4952 new.br_blockcount = got_endoff - del_endoff; 4953 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4954 4955 WARN_ON_ONCE(!got_indlen || !new_indlen); 4956 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4957 del->br_blockcount); 4958 4959 got->br_startblock = nullstartblock((int)got_indlen); 4960 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 4961 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_); 4962 4963 new.br_startoff = del_endoff; 4964 new.br_state = got->br_state; 4965 new.br_startblock = nullstartblock((int)new_indlen); 4966 4967 ++*idx; 4968 xfs_iext_insert(ip, *idx, 1, &new, state); 4969 4970 da_new = got_indlen + new_indlen - stolen; 4971 del->br_blockcount -= stolen; 4972 break; 4973 } 4974 4975 ASSERT(da_old >= da_new); 4976 da_diff = da_old - da_new; 4977 if (!isrt) 4978 da_diff += del->br_blockcount; 4979 if (da_diff) 4980 xfs_mod_fdblocks(mp, da_diff, false); 4981 return error; 4982 } 4983 4984 void 4985 xfs_bmap_del_extent_cow( 4986 struct xfs_inode *ip, 4987 xfs_extnum_t *idx, 4988 struct xfs_bmbt_irec *got, 4989 struct xfs_bmbt_irec *del) 4990 { 4991 struct xfs_mount *mp = ip->i_mount; 4992 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4993 struct xfs_bmbt_irec new; 4994 xfs_fileoff_t del_endoff, got_endoff; 4995 int state = BMAP_COWFORK; 4996 4997 XFS_STATS_INC(mp, xs_del_exlist); 4998 4999 del_endoff = del->br_startoff + del->br_blockcount; 5000 got_endoff = got->br_startoff + got->br_blockcount; 5001 5002 ASSERT(*idx >= 0); 5003 ASSERT(*idx <= xfs_iext_count(ifp)); 5004 ASSERT(del->br_blockcount > 0); 5005 ASSERT(got->br_startoff <= del->br_startoff); 5006 ASSERT(got_endoff >= del_endoff); 5007 ASSERT(!isnullstartblock(got->br_startblock)); 5008 5009 if (got->br_startoff == del->br_startoff) 5010 state |= BMAP_LEFT_CONTIG; 5011 if (got_endoff == del_endoff) 5012 state |= BMAP_RIGHT_CONTIG; 5013 5014 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 5015 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 5016 /* 5017 * Matches the whole extent. Delete the entry. 5018 */ 5019 xfs_iext_remove(ip, *idx, 1, state); 5020 --*idx; 5021 break; 5022 case BMAP_LEFT_CONTIG: 5023 /* 5024 * Deleting the first part of the extent. 5025 */ 5026 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5027 got->br_startoff = del_endoff; 5028 got->br_blockcount -= del->br_blockcount; 5029 got->br_startblock = del->br_startblock + del->br_blockcount; 5030 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 5031 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5032 break; 5033 case BMAP_RIGHT_CONTIG: 5034 /* 5035 * Deleting the last part of the extent. 5036 */ 5037 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5038 got->br_blockcount -= del->br_blockcount; 5039 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 5040 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5041 break; 5042 case 0: 5043 /* 5044 * Deleting the middle of the extent. 
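 * The left piece keeps the existing record, shortened to end where the deleted range starts, and the right piece is inserted as a new record beginning at the end of the deleted range.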
5045 */ 5046 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5047 got->br_blockcount = del->br_startoff - got->br_startoff; 5048 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got); 5049 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5050 5051 new.br_startoff = del_endoff; 5052 new.br_blockcount = got_endoff - del_endoff; 5053 new.br_state = got->br_state; 5054 new.br_startblock = del->br_startblock + del->br_blockcount; 5055 5056 ++*idx; 5057 xfs_iext_insert(ip, *idx, 1, &new, state); 5058 break; 5059 } 5060 } 5061 5062 /* 5063 * Called by xfs_bmapi to update file extent records and the btree 5064 * after removing space (or undoing a delayed allocation). 5065 */ 5066 STATIC int /* error */ 5067 xfs_bmap_del_extent( 5068 xfs_inode_t *ip, /* incore inode pointer */ 5069 xfs_trans_t *tp, /* current transaction pointer */ 5070 xfs_extnum_t *idx, /* extent number to update/delete */ 5071 struct xfs_defer_ops *dfops, /* list of extents to be freed */ 5072 xfs_btree_cur_t *cur, /* if null, not a btree */ 5073 xfs_bmbt_irec_t *del, /* data to remove from extents */ 5074 int *logflagsp, /* inode logging flags */ 5075 int whichfork, /* data or attr fork */ 5076 int bflags) /* bmapi flags */ 5077 { 5078 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */ 5079 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */ 5080 xfs_fsblock_t del_endblock=0; /* first block past del */ 5081 xfs_fileoff_t del_endoff; /* first offset past del */ 5082 int delay; /* current block is delayed allocated */ 5083 int do_fx; /* free extent at end of routine */ 5084 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */ 5085 int error; /* error return value */ 5086 int flags; /* inode logging flags */ 5087 xfs_bmbt_irec_t got; /* current extent entry */ 5088 xfs_fileoff_t got_endoff; /* first offset past got */ 5089 int i; /* temp state */ 5090 xfs_ifork_t *ifp; /* inode fork pointer */ 5091 xfs_mount_t *mp; /* mount structure */ 5092 xfs_filblks_t nblks; /* quota/sb block count */ 5093 xfs_bmbt_irec_t new; /* new record to be inserted */ 5094 /* REFERENCED */ 5095 uint qfield; /* quota field to update */ 5096 xfs_filblks_t temp; /* for indirect length calculations */ 5097 xfs_filblks_t temp2; /* for indirect length calculations */ 5098 int state = 0; 5099 5100 mp = ip->i_mount; 5101 XFS_STATS_INC(mp, xs_del_exlist); 5102 5103 if (whichfork == XFS_ATTR_FORK) 5104 state |= BMAP_ATTRFORK; 5105 else if (whichfork == XFS_COW_FORK) 5106 state |= BMAP_COWFORK; 5107 5108 ifp = XFS_IFORK_PTR(ip, whichfork); 5109 ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp))); 5110 ASSERT(del->br_blockcount > 0); 5111 ep = xfs_iext_get_ext(ifp, *idx); 5112 xfs_bmbt_get_all(ep, &got); 5113 ASSERT(got.br_startoff <= del->br_startoff); 5114 del_endoff = del->br_startoff + del->br_blockcount; 5115 got_endoff = got.br_startoff + got.br_blockcount; 5116 ASSERT(got_endoff >= del_endoff); 5117 delay = isnullstartblock(got.br_startblock); 5118 ASSERT(isnullstartblock(del->br_startblock) == delay); 5119 flags = 0; 5120 qfield = 0; 5121 error = 0; 5122 /* 5123 * If deleting a real allocation, must free up the disk space. 5124 */ 5125 if (!delay) { 5126 flags = XFS_ILOG_CORE; 5127 /* 5128 * Realtime allocation. Free it and record di_nblocks update. 
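 * The start block and length are converted from filesystem blocks to realtime extent units (divided by sb_rextsize) before being handed to xfs_rtfree_extent() below.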
5129 */ 5130 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 5131 xfs_fsblock_t bno; 5132 xfs_filblks_t len; 5133 5134 ASSERT(do_mod(del->br_blockcount, 5135 mp->m_sb.sb_rextsize) == 0); 5136 ASSERT(do_mod(del->br_startblock, 5137 mp->m_sb.sb_rextsize) == 0); 5138 bno = del->br_startblock; 5139 len = del->br_blockcount; 5140 do_div(bno, mp->m_sb.sb_rextsize); 5141 do_div(len, mp->m_sb.sb_rextsize); 5142 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 5143 if (error) 5144 goto done; 5145 do_fx = 0; 5146 nblks = len * mp->m_sb.sb_rextsize; 5147 qfield = XFS_TRANS_DQ_RTBCOUNT; 5148 } 5149 /* 5150 * Ordinary allocation. 5151 */ 5152 else { 5153 do_fx = 1; 5154 nblks = del->br_blockcount; 5155 qfield = XFS_TRANS_DQ_BCOUNT; 5156 } 5157 /* 5158 * Set up del_endblock and cur for later. 5159 */ 5160 del_endblock = del->br_startblock + del->br_blockcount; 5161 if (cur) { 5162 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff, 5163 got.br_startblock, got.br_blockcount, 5164 &i))) 5165 goto done; 5166 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5167 } 5168 da_old = da_new = 0; 5169 } else { 5170 da_old = startblockval(got.br_startblock); 5171 da_new = 0; 5172 nblks = 0; 5173 do_fx = 0; 5174 } 5175 5176 /* 5177 * Set flag value to use in switch statement. 5178 * Left-contig is 2, right-contig is 1. 5179 */ 5180 switch (((got.br_startoff == del->br_startoff) << 1) | 5181 (got_endoff == del_endoff)) { 5182 case 3: 5183 /* 5184 * Matches the whole extent. Delete the entry. 5185 */ 5186 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5187 xfs_iext_remove(ip, *idx, 1, 5188 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); 5189 --*idx; 5190 if (delay) 5191 break; 5192 5193 XFS_IFORK_NEXT_SET(ip, whichfork, 5194 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5195 flags |= XFS_ILOG_CORE; 5196 if (!cur) { 5197 flags |= xfs_ilog_fext(whichfork); 5198 break; 5199 } 5200 if ((error = xfs_btree_delete(cur, &i))) 5201 goto done; 5202 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5203 break; 5204 5205 case 2: 5206 /* 5207 * Deleting the first part of the extent. 5208 */ 5209 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5210 xfs_bmbt_set_startoff(ep, del_endoff); 5211 temp = got.br_blockcount - del->br_blockcount; 5212 xfs_bmbt_set_blockcount(ep, temp); 5213 if (delay) { 5214 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 5215 da_old); 5216 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5217 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5218 da_new = temp; 5219 break; 5220 } 5221 xfs_bmbt_set_startblock(ep, del_endblock); 5222 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5223 if (!cur) { 5224 flags |= xfs_ilog_fext(whichfork); 5225 break; 5226 } 5227 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, 5228 got.br_blockcount - del->br_blockcount, 5229 got.br_state))) 5230 goto done; 5231 break; 5232 5233 case 1: 5234 /* 5235 * Deleting the last part of the extent. 
5236 */ 5237 temp = got.br_blockcount - del->br_blockcount; 5238 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5239 xfs_bmbt_set_blockcount(ep, temp); 5240 if (delay) { 5241 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 5242 da_old); 5243 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5244 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5245 da_new = temp; 5246 break; 5247 } 5248 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5249 if (!cur) { 5250 flags |= xfs_ilog_fext(whichfork); 5251 break; 5252 } 5253 if ((error = xfs_bmbt_update(cur, got.br_startoff, 5254 got.br_startblock, 5255 got.br_blockcount - del->br_blockcount, 5256 got.br_state))) 5257 goto done; 5258 break; 5259 5260 case 0: 5261 /* 5262 * Deleting the middle of the extent. 5263 */ 5264 temp = del->br_startoff - got.br_startoff; 5265 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5266 xfs_bmbt_set_blockcount(ep, temp); 5267 new.br_startoff = del_endoff; 5268 temp2 = got_endoff - del_endoff; 5269 new.br_blockcount = temp2; 5270 new.br_state = got.br_state; 5271 if (!delay) { 5272 new.br_startblock = del_endblock; 5273 flags |= XFS_ILOG_CORE; 5274 if (cur) { 5275 if ((error = xfs_bmbt_update(cur, 5276 got.br_startoff, 5277 got.br_startblock, temp, 5278 got.br_state))) 5279 goto done; 5280 if ((error = xfs_btree_increment(cur, 0, &i))) 5281 goto done; 5282 cur->bc_rec.b = new; 5283 error = xfs_btree_insert(cur, &i); 5284 if (error && error != -ENOSPC) 5285 goto done; 5286 /* 5287 * If get no-space back from btree insert, 5288 * it tried a split, and we have a zero 5289 * block reservation. 5290 * Fix up our state and return the error. 5291 */ 5292 if (error == -ENOSPC) { 5293 /* 5294 * Reset the cursor, don't trust 5295 * it after any insert operation. 5296 */ 5297 if ((error = xfs_bmbt_lookup_eq(cur, 5298 got.br_startoff, 5299 got.br_startblock, 5300 temp, &i))) 5301 goto done; 5302 XFS_WANT_CORRUPTED_GOTO(mp, 5303 i == 1, done); 5304 /* 5305 * Update the btree record back 5306 * to the original value. 5307 */ 5308 if ((error = xfs_bmbt_update(cur, 5309 got.br_startoff, 5310 got.br_startblock, 5311 got.br_blockcount, 5312 got.br_state))) 5313 goto done; 5314 /* 5315 * Reset the extent record back 5316 * to the original value. 5317 */ 5318 xfs_bmbt_set_blockcount(ep, 5319 got.br_blockcount); 5320 flags = 0; 5321 error = -ENOSPC; 5322 goto done; 5323 } 5324 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5325 } else 5326 flags |= xfs_ilog_fext(whichfork); 5327 XFS_IFORK_NEXT_SET(ip, whichfork, 5328 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5329 } else { 5330 xfs_filblks_t stolen; 5331 ASSERT(whichfork == XFS_DATA_FORK); 5332 5333 /* 5334 * Distribute the original indlen reservation across the 5335 * two new extents. Steal blocks from the deleted extent 5336 * if necessary. Stealing blocks simply fudges the 5337 * fdblocks accounting in xfs_bunmapi(). 5338 */ 5339 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount); 5340 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount); 5341 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2, 5342 del->br_blockcount); 5343 da_new = temp + temp2 - stolen; 5344 del->br_blockcount -= stolen; 5345 5346 /* 5347 * Set the reservation for each extent. Warn if either 5348 * is zero as this can lead to delalloc problems. 
5349 */ 5350 WARN_ON_ONCE(!temp || !temp2); 5351 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5352 new.br_startblock = nullstartblock((int)temp2); 5353 } 5354 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5355 xfs_iext_insert(ip, *idx + 1, 1, &new, state); 5356 ++*idx; 5357 break; 5358 } 5359 5360 /* remove reverse mapping */ 5361 if (!delay) { 5362 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del); 5363 if (error) 5364 goto done; 5365 } 5366 5367 /* 5368 * If we need to, add to list of extents to delete. 5369 */ 5370 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { 5371 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5372 error = xfs_refcount_decrease_extent(mp, dfops, del); 5373 if (error) 5374 goto done; 5375 } else 5376 xfs_bmap_add_free(mp, dfops, del->br_startblock, 5377 del->br_blockcount, NULL); 5378 } 5379 5380 /* 5381 * Adjust inode # blocks in the file. 5382 */ 5383 if (nblks) 5384 ip->i_d.di_nblocks -= nblks; 5385 /* 5386 * Adjust quota data. 5387 */ 5388 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5389 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5390 5391 /* 5392 * Account for change in delayed indirect blocks. 5393 * Nothing to do for disk quota accounting here. 5394 */ 5395 ASSERT(da_old >= da_new); 5396 if (da_old > da_new) 5397 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false); 5398 done: 5399 *logflagsp = flags; 5400 return error; 5401 } 5402 5403 /* 5404 * Unmap (remove) blocks from a file. 5405 * If nexts is nonzero then the number of extents to remove is limited to 5406 * that value. If not all extents in the block range can be removed then 5407 * *done is set. 5408 */ 5409 int /* error */ 5410 __xfs_bunmapi( 5411 xfs_trans_t *tp, /* transaction pointer */ 5412 struct xfs_inode *ip, /* incore inode */ 5413 xfs_fileoff_t bno, /* starting offset to unmap */ 5414 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5415 int flags, /* misc flags */ 5416 xfs_extnum_t nexts, /* number of extents max */ 5417 xfs_fsblock_t *firstblock, /* first allocated block 5418 controls a.g. 
for allocs */ 5419 struct xfs_defer_ops *dfops) /* i/o: deferred updates */ 5420 { 5421 xfs_btree_cur_t *cur; /* bmap btree cursor */ 5422 xfs_bmbt_irec_t del; /* extent being deleted */ 5423 int error; /* error return value */ 5424 xfs_extnum_t extno; /* extent number in list */ 5425 xfs_bmbt_irec_t got; /* current extent record */ 5426 xfs_ifork_t *ifp; /* inode fork pointer */ 5427 int isrt; /* freeing in rt area */ 5428 xfs_extnum_t lastx; /* last extent index used */ 5429 int logflags; /* transaction logging flags */ 5430 xfs_extlen_t mod; /* rt extent offset */ 5431 xfs_mount_t *mp; /* mount structure */ 5432 xfs_fileoff_t start; /* first file offset deleted */ 5433 int tmp_logflags; /* partial logging flags */ 5434 int wasdel; /* was a delayed alloc extent */ 5435 int whichfork; /* data or attribute fork */ 5436 xfs_fsblock_t sum; 5437 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5438 5439 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); 5440 5441 whichfork = xfs_bmapi_whichfork(flags); 5442 ASSERT(whichfork != XFS_COW_FORK); 5443 ifp = XFS_IFORK_PTR(ip, whichfork); 5444 if (unlikely( 5445 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5446 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5447 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5448 ip->i_mount); 5449 return -EFSCORRUPTED; 5450 } 5451 mp = ip->i_mount; 5452 if (XFS_FORCED_SHUTDOWN(mp)) 5453 return -EIO; 5454 5455 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5456 ASSERT(len > 0); 5457 ASSERT(nexts >= 0); 5458 5459 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5460 (error = xfs_iread_extents(tp, ip, whichfork))) 5461 return error; 5462 if (xfs_iext_count(ifp) == 0) { 5463 *rlen = 0; 5464 return 0; 5465 } 5466 XFS_STATS_INC(mp, xs_blk_unmap); 5467 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5468 start = bno; 5469 bno = start + len - 1; 5470 5471 /* 5472 * Check to see if the given block number is past the end of the 5473 * file, back up to the last block if so... 5474 */ 5475 if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) { 5476 ASSERT(lastx > 0); 5477 xfs_iext_get_extent(ifp, --lastx, &got); 5478 bno = got.br_startoff + got.br_blockcount - 1; 5479 } 5480 5481 logflags = 0; 5482 if (ifp->if_flags & XFS_IFBROOT) { 5483 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5484 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5485 cur->bc_private.b.firstblock = *firstblock; 5486 cur->bc_private.b.dfops = dfops; 5487 cur->bc_private.b.flags = 0; 5488 } else 5489 cur = NULL; 5490 5491 if (isrt) { 5492 /* 5493 * Synchronize by locking the bitmap inode. 5494 */ 5495 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5496 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5497 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5498 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5499 } 5500 5501 extno = 0; 5502 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 && 5503 (nexts == 0 || extno < nexts)) { 5504 /* 5505 * Is the found extent after a hole in which bno lives? 5506 * Just back up to the previous extent, if so. 5507 */ 5508 if (got.br_startoff > bno) { 5509 if (--lastx < 0) 5510 break; 5511 xfs_iext_get_extent(ifp, lastx, &got); 5512 } 5513 /* 5514 * Is the last block of this extent before the range 5515 * we're supposed to delete? If so, we're done. 
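 *
 * Illustrative aside: the loop walks extents from high file offsets to low.
 * "bno" holds the highest file block still to be unmapped, so it is clamped
 * to the last block of the extent just found, and the walk stops once it
 * drops below "start".  A minimal sketch of that clamp, using the in-core
 * record layout from this file:
 *
 *	static xfs_fileoff_t
 *	clamp_to_extent_end(xfs_fileoff_t bno, const struct xfs_bmbt_irec *got)
 *	{
 *		xfs_fileoff_t	end = got->br_startoff +
 *				      got->br_blockcount - 1;
 *
 *		return bno < end ? bno : end;	// i.e. XFS_FILEOFF_MIN()
 *	}
 *
 * Example: unmapping [100, 150) starts with bno = 149; if the extent found
 * is [120, 130), bno is clamped to 129 and unmapping continues, whereas an
 * extent ending at or below block 99 would clamp bno below start and stop
 * the loop.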
5516 */ 5517 bno = XFS_FILEOFF_MIN(bno, 5518 got.br_startoff + got.br_blockcount - 1); 5519 if (bno < start) 5520 break; 5521 /* 5522 * Then deal with the (possibly delayed) allocated space 5523 * we found. 5524 */ 5525 del = got; 5526 wasdel = isnullstartblock(del.br_startblock); 5527 if (got.br_startoff < start) { 5528 del.br_startoff = start; 5529 del.br_blockcount -= start - got.br_startoff; 5530 if (!wasdel) 5531 del.br_startblock += start - got.br_startoff; 5532 } 5533 if (del.br_startoff + del.br_blockcount > bno + 1) 5534 del.br_blockcount = bno + 1 - del.br_startoff; 5535 sum = del.br_startblock + del.br_blockcount; 5536 if (isrt && 5537 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) { 5538 /* 5539 * Realtime extent not lined up at the end. 5540 * The extent could have been split into written 5541 * and unwritten pieces, or we could just be 5542 * unmapping part of it. But we can't really 5543 * get rid of part of a realtime extent. 5544 */ 5545 if (del.br_state == XFS_EXT_UNWRITTEN || 5546 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5547 /* 5548 * This piece is unwritten, or we're not 5549 * using unwritten extents. Skip over it. 5550 */ 5551 ASSERT(bno >= mod); 5552 bno -= mod > del.br_blockcount ? 5553 del.br_blockcount : mod; 5554 if (bno < got.br_startoff) { 5555 if (--lastx >= 0) 5556 xfs_bmbt_get_all(xfs_iext_get_ext( 5557 ifp, lastx), &got); 5558 } 5559 continue; 5560 } 5561 /* 5562 * It's written, turn it unwritten. 5563 * This is better than zeroing it. 5564 */ 5565 ASSERT(del.br_state == XFS_EXT_NORM); 5566 ASSERT(tp->t_blk_res > 0); 5567 /* 5568 * If this spans a realtime extent boundary, 5569 * chop it back to the start of the one we end at. 5570 */ 5571 if (del.br_blockcount > mod) { 5572 del.br_startoff += del.br_blockcount - mod; 5573 del.br_startblock += del.br_blockcount - mod; 5574 del.br_blockcount = mod; 5575 } 5576 del.br_state = XFS_EXT_UNWRITTEN; 5577 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5578 whichfork, &lastx, &cur, &del, 5579 firstblock, dfops, &logflags); 5580 if (error) 5581 goto error0; 5582 goto nodelete; 5583 } 5584 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) { 5585 /* 5586 * Realtime extent is lined up at the end but not 5587 * at the front. We'll get rid of full extents if 5588 * we can. 5589 */ 5590 mod = mp->m_sb.sb_rextsize - mod; 5591 if (del.br_blockcount > mod) { 5592 del.br_blockcount -= mod; 5593 del.br_startoff += mod; 5594 del.br_startblock += mod; 5595 } else if ((del.br_startoff == start && 5596 (del.br_state == XFS_EXT_UNWRITTEN || 5597 tp->t_blk_res == 0)) || 5598 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5599 /* 5600 * Can't make it unwritten. There isn't 5601 * a full extent here so just skip it. 5602 */ 5603 ASSERT(bno >= del.br_blockcount); 5604 bno -= del.br_blockcount; 5605 if (got.br_startoff > bno && --lastx >= 0) 5606 xfs_iext_get_extent(ifp, lastx, &got); 5607 continue; 5608 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5609 struct xfs_bmbt_irec prev; 5610 5611 /* 5612 * This one is already unwritten. 5613 * It must have a written left neighbor. 5614 * Unwrite the killed part of that one and 5615 * try again. 
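 *
 * Illustrative aside: before the written left neighbour is converted, it is
 * trimmed so that only the portion inside the unmap range (at or beyond
 * "start") gets flipped to unwritten.  A minimal sketch of that trim on an
 * in-core record:
 *
 *	static void
 *	trim_irec_to_start(struct xfs_bmbt_irec *prev, xfs_fileoff_t start)
 *	{
 *		xfs_filblks_t	skip;
 *
 *		if (prev->br_startoff >= start)
 *			return;		// already inside the unmap range
 *
 *		skip = start - prev->br_startoff;
 *		prev->br_blockcount -= skip;
 *		prev->br_startblock += skip;
 *		prev->br_startoff = start;
 *	}
 *
 * Example: prev = [offset 90, 20 blocks) with start = 100 becomes
 * [offset 100, 10 blocks), and only those 10 blocks are converted to
 * unwritten before the unmap is retried.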
5616 */ 5617 ASSERT(lastx > 0); 5618 xfs_iext_get_extent(ifp, lastx - 1, &prev); 5619 ASSERT(prev.br_state == XFS_EXT_NORM); 5620 ASSERT(!isnullstartblock(prev.br_startblock)); 5621 ASSERT(del.br_startblock == 5622 prev.br_startblock + prev.br_blockcount); 5623 if (prev.br_startoff < start) { 5624 mod = start - prev.br_startoff; 5625 prev.br_blockcount -= mod; 5626 prev.br_startblock += mod; 5627 prev.br_startoff = start; 5628 } 5629 prev.br_state = XFS_EXT_UNWRITTEN; 5630 lastx--; 5631 error = xfs_bmap_add_extent_unwritten_real(tp, 5632 ip, whichfork, &lastx, &cur, 5633 &prev, firstblock, dfops, 5634 &logflags); 5635 if (error) 5636 goto error0; 5637 goto nodelete; 5638 } else { 5639 ASSERT(del.br_state == XFS_EXT_NORM); 5640 del.br_state = XFS_EXT_UNWRITTEN; 5641 error = xfs_bmap_add_extent_unwritten_real(tp, 5642 ip, whichfork, &lastx, &cur, 5643 &del, firstblock, dfops, 5644 &logflags); 5645 if (error) 5646 goto error0; 5647 goto nodelete; 5648 } 5649 } 5650 5651 /* 5652 * If it's the case where the directory code is running 5653 * with no block reservation, and the deleted block is in 5654 * the middle of its extent, and the resulting insert 5655 * of an extent would cause transformation to btree format, 5656 * then reject it. The calling code will then swap 5657 * blocks around instead. 5658 * We have to do this now, rather than waiting for the 5659 * conversion to btree format, since the transaction 5660 * will be dirty. 5661 */ 5662 if (!wasdel && tp->t_blk_res == 0 && 5663 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 5664 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */ 5665 XFS_IFORK_MAXEXT(ip, whichfork) && 5666 del.br_startoff > got.br_startoff && 5667 del.br_startoff + del.br_blockcount < 5668 got.br_startoff + got.br_blockcount) { 5669 error = -ENOSPC; 5670 goto error0; 5671 } 5672 5673 /* 5674 * Unreserve quota and update realtime free space, if 5675 * appropriate. If delayed allocation, update the inode delalloc 5676 * counter now and wait to update the sb counters as 5677 * xfs_bmap_del_extent() might need to borrow some blocks. 5678 */ 5679 if (wasdel) { 5680 ASSERT(startblockval(del.br_startblock) > 0); 5681 if (isrt) { 5682 xfs_filblks_t rtexts; 5683 5684 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); 5685 do_div(rtexts, mp->m_sb.sb_rextsize); 5686 xfs_mod_frextents(mp, (int64_t)rtexts); 5687 (void)xfs_trans_reserve_quota_nblks(NULL, 5688 ip, -((long)del.br_blockcount), 0, 5689 XFS_QMOPT_RES_RTBLKS); 5690 } else { 5691 (void)xfs_trans_reserve_quota_nblks(NULL, 5692 ip, -((long)del.br_blockcount), 0, 5693 XFS_QMOPT_RES_REGBLKS); 5694 } 5695 ip->i_delayed_blks -= del.br_blockcount; 5696 if (cur) 5697 cur->bc_private.b.flags |= 5698 XFS_BTCUR_BPRV_WASDEL; 5699 } else if (cur) 5700 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL; 5701 5702 error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del, 5703 &tmp_logflags, whichfork, flags); 5704 logflags |= tmp_logflags; 5705 if (error) 5706 goto error0; 5707 5708 if (!isrt && wasdel) 5709 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false); 5710 5711 bno = del.br_startoff - 1; 5712 nodelete: 5713 /* 5714 * If not done go on to the next (previous) record. 
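 *
 * Illustrative aside: after a piece is removed, the candidate block moves
 * to just before it (bno = del.br_startoff - 1); if the record now at
 * "lastx" starts beyond that block, the walk has landed in a hole and steps
 * back one more record.  The loop keeps going while all of the following
 * hold, restated as a small predicate:
 *
 *	static bool
 *	keep_unmapping(xfs_fileoff_t bno, xfs_fileoff_t start,
 *		       xfs_extnum_t lastx, xfs_extnum_t extno,
 *		       xfs_extnum_t nexts)
 *	{
 *		return bno != (xfs_fileoff_t)-1 && bno >= start &&
 *		       lastx >= 0 && (nexts == 0 || extno < nexts);
 *	}
 *
 * Example: with extents at [50, 60) and [80, 90) and an unmap of [55, 85),
 * the first pass removes [80, 85), bno drops to 79, the walk steps back
 * across the hole to [50, 60), and the second pass removes [55, 60) before
 * bno finally falls below start.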
5715 */ 5716 if (bno != (xfs_fileoff_t)-1 && bno >= start) { 5717 if (lastx >= 0) { 5718 xfs_iext_get_extent(ifp, lastx, &got); 5719 if (got.br_startoff > bno && --lastx >= 0) 5720 xfs_iext_get_extent(ifp, lastx, &got); 5721 } 5722 extno++; 5723 } 5724 } 5725 if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0) 5726 *rlen = 0; 5727 else 5728 *rlen = bno - start + 1; 5729 5730 /* 5731 * Convert to a btree if necessary. 5732 */ 5733 if (xfs_bmap_needs_btree(ip, whichfork)) { 5734 ASSERT(cur == NULL); 5735 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, 5736 &cur, 0, &tmp_logflags, whichfork); 5737 logflags |= tmp_logflags; 5738 if (error) 5739 goto error0; 5740 } 5741 /* 5742 * transform from btree to extents, give it cur 5743 */ 5744 else if (xfs_bmap_wants_extents(ip, whichfork)) { 5745 ASSERT(cur != NULL); 5746 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, 5747 whichfork); 5748 logflags |= tmp_logflags; 5749 if (error) 5750 goto error0; 5751 } 5752 /* 5753 * transform from extents to local? 5754 */ 5755 error = 0; 5756 error0: 5757 /* 5758 * Log everything. Do this after conversion, there's no point in 5759 * logging the extent records if we've converted to btree format. 5760 */ 5761 if ((logflags & xfs_ilog_fext(whichfork)) && 5762 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5763 logflags &= ~xfs_ilog_fext(whichfork); 5764 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5765 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5766 logflags &= ~xfs_ilog_fbroot(whichfork); 5767 /* 5768 * Log inode even in the error case, if the transaction 5769 * is dirty we'll need to shut down the filesystem. 5770 */ 5771 if (logflags) 5772 xfs_trans_log_inode(tp, ip, logflags); 5773 if (cur) { 5774 if (!error) { 5775 *firstblock = cur->bc_private.b.firstblock; 5776 cur->bc_private.b.allocated = 0; 5777 } 5778 xfs_btree_del_cursor(cur, 5779 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 5780 } 5781 return error; 5782 } 5783 5784 /* Unmap a range of a file. */ 5785 int 5786 xfs_bunmapi( 5787 xfs_trans_t *tp, 5788 struct xfs_inode *ip, 5789 xfs_fileoff_t bno, 5790 xfs_filblks_t len, 5791 int flags, 5792 xfs_extnum_t nexts, 5793 xfs_fsblock_t *firstblock, 5794 struct xfs_defer_ops *dfops, 5795 int *done) 5796 { 5797 int error; 5798 5799 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock, 5800 dfops); 5801 *done = (len == 0); 5802 return error; 5803 } 5804 5805 /* 5806 * Determine whether an extent shift can be accomplished by a merge with the 5807 * extent that precedes the target hole of the shift. 5808 */ 5809 STATIC bool 5810 xfs_bmse_can_merge( 5811 struct xfs_bmbt_irec *left, /* preceding extent */ 5812 struct xfs_bmbt_irec *got, /* current extent to shift */ 5813 xfs_fileoff_t shift) /* shift fsb */ 5814 { 5815 xfs_fileoff_t startoff; 5816 5817 startoff = got->br_startoff - shift; 5818 5819 /* 5820 * The extent, once shifted, must be adjacent in-file and on-disk with 5821 * the preceding extent. 5822 */ 5823 if ((left->br_startoff + left->br_blockcount != startoff) || 5824 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5825 (left->br_state != got->br_state) || 5826 (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) 5827 return false; 5828 5829 return true; 5830 } 5831 5832 /* 5833 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5834 * hole in the file. 
If an extent shift would result in the extent being fully 5835 * adjacent to the extent that currently precedes the hole, we can merge with 5836 * the preceding extent rather than do the shift. 5837 * 5838 * This function assumes the caller has verified a shift-by-merge is possible 5839 * with the provided extents via xfs_bmse_can_merge(). 5840 */ 5841 STATIC int 5842 xfs_bmse_merge( 5843 struct xfs_inode *ip, 5844 int whichfork, 5845 xfs_fileoff_t shift, /* shift fsb */ 5846 int current_ext, /* idx of gotp */ 5847 struct xfs_bmbt_rec_host *gotp, /* extent to shift */ 5848 struct xfs_bmbt_rec_host *leftp, /* preceding extent */ 5849 struct xfs_btree_cur *cur, 5850 int *logflags) /* output */ 5851 { 5852 struct xfs_bmbt_irec got; 5853 struct xfs_bmbt_irec left; 5854 xfs_filblks_t blockcount; 5855 int error, i; 5856 struct xfs_mount *mp = ip->i_mount; 5857 5858 xfs_bmbt_get_all(gotp, &got); 5859 xfs_bmbt_get_all(leftp, &left); 5860 blockcount = left.br_blockcount + got.br_blockcount; 5861 5862 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5863 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5864 ASSERT(xfs_bmse_can_merge(&left, &got, shift)); 5865 5866 /* 5867 * Merge the in-core extents. Note that the host record pointers and 5868 * current_ext index are invalid once the extent has been removed via 5869 * xfs_iext_remove(). 5870 */ 5871 xfs_bmbt_set_blockcount(leftp, blockcount); 5872 xfs_iext_remove(ip, current_ext, 1, 0); 5873 5874 /* 5875 * Update the on-disk extent count, the btree if necessary and log the 5876 * inode. 5877 */ 5878 XFS_IFORK_NEXT_SET(ip, whichfork, 5879 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5880 *logflags |= XFS_ILOG_CORE; 5881 if (!cur) { 5882 *logflags |= XFS_ILOG_DEXT; 5883 return 0; 5884 } 5885 5886 /* lookup and remove the extent to merge */ 5887 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock, 5888 got.br_blockcount, &i); 5889 if (error) 5890 return error; 5891 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5892 5893 error = xfs_btree_delete(cur, &i); 5894 if (error) 5895 return error; 5896 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5897 5898 /* lookup and update size of the previous extent */ 5899 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock, 5900 left.br_blockcount, &i); 5901 if (error) 5902 return error; 5903 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5904 5905 left.br_blockcount = blockcount; 5906 5907 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock, 5908 left.br_blockcount, left.br_state); 5909 } 5910 5911 /* 5912 * Shift a single extent. 
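 *
 * Illustrative aside: when shifting left, an extent may merge with its left
 * neighbour instead of moving.  A worked example of the adjacency test in
 * xfs_bmse_can_merge() above: with left = {br_startoff 100, br_startblock
 * 500, br_blockcount 20} and got = {br_startoff 130, br_startblock 520,
 * br_blockcount 30}, a shift of 10 puts got at file offset 120 = 100 + 20
 * while its disk address 520 already equals 500 + 20, so (same state and
 * MAXEXTLEN permitting) the two collapse into {100, 500, 50}.  The same
 * test, restated as a standalone predicate:
 *
 *	static bool
 *	mergeable_after_shift(const struct xfs_bmbt_irec *left,
 *			      const struct xfs_bmbt_irec *got,
 *			      xfs_fileoff_t shift)
 *	{
 *		xfs_fileoff_t	new_off = got->br_startoff - shift;
 *
 *		return left->br_startoff + left->br_blockcount == new_off &&
 *		       left->br_startblock + left->br_blockcount ==
 *							got->br_startblock &&
 *		       left->br_state == got->br_state &&
 *		       left->br_blockcount + got->br_blockcount <= MAXEXTLEN;
 *	}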
5913 */ 5914 STATIC int 5915 xfs_bmse_shift_one( 5916 struct xfs_inode *ip, 5917 int whichfork, 5918 xfs_fileoff_t offset_shift_fsb, 5919 int *current_ext, 5920 struct xfs_bmbt_rec_host *gotp, 5921 struct xfs_btree_cur *cur, 5922 int *logflags, 5923 enum shift_direction direction, 5924 struct xfs_defer_ops *dfops) 5925 { 5926 struct xfs_ifork *ifp; 5927 struct xfs_mount *mp; 5928 xfs_fileoff_t startoff; 5929 struct xfs_bmbt_rec_host *adj_irecp; 5930 struct xfs_bmbt_irec got; 5931 struct xfs_bmbt_irec adj_irec; 5932 int error; 5933 int i; 5934 int total_extents; 5935 5936 mp = ip->i_mount; 5937 ifp = XFS_IFORK_PTR(ip, whichfork); 5938 total_extents = xfs_iext_count(ifp); 5939 5940 xfs_bmbt_get_all(gotp, &got); 5941 5942 /* delalloc extents should be prevented by caller */ 5943 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock)); 5944 5945 if (direction == SHIFT_LEFT) { 5946 startoff = got.br_startoff - offset_shift_fsb; 5947 5948 /* 5949 * Check for merge if we've got an extent to the left, 5950 * otherwise make sure there's enough room at the start 5951 * of the file for the shift. 5952 */ 5953 if (!*current_ext) { 5954 if (got.br_startoff < offset_shift_fsb) 5955 return -EINVAL; 5956 goto update_current_ext; 5957 } 5958 /* 5959 * grab the left extent and check for a large 5960 * enough hole. 5961 */ 5962 adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1); 5963 xfs_bmbt_get_all(adj_irecp, &adj_irec); 5964 5965 if (startoff < 5966 adj_irec.br_startoff + adj_irec.br_blockcount) 5967 return -EINVAL; 5968 5969 /* check whether to merge the extent or shift it down */ 5970 if (xfs_bmse_can_merge(&adj_irec, &got, 5971 offset_shift_fsb)) { 5972 error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb, 5973 *current_ext, gotp, adj_irecp, 5974 cur, logflags); 5975 if (error) 5976 return error; 5977 adj_irec = got; 5978 goto update_rmap; 5979 } 5980 } else { 5981 startoff = got.br_startoff + offset_shift_fsb; 5982 /* nothing to move if this is the last extent */ 5983 if (*current_ext >= (total_extents - 1)) 5984 goto update_current_ext; 5985 /* 5986 * If this is not the last extent in the file, make sure there 5987 * is enough room between current extent and next extent for 5988 * accommodating the shift. 5989 */ 5990 adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1); 5991 xfs_bmbt_get_all(adj_irecp, &adj_irec); 5992 if (startoff + got.br_blockcount > adj_irec.br_startoff) 5993 return -EINVAL; 5994 /* 5995 * Unlike a left shift (which involves a hole punch), 5996 * a right shift does not modify extent neighbors 5997 * in any way. We should never find mergeable extents 5998 * in this scenario. Check anyways and warn if we 5999 * encounter two extents that could be one. 6000 */ 6001 if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb)) 6002 WARN_ON_ONCE(1); 6003 } 6004 /* 6005 * Increment the extent index for the next iteration, update the start 6006 * offset of the in-core extent and update the btree if applicable. 
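 *
 * Illustrative aside: the two "is there room" checks above, condensed.  A
 * left shift needs the shifted extent to start at or after the end of the
 * preceding extent (or at or after offset zero if it is the first extent);
 * a right shift needs it to end at or before the start of the following
 * extent.  Roughly:
 *
 *	// left shift by "shift" blocks
 *	ok = got.br_startoff - shift >=
 *			prev.br_startoff + prev.br_blockcount;
 *
 *	// right shift by "shift" blocks
 *	ok = got.br_startoff + shift + got.br_blockcount <=
 *			next.br_startoff;
 *
 * Example: got = [offset 130, 30 blocks) with the next extent starting at
 * offset 170 can shift right by at most 10, since 130 + 10 + 30 = 170.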
6007 */ 6008 update_current_ext: 6009 if (direction == SHIFT_LEFT) 6010 (*current_ext)++; 6011 else 6012 (*current_ext)--; 6013 xfs_bmbt_set_startoff(gotp, startoff); 6014 *logflags |= XFS_ILOG_CORE; 6015 adj_irec = got; 6016 if (!cur) { 6017 *logflags |= XFS_ILOG_DEXT; 6018 goto update_rmap; 6019 } 6020 6021 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock, 6022 got.br_blockcount, &i); 6023 if (error) 6024 return error; 6025 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 6026 6027 got.br_startoff = startoff; 6028 error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock, 6029 got.br_blockcount, got.br_state); 6030 if (error) 6031 return error; 6032 6033 update_rmap: 6034 /* update reverse mapping */ 6035 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec); 6036 if (error) 6037 return error; 6038 adj_irec.br_startoff = startoff; 6039 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec); 6040 } 6041 6042 /* 6043 * Shift extent records to the left/right to cover/create a hole. 6044 * 6045 * The maximum number of extents to be shifted in a single operation is 6046 * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the 6047 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb 6048 * is the length by which each extent is shifted. If there is no hole to shift 6049 * the extents into, this will be considered invalid operation and we abort 6050 * immediately. 6051 */ 6052 int 6053 xfs_bmap_shift_extents( 6054 struct xfs_trans *tp, 6055 struct xfs_inode *ip, 6056 xfs_fileoff_t *next_fsb, 6057 xfs_fileoff_t offset_shift_fsb, 6058 int *done, 6059 xfs_fileoff_t stop_fsb, 6060 xfs_fsblock_t *firstblock, 6061 struct xfs_defer_ops *dfops, 6062 enum shift_direction direction, 6063 int num_exts) 6064 { 6065 struct xfs_btree_cur *cur = NULL; 6066 struct xfs_bmbt_rec_host *gotp; 6067 struct xfs_bmbt_irec got; 6068 struct xfs_mount *mp = ip->i_mount; 6069 struct xfs_ifork *ifp; 6070 xfs_extnum_t nexts = 0; 6071 xfs_extnum_t current_ext; 6072 xfs_extnum_t total_extents; 6073 xfs_extnum_t stop_extent; 6074 int error = 0; 6075 int whichfork = XFS_DATA_FORK; 6076 int logflags = 0; 6077 6078 if (unlikely(XFS_TEST_ERROR( 6079 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 6080 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 6081 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 6082 XFS_ERROR_REPORT("xfs_bmap_shift_extents", 6083 XFS_ERRLEVEL_LOW, mp); 6084 return -EFSCORRUPTED; 6085 } 6086 6087 if (XFS_FORCED_SHUTDOWN(mp)) 6088 return -EIO; 6089 6090 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 6091 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 6092 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT); 6093 ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT); 6094 6095 ifp = XFS_IFORK_PTR(ip, whichfork); 6096 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 6097 /* Read in all the extents */ 6098 error = xfs_iread_extents(tp, ip, whichfork); 6099 if (error) 6100 return error; 6101 } 6102 6103 if (ifp->if_flags & XFS_IFBROOT) { 6104 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 6105 cur->bc_private.b.firstblock = *firstblock; 6106 cur->bc_private.b.dfops = dfops; 6107 cur->bc_private.b.flags = 0; 6108 } 6109 6110 /* 6111 * There may be delalloc extents in the data fork before the range we 6112 * are collapsing out, so we cannot use the count of real extents here. 6113 * Instead we have to calculate it from the incore fork. 
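 *
 * Illustrative aside: callers drive this function a few extents at a time,
 * rolling a transaction between calls, until *done is set.  A hedged sketch
 * of that pattern, modelled on xfs_bmap_split_extent() later in this file;
 * the real callers live in xfs_bmap_util.c, already hold the iolock, and
 * also handle writeback, quota and EOF trimming:
 *
 *	while (!done && !error) {
 *		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
 *				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
 *		if (error)
 *			break;
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *		xfs_defer_init(&dfops, &firstblock);
 *		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
 *				&done, stop_fsb, &firstblock, &dfops,
 *				SHIFT_LEFT, XFS_BMAP_MAX_SHIFT_EXTENTS);
 *		if (!error)
 *			error = xfs_defer_finish(&tp, &dfops, NULL);
 *		if (error) {
 *			xfs_defer_cancel(&dfops);
 *			xfs_trans_cancel(tp);
 *			break;
 *		}
 *		error = xfs_trans_commit(tp);
 *	}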
6114 */ 6115 total_extents = xfs_iext_count(ifp); 6116 if (total_extents == 0) { 6117 *done = 1; 6118 goto del_cursor; 6119 } 6120 6121 /* 6122 * In case of first right shift, we need to initialize next_fsb 6123 */ 6124 if (*next_fsb == NULLFSBLOCK) { 6125 gotp = xfs_iext_get_ext(ifp, total_extents - 1); 6126 xfs_bmbt_get_all(gotp, &got); 6127 *next_fsb = got.br_startoff; 6128 if (stop_fsb > *next_fsb) { 6129 *done = 1; 6130 goto del_cursor; 6131 } 6132 } 6133 6134 /* Lookup the extent index at which we have to stop */ 6135 if (direction == SHIFT_RIGHT) { 6136 gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent); 6137 /* Make stop_extent exclusive of shift range */ 6138 stop_extent--; 6139 } else 6140 stop_extent = total_extents; 6141 6142 /* 6143 * Look up the extent index for the fsb where we start shifting. We can 6144 * henceforth iterate with current_ext as extent list changes are locked 6145 * out via ilock. 6146 * 6147 * gotp can be null in 2 cases: 1) if there are no extents or 2) 6148 * *next_fsb lies in a hole beyond which there are no extents. Either 6149 * way, we are done. 6150 */ 6151 gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, ¤t_ext); 6152 if (!gotp) { 6153 *done = 1; 6154 goto del_cursor; 6155 } 6156 6157 /* some sanity checking before we finally start shifting extents */ 6158 if ((direction == SHIFT_LEFT && current_ext >= stop_extent) || 6159 (direction == SHIFT_RIGHT && current_ext <= stop_extent)) { 6160 error = -EIO; 6161 goto del_cursor; 6162 } 6163 6164 while (nexts++ < num_exts) { 6165 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb, 6166 ¤t_ext, gotp, cur, &logflags, 6167 direction, dfops); 6168 if (error) 6169 goto del_cursor; 6170 /* 6171 * If there was an extent merge during the shift, the extent 6172 * count can change. Update the total and grade the next record. 6173 */ 6174 if (direction == SHIFT_LEFT) { 6175 total_extents = xfs_iext_count(ifp); 6176 stop_extent = total_extents; 6177 } 6178 6179 if (current_ext == stop_extent) { 6180 *done = 1; 6181 *next_fsb = NULLFSBLOCK; 6182 break; 6183 } 6184 gotp = xfs_iext_get_ext(ifp, current_ext); 6185 } 6186 6187 if (!*done) { 6188 xfs_bmbt_get_all(gotp, &got); 6189 *next_fsb = got.br_startoff; 6190 } 6191 6192 del_cursor: 6193 if (cur) 6194 xfs_btree_del_cursor(cur, 6195 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 6196 6197 if (logflags) 6198 xfs_trans_log_inode(tp, ip, logflags); 6199 6200 return error; 6201 } 6202 6203 /* 6204 * Splits an extent into two extents at split_fsb block such that it is 6205 * the first block of the current_ext. @current_ext is a target extent 6206 * to be split. @split_fsb is a block where the extents is split. 6207 * If split_fsb lies in a hole or the first block of extents, just return 0. 
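 *
 * Illustrative aside: the split arithmetic below in one place.  Given
 * got = {br_startoff 100, br_startblock 500, br_blockcount 40} and
 * split_fsb = 115, the head keeps 15 blocks and the new record becomes
 * {115, 515, 25}; both keep the original br_state.  As a standalone
 * sketch:
 *
 *	static void
 *	split_irec_at(const struct xfs_bmbt_irec *got, xfs_fileoff_t split_fsb,
 *		      struct xfs_bmbt_irec *head, struct xfs_bmbt_irec *tail)
 *	{
 *		xfs_filblks_t	keep = split_fsb - got->br_startoff;
 *
 *		*head = *got;
 *		head->br_blockcount = keep;
 *
 *		tail->br_startoff = split_fsb;
 *		tail->br_startblock = got->br_startblock + keep;
 *		tail->br_blockcount = got->br_blockcount - keep;
 *		tail->br_state = got->br_state;
 *	}
 *
 * The function below only does this when got.br_startoff < split_fsb, so a
 * split point that falls in a hole or on an extent's first block is a
 * no-op, as the comment above says.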
6208 */ 6209 STATIC int 6210 xfs_bmap_split_extent_at( 6211 struct xfs_trans *tp, 6212 struct xfs_inode *ip, 6213 xfs_fileoff_t split_fsb, 6214 xfs_fsblock_t *firstfsb, 6215 struct xfs_defer_ops *dfops) 6216 { 6217 int whichfork = XFS_DATA_FORK; 6218 struct xfs_btree_cur *cur = NULL; 6219 struct xfs_bmbt_rec_host *gotp; 6220 struct xfs_bmbt_irec got; 6221 struct xfs_bmbt_irec new; /* split extent */ 6222 struct xfs_mount *mp = ip->i_mount; 6223 struct xfs_ifork *ifp; 6224 xfs_fsblock_t gotblkcnt; /* new block count for got */ 6225 xfs_extnum_t current_ext; 6226 int error = 0; 6227 int logflags = 0; 6228 int i = 0; 6229 6230 if (unlikely(XFS_TEST_ERROR( 6231 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 6232 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 6233 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 6234 XFS_ERROR_REPORT("xfs_bmap_split_extent_at", 6235 XFS_ERRLEVEL_LOW, mp); 6236 return -EFSCORRUPTED; 6237 } 6238 6239 if (XFS_FORCED_SHUTDOWN(mp)) 6240 return -EIO; 6241 6242 ifp = XFS_IFORK_PTR(ip, whichfork); 6243 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 6244 /* Read in all the extents */ 6245 error = xfs_iread_extents(tp, ip, whichfork); 6246 if (error) 6247 return error; 6248 } 6249 6250 /* 6251 * gotp can be null in 2 cases: 1) if there are no extents 6252 * or 2) split_fsb lies in a hole beyond which there are 6253 * no extents. Either way, we are done. 6254 */ 6255 gotp = xfs_iext_bno_to_ext(ifp, split_fsb, ¤t_ext); 6256 if (!gotp) 6257 return 0; 6258 6259 xfs_bmbt_get_all(gotp, &got); 6260 6261 /* 6262 * Check split_fsb lies in a hole or the start boundary offset 6263 * of the extent. 6264 */ 6265 if (got.br_startoff >= split_fsb) 6266 return 0; 6267 6268 gotblkcnt = split_fsb - got.br_startoff; 6269 new.br_startoff = split_fsb; 6270 new.br_startblock = got.br_startblock + gotblkcnt; 6271 new.br_blockcount = got.br_blockcount - gotblkcnt; 6272 new.br_state = got.br_state; 6273 6274 if (ifp->if_flags & XFS_IFBROOT) { 6275 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 6276 cur->bc_private.b.firstblock = *firstfsb; 6277 cur->bc_private.b.dfops = dfops; 6278 cur->bc_private.b.flags = 0; 6279 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, 6280 got.br_startblock, 6281 got.br_blockcount, 6282 &i); 6283 if (error) 6284 goto del_cursor; 6285 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); 6286 } 6287 6288 xfs_bmbt_set_blockcount(gotp, gotblkcnt); 6289 got.br_blockcount = gotblkcnt; 6290 6291 logflags = XFS_ILOG_CORE; 6292 if (cur) { 6293 error = xfs_bmbt_update(cur, got.br_startoff, 6294 got.br_startblock, 6295 got.br_blockcount, 6296 got.br_state); 6297 if (error) 6298 goto del_cursor; 6299 } else 6300 logflags |= XFS_ILOG_DEXT; 6301 6302 /* Add new extent */ 6303 current_ext++; 6304 xfs_iext_insert(ip, current_ext, 1, &new, 0); 6305 XFS_IFORK_NEXT_SET(ip, whichfork, 6306 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 6307 6308 if (cur) { 6309 error = xfs_bmbt_lookup_eq(cur, new.br_startoff, 6310 new.br_startblock, new.br_blockcount, 6311 &i); 6312 if (error) 6313 goto del_cursor; 6314 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor); 6315 cur->bc_rec.b.br_state = new.br_state; 6316 6317 error = xfs_btree_insert(cur, &i); 6318 if (error) 6319 goto del_cursor; 6320 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); 6321 } 6322 6323 /* 6324 * Convert to a btree if necessary. 
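 *
 * Illustrative aside: when the fork is in btree format, the split above
 * touches the bmap btree twice -- the original record is updated in place
 * with its shortened length, then the record for the tail is inserted.
 * Note that the lookup before the insert expects i == 0: the new record
 * must not already exist, so finding one would indicate corruption.  The
 * sequence in outline, where old_blockcount stands for the pre-split
 * length (error handling and the XFS_WANT_CORRUPTED_GOTO checks omitted):
 *
 *	// 1) shrink the original record (looked up with its old length)
 *	xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
 *			   old_blockcount, &i);		// expect i == 1
 *	xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
 *			got.br_blockcount, got.br_state);
 *
 *	// 2) insert the record for the tail
 *	xfs_bmbt_lookup_eq(cur, new.br_startoff, new.br_startblock,
 *			   new.br_blockcount, &i);	// expect i == 0
 *	cur->bc_rec.b.br_state = new.br_state;
 *	xfs_btree_insert(cur, &i);			// expect i == 1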
6325 */ 6326 if (xfs_bmap_needs_btree(ip, whichfork)) { 6327 int tmp_logflags; /* partial log flag return val */ 6328 6329 ASSERT(cur == NULL); 6330 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops, 6331 &cur, 0, &tmp_logflags, whichfork); 6332 logflags |= tmp_logflags; 6333 } 6334 6335 del_cursor: 6336 if (cur) { 6337 cur->bc_private.b.allocated = 0; 6338 xfs_btree_del_cursor(cur, 6339 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 6340 } 6341 6342 if (logflags) 6343 xfs_trans_log_inode(tp, ip, logflags); 6344 return error; 6345 } 6346 6347 int 6348 xfs_bmap_split_extent( 6349 struct xfs_inode *ip, 6350 xfs_fileoff_t split_fsb) 6351 { 6352 struct xfs_mount *mp = ip->i_mount; 6353 struct xfs_trans *tp; 6354 struct xfs_defer_ops dfops; 6355 xfs_fsblock_t firstfsb; 6356 int error; 6357 6358 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 6359 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp); 6360 if (error) 6361 return error; 6362 6363 xfs_ilock(ip, XFS_ILOCK_EXCL); 6364 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 6365 6366 xfs_defer_init(&dfops, &firstfsb); 6367 6368 error = xfs_bmap_split_extent_at(tp, ip, split_fsb, 6369 &firstfsb, &dfops); 6370 if (error) 6371 goto out; 6372 6373 error = xfs_defer_finish(&tp, &dfops, NULL); 6374 if (error) 6375 goto out; 6376 6377 return xfs_trans_commit(tp); 6378 6379 out: 6380 xfs_defer_cancel(&dfops); 6381 xfs_trans_cancel(tp); 6382 return error; 6383 } 6384 6385 /* Deferred mapping is only for real extents in the data fork. */ 6386 static bool 6387 xfs_bmap_is_update_needed( 6388 struct xfs_bmbt_irec *bmap) 6389 { 6390 return bmap->br_startblock != HOLESTARTBLOCK && 6391 bmap->br_startblock != DELAYSTARTBLOCK; 6392 } 6393 6394 /* Record a bmap intent. */ 6395 static int 6396 __xfs_bmap_add( 6397 struct xfs_mount *mp, 6398 struct xfs_defer_ops *dfops, 6399 enum xfs_bmap_intent_type type, 6400 struct xfs_inode *ip, 6401 int whichfork, 6402 struct xfs_bmbt_irec *bmap) 6403 { 6404 int error; 6405 struct xfs_bmap_intent *bi; 6406 6407 trace_xfs_bmap_defer(mp, 6408 XFS_FSB_TO_AGNO(mp, bmap->br_startblock), 6409 type, 6410 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock), 6411 ip->i_ino, whichfork, 6412 bmap->br_startoff, 6413 bmap->br_blockcount, 6414 bmap->br_state); 6415 6416 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS); 6417 INIT_LIST_HEAD(&bi->bi_list); 6418 bi->bi_type = type; 6419 bi->bi_owner = ip; 6420 bi->bi_whichfork = whichfork; 6421 bi->bi_bmap = *bmap; 6422 6423 error = xfs_defer_join(dfops, bi->bi_owner); 6424 if (error) { 6425 kmem_free(bi); 6426 return error; 6427 } 6428 6429 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list); 6430 return 0; 6431 } 6432 6433 /* Map an extent into a file. */ 6434 int 6435 xfs_bmap_map_extent( 6436 struct xfs_mount *mp, 6437 struct xfs_defer_ops *dfops, 6438 struct xfs_inode *ip, 6439 struct xfs_bmbt_irec *PREV) 6440 { 6441 if (!xfs_bmap_is_update_needed(PREV)) 6442 return 0; 6443 6444 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip, 6445 XFS_DATA_FORK, PREV); 6446 } 6447 6448 /* Unmap an extent out of a file. */ 6449 int 6450 xfs_bmap_unmap_extent( 6451 struct xfs_mount *mp, 6452 struct xfs_defer_ops *dfops, 6453 struct xfs_inode *ip, 6454 struct xfs_bmbt_irec *PREV) 6455 { 6456 if (!xfs_bmap_is_update_needed(PREV)) 6457 return 0; 6458 6459 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip, 6460 XFS_DATA_FORK, PREV); 6461 } 6462 6463 /* 6464 * Process one of the deferred bmap operations. We pass back the 6465 * btree cursor to maintain our lock on the bmapbt between calls. 
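 *
 * Illustrative aside: these deferred operations are produced by
 * xfs_bmap_map_extent() and xfs_bmap_unmap_extent() above and consumed
 * here once the deferred-ops machinery runs.  A hedged sketch of the
 * producer side, reusing the transaction pattern of xfs_bmap_split_extent()
 * above; old_irec and new_irec stand for the extents being remapped, and
 * the real producers (e.g. the reflink remap code) live outside this file:
 *
 *	xfs_defer_init(&dfops, &firstfsb);
 *	error = xfs_bmap_unmap_extent(mp, &dfops, ip, &old_irec);
 *	if (!error)
 *		error = xfs_bmap_map_extent(mp, &dfops, ip, &new_irec);
 *	if (!error)
 *		error = xfs_defer_finish(&tp, &dfops, NULL);
 *	if (error)
 *		xfs_defer_cancel(&dfops);
 *	// xfs_defer_finish() replays each queued intent through
 *	// xfs_bmap_finish_one() below to do the actual fork update.
 *
 * Holes and delalloc extents are filtered out up front by
 * xfs_bmap_is_update_needed(), so only real extents are ever deferred.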
6466 */ 6467 int 6468 xfs_bmap_finish_one( 6469 struct xfs_trans *tp, 6470 struct xfs_defer_ops *dfops, 6471 struct xfs_inode *ip, 6472 enum xfs_bmap_intent_type type, 6473 int whichfork, 6474 xfs_fileoff_t startoff, 6475 xfs_fsblock_t startblock, 6476 xfs_filblks_t blockcount, 6477 xfs_exntst_t state) 6478 { 6479 int error = 0, done; 6480 6481 trace_xfs_bmap_deferred(tp->t_mountp, 6482 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type, 6483 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock), 6484 ip->i_ino, whichfork, startoff, blockcount, state); 6485 6486 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK)) 6487 return -EFSCORRUPTED; 6488 6489 if (XFS_TEST_ERROR(false, tp->t_mountp, 6490 XFS_ERRTAG_BMAP_FINISH_ONE, 6491 XFS_RANDOM_BMAP_FINISH_ONE)) 6492 return -EIO; 6493 6494 switch (type) { 6495 case XFS_BMAP_MAP: 6496 error = xfs_bmapi_remap(tp, ip, startoff, blockcount, 6497 startblock, dfops); 6498 break; 6499 case XFS_BMAP_UNMAP: 6500 error = xfs_bunmapi(tp, ip, startoff, blockcount, 6501 XFS_BMAPI_REMAP, 1, &startblock, dfops, &done); 6502 ASSERT(done); 6503 break; 6504 default: 6505 ASSERT(0); 6506 error = -EFSCORRUPTED; 6507 } 6508 6509 return error; 6510 } 6511
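/*
 * Illustrative aside: the deferred-op item code (outside this file) unpacks
 * a struct xfs_bmap_intent recorded by __xfs_bmap_add() into the scalar
 * arguments of xfs_bmap_finish_one().  A hedged sketch of that call, using
 * a hypothetical wrapper name:
 *
 *	static int
 *	finish_bmap_intent_sketch(struct xfs_trans *tp,
 *				  struct xfs_defer_ops *dfops,
 *				  struct xfs_bmap_intent *bi)
 *	{
 *		return xfs_bmap_finish_one(tp, dfops, bi->bi_owner,
 *				bi->bi_type, bi->bi_whichfork,
 *				bi->bi_bmap.br_startoff,
 *				bi->bi_bmap.br_startblock,
 *				bi->bi_bmap.br_blockcount,
 *				bi->bi_bmap.br_state);
 *	}
 *
 * The XFS_BMAP_UNMAP case above asserts "done" because each intent covers
 * a single extent, so one xfs_bunmapi() pass with nexts == 1 is expected to
 * unmap the whole range.
 */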