1 /* 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_shared.h" 21 #include "xfs_format.h" 22 #include "xfs_log_format.h" 23 #include "xfs_trans_resv.h" 24 #include "xfs_bit.h" 25 #include "xfs_sb.h" 26 #include "xfs_mount.h" 27 #include "xfs_defer.h" 28 #include "xfs_da_format.h" 29 #include "xfs_da_btree.h" 30 #include "xfs_dir2.h" 31 #include "xfs_inode.h" 32 #include "xfs_btree.h" 33 #include "xfs_trans.h" 34 #include "xfs_inode_item.h" 35 #include "xfs_extfree_item.h" 36 #include "xfs_alloc.h" 37 #include "xfs_bmap.h" 38 #include "xfs_bmap_util.h" 39 #include "xfs_bmap_btree.h" 40 #include "xfs_rtalloc.h" 41 #include "xfs_error.h" 42 #include "xfs_quota.h" 43 #include "xfs_trans_space.h" 44 #include "xfs_buf_item.h" 45 #include "xfs_trace.h" 46 #include "xfs_symlink.h" 47 #include "xfs_attr_leaf.h" 48 #include "xfs_filestream.h" 49 #include "xfs_rmap.h" 50 #include "xfs_ag_resv.h" 51 #include "xfs_refcount.h" 52 #include "xfs_icache.h" 53 54 55 kmem_zone_t *xfs_bmap_free_item_zone; 56 57 /* 58 * Miscellaneous helper functions 59 */ 60 61 /* 62 * Compute and fill in the value of the maximum depth of a bmap btree 63 * in this filesystem. Done once, during mount. 64 */ 65 void 66 xfs_bmap_compute_maxlevels( 67 xfs_mount_t *mp, /* file system mount structure */ 68 int whichfork) /* data or attr fork */ 69 { 70 int level; /* btree level */ 71 uint maxblocks; /* max blocks at this level */ 72 uint maxleafents; /* max leaf entries possible */ 73 int maxrootrecs; /* max records in root block */ 74 int minleafrecs; /* min records in leaf block */ 75 int minnoderecs; /* min records in node block */ 76 int sz; /* root block size */ 77 78 /* 79 * The maximum number of extents in a file, hence the maximum 80 * number of leaf entries, is controlled by the type of di_nextents 81 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents 82 * (a signed 16-bit number, xfs_aextnum_t). 83 * 84 * Note that we can no longer assume that if we are in ATTR1 that 85 * the fork offset of all the inodes will be 86 * (xfs_default_attroffset(ip) >> 3) because we could have mounted 87 * with ATTR2 and then mounted back with ATTR1, keeping the 88 * di_forkoff's fixed but probably at various positions. Therefore, 89 * for both ATTR1 and ATTR2 we have to assume the worst case scenario 90 * of a minimum size available. 91 */ 92 if (whichfork == XFS_DATA_FORK) { 93 maxleafents = MAXEXTNUM; 94 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS); 95 } else { 96 maxleafents = MAXAEXTNUM; 97 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS); 98 } 99 maxrootrecs = xfs_bmdr_maxrecs(sz, 0); 100 minleafrecs = mp->m_bmap_dmnr[0]; 101 minnoderecs = mp->m_bmap_dmnr[1]; 102 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; 103 for (level = 1; maxblocks > 1; level++) { 104 if (maxblocks <= maxrootrecs) 105 maxblocks = 1; 106 else 107 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; 108 } 109 mp->m_bm_maxlevels[whichfork] = level; 110 } 111 112 STATIC int /* error */ 113 xfs_bmbt_lookup_eq( 114 struct xfs_btree_cur *cur, 115 xfs_fileoff_t off, 116 xfs_fsblock_t bno, 117 xfs_filblks_t len, 118 int *stat) /* success/failure */ 119 { 120 cur->bc_rec.b.br_startoff = off; 121 cur->bc_rec.b.br_startblock = bno; 122 cur->bc_rec.b.br_blockcount = len; 123 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); 124 } 125 126 STATIC int /* error */ 127 xfs_bmbt_lookup_ge( 128 struct xfs_btree_cur *cur, 129 xfs_fileoff_t off, 130 xfs_fsblock_t bno, 131 xfs_filblks_t len, 132 int *stat) /* success/failure */ 133 { 134 cur->bc_rec.b.br_startoff = off; 135 cur->bc_rec.b.br_startblock = bno; 136 cur->bc_rec.b.br_blockcount = len; 137 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); 138 } 139 140 /* 141 * Check if the inode needs to be converted to btree format. 142 */ 143 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork) 144 { 145 return whichfork != XFS_COW_FORK && 146 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 147 XFS_IFORK_NEXTENTS(ip, whichfork) > 148 XFS_IFORK_MAXEXT(ip, whichfork); 149 } 150 151 /* 152 * Check if the inode should be converted to extent format. 153 */ 154 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork) 155 { 156 return whichfork != XFS_COW_FORK && 157 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && 158 XFS_IFORK_NEXTENTS(ip, whichfork) <= 159 XFS_IFORK_MAXEXT(ip, whichfork); 160 } 161 162 /* 163 * Update the record referred to by cur to the value given 164 * by [off, bno, len, state]. 165 * This either works (return 0) or gets an EFSCORRUPTED error. 166 */ 167 STATIC int 168 xfs_bmbt_update( 169 struct xfs_btree_cur *cur, 170 xfs_fileoff_t off, 171 xfs_fsblock_t bno, 172 xfs_filblks_t len, 173 xfs_exntst_t state) 174 { 175 union xfs_btree_rec rec; 176 177 xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state); 178 return xfs_btree_update(cur, &rec); 179 } 180 181 /* 182 * Compute the worst-case number of indirect blocks that will be used 183 * for ip's delayed extent of length "len". 184 */ 185 STATIC xfs_filblks_t 186 xfs_bmap_worst_indlen( 187 xfs_inode_t *ip, /* incore inode pointer */ 188 xfs_filblks_t len) /* delayed extent length */ 189 { 190 int level; /* btree level number */ 191 int maxrecs; /* maximum record count at this level */ 192 xfs_mount_t *mp; /* mount structure */ 193 xfs_filblks_t rval; /* return value */ 194 195 mp = ip->i_mount; 196 maxrecs = mp->m_bmap_dmxr[0]; 197 for (level = 0, rval = 0; 198 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); 199 level++) { 200 len += maxrecs - 1; 201 do_div(len, maxrecs); 202 rval += len; 203 if (len == 1) 204 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - 205 level - 1; 206 if (level == 0) 207 maxrecs = mp->m_bmap_dmxr[1]; 208 } 209 return rval; 210 } 211 212 /* 213 * Calculate the default attribute fork offset for newly created inodes. 214 */ 215 uint 216 xfs_default_attroffset( 217 struct xfs_inode *ip) 218 { 219 struct xfs_mount *mp = ip->i_mount; 220 uint offset; 221 222 if (mp->m_sb.sb_inodesize == 256) { 223 offset = XFS_LITINO(mp, ip->i_d.di_version) - 224 XFS_BMDR_SPACE_CALC(MINABTPTRS); 225 } else { 226 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS); 227 } 228 229 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version)); 230 return offset; 231 } 232 233 /* 234 * Helper routine to reset inode di_forkoff field when switching 235 * attribute fork from local to extent format - we reset it where 236 * possible to make space available for inline data fork extents. 237 */ 238 STATIC void 239 xfs_bmap_forkoff_reset( 240 xfs_inode_t *ip, 241 int whichfork) 242 { 243 if (whichfork == XFS_ATTR_FORK && 244 ip->i_d.di_format != XFS_DINODE_FMT_DEV && 245 ip->i_d.di_format != XFS_DINODE_FMT_UUID && 246 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) { 247 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3; 248 249 if (dfl_forkoff > ip->i_d.di_forkoff) 250 ip->i_d.di_forkoff = dfl_forkoff; 251 } 252 } 253 254 #ifdef DEBUG 255 STATIC struct xfs_buf * 256 xfs_bmap_get_bp( 257 struct xfs_btree_cur *cur, 258 xfs_fsblock_t bno) 259 { 260 struct xfs_log_item_desc *lidp; 261 int i; 262 263 if (!cur) 264 return NULL; 265 266 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) { 267 if (!cur->bc_bufs[i]) 268 break; 269 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno) 270 return cur->bc_bufs[i]; 271 } 272 273 /* Chase down all the log items to see if the bp is there */ 274 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) { 275 struct xfs_buf_log_item *bip; 276 bip = (struct xfs_buf_log_item *)lidp->lid_item; 277 if (bip->bli_item.li_type == XFS_LI_BUF && 278 XFS_BUF_ADDR(bip->bli_buf) == bno) 279 return bip->bli_buf; 280 } 281 282 return NULL; 283 } 284 285 STATIC void 286 xfs_check_block( 287 struct xfs_btree_block *block, 288 xfs_mount_t *mp, 289 int root, 290 short sz) 291 { 292 int i, j, dmxr; 293 __be64 *pp, *thispa; /* pointer to block address */ 294 xfs_bmbt_key_t *prevp, *keyp; 295 296 ASSERT(be16_to_cpu(block->bb_level) > 0); 297 298 prevp = NULL; 299 for( i = 1; i <= xfs_btree_get_numrecs(block); i++) { 300 dmxr = mp->m_bmap_dmxr[0]; 301 keyp = XFS_BMBT_KEY_ADDR(mp, block, i); 302 303 if (prevp) { 304 ASSERT(be64_to_cpu(prevp->br_startoff) < 305 be64_to_cpu(keyp->br_startoff)); 306 } 307 prevp = keyp; 308 309 /* 310 * Compare the block numbers to see if there are dups. 311 */ 312 if (root) 313 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz); 314 else 315 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr); 316 317 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) { 318 if (root) 319 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz); 320 else 321 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr); 322 if (*thispa == *pp) { 323 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld", 324 __func__, j, i, 325 (unsigned long long)be64_to_cpu(*thispa)); 326 panic("%s: ptrs are equal in node\n", 327 __func__); 328 } 329 } 330 } 331 } 332 333 /* 334 * Check that the extents for the inode ip are in the right order in all 335 * btree leaves. THis becomes prohibitively expensive for large extent count 336 * files, so don't bother with inodes that have more than 10,000 extents in 337 * them. The btree record ordering checks will still be done, so for such large 338 * bmapbt constructs that is going to catch most corruptions. 339 */ 340 STATIC void 341 xfs_bmap_check_leaf_extents( 342 xfs_btree_cur_t *cur, /* btree cursor or null */ 343 xfs_inode_t *ip, /* incore inode pointer */ 344 int whichfork) /* data or attr fork */ 345 { 346 struct xfs_btree_block *block; /* current btree block */ 347 xfs_fsblock_t bno; /* block # of "block" */ 348 xfs_buf_t *bp; /* buffer for "block" */ 349 int error; /* error return value */ 350 xfs_extnum_t i=0, j; /* index into the extents list */ 351 xfs_ifork_t *ifp; /* fork structure */ 352 int level; /* btree level, for checking */ 353 xfs_mount_t *mp; /* file system mount structure */ 354 __be64 *pp; /* pointer to block address */ 355 xfs_bmbt_rec_t *ep; /* pointer to current extent */ 356 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */ 357 xfs_bmbt_rec_t *nextp; /* pointer to next extent */ 358 int bp_release = 0; 359 360 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) { 361 return; 362 } 363 364 /* skip large extent count inodes */ 365 if (ip->i_d.di_nextents > 10000) 366 return; 367 368 bno = NULLFSBLOCK; 369 mp = ip->i_mount; 370 ifp = XFS_IFORK_PTR(ip, whichfork); 371 block = ifp->if_broot; 372 /* 373 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. 374 */ 375 level = be16_to_cpu(block->bb_level); 376 ASSERT(level > 0); 377 xfs_check_block(block, mp, 1, ifp->if_broot_bytes); 378 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 379 bno = be64_to_cpu(*pp); 380 381 ASSERT(bno != NULLFSBLOCK); 382 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); 383 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); 384 385 /* 386 * Go down the tree until leaf level is reached, following the first 387 * pointer (leftmost) at each level. 388 */ 389 while (level-- > 0) { 390 /* See if buf is in cur first */ 391 bp_release = 0; 392 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); 393 if (!bp) { 394 bp_release = 1; 395 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, 396 XFS_BMAP_BTREE_REF, 397 &xfs_bmbt_buf_ops); 398 if (error) 399 goto error_norelse; 400 } 401 block = XFS_BUF_TO_BLOCK(bp); 402 if (level == 0) 403 break; 404 405 /* 406 * Check this block for basic sanity (increasing keys and 407 * no duplicate blocks). 408 */ 409 410 xfs_check_block(block, mp, 0, 0); 411 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); 412 bno = be64_to_cpu(*pp); 413 XFS_WANT_CORRUPTED_GOTO(mp, 414 XFS_FSB_SANITY_CHECK(mp, bno), error0); 415 if (bp_release) { 416 bp_release = 0; 417 xfs_trans_brelse(NULL, bp); 418 } 419 } 420 421 /* 422 * Here with bp and block set to the leftmost leaf node in the tree. 423 */ 424 i = 0; 425 426 /* 427 * Loop over all leaf nodes checking that all extents are in the right order. 428 */ 429 for (;;) { 430 xfs_fsblock_t nextbno; 431 xfs_extnum_t num_recs; 432 433 434 num_recs = xfs_btree_get_numrecs(block); 435 436 /* 437 * Read-ahead the next leaf block, if any. 438 */ 439 440 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); 441 442 /* 443 * Check all the extents to make sure they are OK. 444 * If we had a previous block, the last entry should 445 * conform with the first entry in this one. 446 */ 447 448 ep = XFS_BMBT_REC_ADDR(mp, block, 1); 449 if (i) { 450 ASSERT(xfs_bmbt_disk_get_startoff(&last) + 451 xfs_bmbt_disk_get_blockcount(&last) <= 452 xfs_bmbt_disk_get_startoff(ep)); 453 } 454 for (j = 1; j < num_recs; j++) { 455 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1); 456 ASSERT(xfs_bmbt_disk_get_startoff(ep) + 457 xfs_bmbt_disk_get_blockcount(ep) <= 458 xfs_bmbt_disk_get_startoff(nextp)); 459 ep = nextp; 460 } 461 462 last = *ep; 463 i += num_recs; 464 if (bp_release) { 465 bp_release = 0; 466 xfs_trans_brelse(NULL, bp); 467 } 468 bno = nextbno; 469 /* 470 * If we've reached the end, stop. 471 */ 472 if (bno == NULLFSBLOCK) 473 break; 474 475 bp_release = 0; 476 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); 477 if (!bp) { 478 bp_release = 1; 479 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, 480 XFS_BMAP_BTREE_REF, 481 &xfs_bmbt_buf_ops); 482 if (error) 483 goto error_norelse; 484 } 485 block = XFS_BUF_TO_BLOCK(bp); 486 } 487 488 return; 489 490 error0: 491 xfs_warn(mp, "%s: at error0", __func__); 492 if (bp_release) 493 xfs_trans_brelse(NULL, bp); 494 error_norelse: 495 xfs_warn(mp, "%s: BAD after btree leaves for %d extents", 496 __func__, i); 497 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__); 498 return; 499 } 500 501 /* 502 * Add bmap trace insert entries for all the contents of the extent records. 503 */ 504 void 505 xfs_bmap_trace_exlist( 506 xfs_inode_t *ip, /* incore inode pointer */ 507 xfs_extnum_t cnt, /* count of entries in the list */ 508 int whichfork, /* data or attr or cow fork */ 509 unsigned long caller_ip) 510 { 511 xfs_extnum_t idx; /* extent record index */ 512 xfs_ifork_t *ifp; /* inode fork pointer */ 513 int state = 0; 514 515 if (whichfork == XFS_ATTR_FORK) 516 state |= BMAP_ATTRFORK; 517 else if (whichfork == XFS_COW_FORK) 518 state |= BMAP_COWFORK; 519 520 ifp = XFS_IFORK_PTR(ip, whichfork); 521 ASSERT(cnt == xfs_iext_count(ifp)); 522 for (idx = 0; idx < cnt; idx++) 523 trace_xfs_extlist(ip, idx, state, caller_ip); 524 } 525 526 /* 527 * Validate that the bmbt_irecs being returned from bmapi are valid 528 * given the caller's original parameters. Specifically check the 529 * ranges of the returned irecs to ensure that they only extend beyond 530 * the given parameters if the XFS_BMAPI_ENTIRE flag was set. 531 */ 532 STATIC void 533 xfs_bmap_validate_ret( 534 xfs_fileoff_t bno, 535 xfs_filblks_t len, 536 int flags, 537 xfs_bmbt_irec_t *mval, 538 int nmap, 539 int ret_nmap) 540 { 541 int i; /* index to map values */ 542 543 ASSERT(ret_nmap <= nmap); 544 545 for (i = 0; i < ret_nmap; i++) { 546 ASSERT(mval[i].br_blockcount > 0); 547 if (!(flags & XFS_BMAPI_ENTIRE)) { 548 ASSERT(mval[i].br_startoff >= bno); 549 ASSERT(mval[i].br_blockcount <= len); 550 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <= 551 bno + len); 552 } else { 553 ASSERT(mval[i].br_startoff < bno + len); 554 ASSERT(mval[i].br_startoff + mval[i].br_blockcount > 555 bno); 556 } 557 ASSERT(i == 0 || 558 mval[i - 1].br_startoff + mval[i - 1].br_blockcount == 559 mval[i].br_startoff); 560 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK && 561 mval[i].br_startblock != HOLESTARTBLOCK); 562 ASSERT(mval[i].br_state == XFS_EXT_NORM || 563 mval[i].br_state == XFS_EXT_UNWRITTEN); 564 } 565 } 566 567 #else 568 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0) 569 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0) 570 #endif /* DEBUG */ 571 572 /* 573 * bmap free list manipulation functions 574 */ 575 576 /* 577 * Add the extent to the list of extents to be free at transaction end. 578 * The list is maintained sorted (by block number). 579 */ 580 void 581 xfs_bmap_add_free( 582 struct xfs_mount *mp, 583 struct xfs_defer_ops *dfops, 584 xfs_fsblock_t bno, 585 xfs_filblks_t len, 586 struct xfs_owner_info *oinfo) 587 { 588 struct xfs_extent_free_item *new; /* new element */ 589 #ifdef DEBUG 590 xfs_agnumber_t agno; 591 xfs_agblock_t agbno; 592 593 ASSERT(bno != NULLFSBLOCK); 594 ASSERT(len > 0); 595 ASSERT(len <= MAXEXTLEN); 596 ASSERT(!isnullstartblock(bno)); 597 agno = XFS_FSB_TO_AGNO(mp, bno); 598 agbno = XFS_FSB_TO_AGBNO(mp, bno); 599 ASSERT(agno < mp->m_sb.sb_agcount); 600 ASSERT(agbno < mp->m_sb.sb_agblocks); 601 ASSERT(len < mp->m_sb.sb_agblocks); 602 ASSERT(agbno + len <= mp->m_sb.sb_agblocks); 603 #endif 604 ASSERT(xfs_bmap_free_item_zone != NULL); 605 606 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP); 607 new->xefi_startblock = bno; 608 new->xefi_blockcount = (xfs_extlen_t)len; 609 if (oinfo) 610 new->xefi_oinfo = *oinfo; 611 else 612 xfs_rmap_skip_owner_update(&new->xefi_oinfo); 613 trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0, 614 XFS_FSB_TO_AGBNO(mp, bno), len); 615 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list); 616 } 617 618 /* 619 * Inode fork format manipulation functions 620 */ 621 622 /* 623 * Transform a btree format file with only one leaf node, where the 624 * extents list will fit in the inode, into an extents format file. 625 * Since the file extents are already in-core, all we have to do is 626 * give up the space for the btree root and pitch the leaf block. 627 */ 628 STATIC int /* error */ 629 xfs_bmap_btree_to_extents( 630 xfs_trans_t *tp, /* transaction pointer */ 631 xfs_inode_t *ip, /* incore inode pointer */ 632 xfs_btree_cur_t *cur, /* btree cursor */ 633 int *logflagsp, /* inode logging flags */ 634 int whichfork) /* data or attr fork */ 635 { 636 /* REFERENCED */ 637 struct xfs_btree_block *cblock;/* child btree block */ 638 xfs_fsblock_t cbno; /* child block number */ 639 xfs_buf_t *cbp; /* child block's buffer */ 640 int error; /* error return value */ 641 xfs_ifork_t *ifp; /* inode fork data */ 642 xfs_mount_t *mp; /* mount point structure */ 643 __be64 *pp; /* ptr to block address */ 644 struct xfs_btree_block *rblock;/* root btree block */ 645 struct xfs_owner_info oinfo; 646 647 mp = ip->i_mount; 648 ifp = XFS_IFORK_PTR(ip, whichfork); 649 ASSERT(whichfork != XFS_COW_FORK); 650 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 651 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 652 rblock = ifp->if_broot; 653 ASSERT(be16_to_cpu(rblock->bb_level) == 1); 654 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); 655 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1); 656 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes); 657 cbno = be64_to_cpu(*pp); 658 *logflagsp = 0; 659 #ifdef DEBUG 660 if ((error = xfs_btree_check_lptr(cur, cbno, 1))) 661 return error; 662 #endif 663 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF, 664 &xfs_bmbt_buf_ops); 665 if (error) 666 return error; 667 cblock = XFS_BUF_TO_BLOCK(cbp); 668 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) 669 return error; 670 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); 671 xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo); 672 ip->i_d.di_nblocks--; 673 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 674 xfs_trans_binval(tp, cbp); 675 if (cur->bc_bufs[0] == cbp) 676 cur->bc_bufs[0] = NULL; 677 xfs_iroot_realloc(ip, -1, whichfork); 678 ASSERT(ifp->if_broot == NULL); 679 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); 680 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 681 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 682 return 0; 683 } 684 685 /* 686 * Convert an extents-format file into a btree-format file. 687 * The new file will have a root block (in the inode) and a single child block. 688 */ 689 STATIC int /* error */ 690 xfs_bmap_extents_to_btree( 691 xfs_trans_t *tp, /* transaction pointer */ 692 xfs_inode_t *ip, /* incore inode pointer */ 693 xfs_fsblock_t *firstblock, /* first-block-allocated */ 694 struct xfs_defer_ops *dfops, /* blocks freed in xaction */ 695 xfs_btree_cur_t **curp, /* cursor returned to caller */ 696 int wasdel, /* converting a delayed alloc */ 697 int *logflagsp, /* inode logging flags */ 698 int whichfork) /* data or attr fork */ 699 { 700 struct xfs_btree_block *ablock; /* allocated (child) bt block */ 701 xfs_buf_t *abp; /* buffer for ablock */ 702 xfs_alloc_arg_t args; /* allocation arguments */ 703 xfs_bmbt_rec_t *arp; /* child record pointer */ 704 struct xfs_btree_block *block; /* btree root block */ 705 xfs_btree_cur_t *cur; /* bmap btree cursor */ 706 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 707 int error; /* error return value */ 708 xfs_extnum_t i, cnt; /* extent record index */ 709 xfs_ifork_t *ifp; /* inode fork pointer */ 710 xfs_bmbt_key_t *kp; /* root block key pointer */ 711 xfs_mount_t *mp; /* mount structure */ 712 xfs_extnum_t nextents; /* number of file extents */ 713 xfs_bmbt_ptr_t *pp; /* root block address pointer */ 714 715 mp = ip->i_mount; 716 ASSERT(whichfork != XFS_COW_FORK); 717 ifp = XFS_IFORK_PTR(ip, whichfork); 718 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS); 719 720 /* 721 * Make space in the inode incore. 722 */ 723 xfs_iroot_realloc(ip, 1, whichfork); 724 ifp->if_flags |= XFS_IFBROOT; 725 726 /* 727 * Fill in the root. 728 */ 729 block = ifp->if_broot; 730 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL, 731 XFS_BTNUM_BMAP, 1, 1, ip->i_ino, 732 XFS_BTREE_LONG_PTRS); 733 /* 734 * Need a cursor. Can't allocate until bb_level is filled in. 735 */ 736 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 737 cur->bc_private.b.firstblock = *firstblock; 738 cur->bc_private.b.dfops = dfops; 739 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 740 /* 741 * Convert to a btree with two levels, one record in root. 742 */ 743 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE); 744 memset(&args, 0, sizeof(args)); 745 args.tp = tp; 746 args.mp = mp; 747 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork); 748 args.firstblock = *firstblock; 749 if (*firstblock == NULLFSBLOCK) { 750 args.type = XFS_ALLOCTYPE_START_BNO; 751 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); 752 } else if (dfops->dop_low) { 753 args.type = XFS_ALLOCTYPE_START_BNO; 754 args.fsbno = *firstblock; 755 } else { 756 args.type = XFS_ALLOCTYPE_NEAR_BNO; 757 args.fsbno = *firstblock; 758 } 759 args.minlen = args.maxlen = args.prod = 1; 760 args.wasdel = wasdel; 761 *logflagsp = 0; 762 if ((error = xfs_alloc_vextent(&args))) { 763 xfs_iroot_realloc(ip, -1, whichfork); 764 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 765 return error; 766 } 767 768 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { 769 xfs_iroot_realloc(ip, -1, whichfork); 770 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 771 return -ENOSPC; 772 } 773 /* 774 * Allocation can't fail, the space was reserved. 775 */ 776 ASSERT(*firstblock == NULLFSBLOCK || 777 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock)); 778 *firstblock = cur->bc_private.b.firstblock = args.fsbno; 779 cur->bc_private.b.allocated++; 780 ip->i_d.di_nblocks++; 781 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); 782 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0); 783 /* 784 * Fill in the child block. 785 */ 786 abp->b_ops = &xfs_bmbt_buf_ops; 787 ablock = XFS_BUF_TO_BLOCK(abp); 788 xfs_btree_init_block_int(mp, ablock, abp->b_bn, 789 XFS_BTNUM_BMAP, 0, 0, ip->i_ino, 790 XFS_BTREE_LONG_PTRS); 791 792 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1); 793 nextents = xfs_iext_count(ifp); 794 for (cnt = i = 0; i < nextents; i++) { 795 ep = xfs_iext_get_ext(ifp, i); 796 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) { 797 arp->l0 = cpu_to_be64(ep->l0); 798 arp->l1 = cpu_to_be64(ep->l1); 799 arp++; cnt++; 800 } 801 } 802 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork)); 803 xfs_btree_set_numrecs(ablock, cnt); 804 805 /* 806 * Fill in the root key and pointer. 807 */ 808 kp = XFS_BMBT_KEY_ADDR(mp, block, 1); 809 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1); 810 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp)); 811 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur, 812 be16_to_cpu(block->bb_level))); 813 *pp = cpu_to_be64(args.fsbno); 814 815 /* 816 * Do all this logging at the end so that 817 * the root is at the right level. 818 */ 819 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS); 820 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); 821 ASSERT(*curp == NULL); 822 *curp = cur; 823 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork); 824 return 0; 825 } 826 827 /* 828 * Convert a local file to an extents file. 829 * This code is out of bounds for data forks of regular files, 830 * since the file data needs to get logged so things will stay consistent. 831 * (The bmap-level manipulations are ok, though). 832 */ 833 void 834 xfs_bmap_local_to_extents_empty( 835 struct xfs_inode *ip, 836 int whichfork) 837 { 838 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 839 840 ASSERT(whichfork != XFS_COW_FORK); 841 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 842 ASSERT(ifp->if_bytes == 0); 843 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); 844 845 xfs_bmap_forkoff_reset(ip, whichfork); 846 ifp->if_flags &= ~XFS_IFINLINE; 847 ifp->if_flags |= XFS_IFEXTENTS; 848 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 849 } 850 851 852 STATIC int /* error */ 853 xfs_bmap_local_to_extents( 854 xfs_trans_t *tp, /* transaction pointer */ 855 xfs_inode_t *ip, /* incore inode pointer */ 856 xfs_fsblock_t *firstblock, /* first block allocated in xaction */ 857 xfs_extlen_t total, /* total blocks needed by transaction */ 858 int *logflagsp, /* inode logging flags */ 859 int whichfork, 860 void (*init_fn)(struct xfs_trans *tp, 861 struct xfs_buf *bp, 862 struct xfs_inode *ip, 863 struct xfs_ifork *ifp)) 864 { 865 int error = 0; 866 int flags; /* logging flags returned */ 867 xfs_ifork_t *ifp; /* inode fork pointer */ 868 xfs_alloc_arg_t args; /* allocation arguments */ 869 xfs_buf_t *bp; /* buffer for extent block */ 870 struct xfs_bmbt_irec rec; 871 872 /* 873 * We don't want to deal with the case of keeping inode data inline yet. 874 * So sending the data fork of a regular inode is invalid. 875 */ 876 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK)); 877 ifp = XFS_IFORK_PTR(ip, whichfork); 878 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 879 880 if (!ifp->if_bytes) { 881 xfs_bmap_local_to_extents_empty(ip, whichfork); 882 flags = XFS_ILOG_CORE; 883 goto done; 884 } 885 886 flags = 0; 887 error = 0; 888 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == 889 XFS_IFINLINE); 890 memset(&args, 0, sizeof(args)); 891 args.tp = tp; 892 args.mp = ip->i_mount; 893 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0); 894 args.firstblock = *firstblock; 895 /* 896 * Allocate a block. We know we need only one, since the 897 * file currently fits in an inode. 898 */ 899 if (*firstblock == NULLFSBLOCK) { 900 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino); 901 args.type = XFS_ALLOCTYPE_START_BNO; 902 } else { 903 args.fsbno = *firstblock; 904 args.type = XFS_ALLOCTYPE_NEAR_BNO; 905 } 906 args.total = total; 907 args.minlen = args.maxlen = args.prod = 1; 908 error = xfs_alloc_vextent(&args); 909 if (error) 910 goto done; 911 912 /* Can't fail, the space was reserved. */ 913 ASSERT(args.fsbno != NULLFSBLOCK); 914 ASSERT(args.len == 1); 915 *firstblock = args.fsbno; 916 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); 917 918 /* 919 * Initialize the block, copy the data and log the remote buffer. 920 * 921 * The callout is responsible for logging because the remote format 922 * might differ from the local format and thus we don't know how much to 923 * log here. Note that init_fn must also set the buffer log item type 924 * correctly. 925 */ 926 init_fn(tp, bp, ip, ifp); 927 928 /* account for the change in fork size */ 929 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); 930 xfs_bmap_local_to_extents_empty(ip, whichfork); 931 flags |= XFS_ILOG_CORE; 932 933 rec.br_startoff = 0; 934 rec.br_startblock = args.fsbno; 935 rec.br_blockcount = 1; 936 rec.br_state = XFS_EXT_NORM; 937 xfs_iext_insert(ip, 0, 1, &rec, 0); 938 939 trace_xfs_bmap_post_update(ip, 0, 940 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0, 941 _THIS_IP_); 942 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 943 ip->i_d.di_nblocks = 1; 944 xfs_trans_mod_dquot_byino(tp, ip, 945 XFS_TRANS_DQ_BCOUNT, 1L); 946 flags |= xfs_ilog_fext(whichfork); 947 948 done: 949 *logflagsp = flags; 950 return error; 951 } 952 953 /* 954 * Called from xfs_bmap_add_attrfork to handle btree format files. 955 */ 956 STATIC int /* error */ 957 xfs_bmap_add_attrfork_btree( 958 xfs_trans_t *tp, /* transaction pointer */ 959 xfs_inode_t *ip, /* incore inode pointer */ 960 xfs_fsblock_t *firstblock, /* first block allocated */ 961 struct xfs_defer_ops *dfops, /* blocks to free at commit */ 962 int *flags) /* inode logging flags */ 963 { 964 xfs_btree_cur_t *cur; /* btree cursor */ 965 int error; /* error return value */ 966 xfs_mount_t *mp; /* file system mount struct */ 967 int stat; /* newroot status */ 968 969 mp = ip->i_mount; 970 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip)) 971 *flags |= XFS_ILOG_DBROOT; 972 else { 973 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); 974 cur->bc_private.b.dfops = dfops; 975 cur->bc_private.b.firstblock = *firstblock; 976 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) 977 goto error0; 978 /* must be at least one entry */ 979 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0); 980 if ((error = xfs_btree_new_iroot(cur, flags, &stat))) 981 goto error0; 982 if (stat == 0) { 983 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 984 return -ENOSPC; 985 } 986 *firstblock = cur->bc_private.b.firstblock; 987 cur->bc_private.b.allocated = 0; 988 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 989 } 990 return 0; 991 error0: 992 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 993 return error; 994 } 995 996 /* 997 * Called from xfs_bmap_add_attrfork to handle extents format files. 998 */ 999 STATIC int /* error */ 1000 xfs_bmap_add_attrfork_extents( 1001 xfs_trans_t *tp, /* transaction pointer */ 1002 xfs_inode_t *ip, /* incore inode pointer */ 1003 xfs_fsblock_t *firstblock, /* first block allocated */ 1004 struct xfs_defer_ops *dfops, /* blocks to free at commit */ 1005 int *flags) /* inode logging flags */ 1006 { 1007 xfs_btree_cur_t *cur; /* bmap btree cursor */ 1008 int error; /* error return value */ 1009 1010 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip)) 1011 return 0; 1012 cur = NULL; 1013 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0, 1014 flags, XFS_DATA_FORK); 1015 if (cur) { 1016 cur->bc_private.b.allocated = 0; 1017 xfs_btree_del_cursor(cur, 1018 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 1019 } 1020 return error; 1021 } 1022 1023 /* 1024 * Called from xfs_bmap_add_attrfork to handle local format files. Each 1025 * different data fork content type needs a different callout to do the 1026 * conversion. Some are basic and only require special block initialisation 1027 * callouts for the data formating, others (directories) are so specialised they 1028 * handle everything themselves. 1029 * 1030 * XXX (dgc): investigate whether directory conversion can use the generic 1031 * formatting callout. It should be possible - it's just a very complex 1032 * formatter. 1033 */ 1034 STATIC int /* error */ 1035 xfs_bmap_add_attrfork_local( 1036 xfs_trans_t *tp, /* transaction pointer */ 1037 xfs_inode_t *ip, /* incore inode pointer */ 1038 xfs_fsblock_t *firstblock, /* first block allocated */ 1039 struct xfs_defer_ops *dfops, /* blocks to free at commit */ 1040 int *flags) /* inode logging flags */ 1041 { 1042 xfs_da_args_t dargs; /* args for dir/attr code */ 1043 1044 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) 1045 return 0; 1046 1047 if (S_ISDIR(VFS_I(ip)->i_mode)) { 1048 memset(&dargs, 0, sizeof(dargs)); 1049 dargs.geo = ip->i_mount->m_dir_geo; 1050 dargs.dp = ip; 1051 dargs.firstblock = firstblock; 1052 dargs.dfops = dfops; 1053 dargs.total = dargs.geo->fsbcount; 1054 dargs.whichfork = XFS_DATA_FORK; 1055 dargs.trans = tp; 1056 return xfs_dir2_sf_to_block(&dargs); 1057 } 1058 1059 if (S_ISLNK(VFS_I(ip)->i_mode)) 1060 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, 1061 flags, XFS_DATA_FORK, 1062 xfs_symlink_local_to_remote); 1063 1064 /* should only be called for types that support local format data */ 1065 ASSERT(0); 1066 return -EFSCORRUPTED; 1067 } 1068 1069 /* 1070 * Convert inode from non-attributed to attributed. 1071 * Must not be in a transaction, ip must not be locked. 1072 */ 1073 int /* error code */ 1074 xfs_bmap_add_attrfork( 1075 xfs_inode_t *ip, /* incore inode pointer */ 1076 int size, /* space new attribute needs */ 1077 int rsvd) /* xact may use reserved blks */ 1078 { 1079 xfs_fsblock_t firstblock; /* 1st block/ag allocated */ 1080 struct xfs_defer_ops dfops; /* freed extent records */ 1081 xfs_mount_t *mp; /* mount structure */ 1082 xfs_trans_t *tp; /* transaction pointer */ 1083 int blks; /* space reservation */ 1084 int version = 1; /* superblock attr version */ 1085 int logflags; /* logging flags */ 1086 int error; /* error return value */ 1087 1088 ASSERT(XFS_IFORK_Q(ip) == 0); 1089 1090 mp = ip->i_mount; 1091 ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); 1092 1093 blks = XFS_ADDAFORK_SPACE_RES(mp); 1094 1095 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0, 1096 rsvd ? XFS_TRANS_RESERVE : 0, &tp); 1097 if (error) 1098 return error; 1099 1100 xfs_ilock(ip, XFS_ILOCK_EXCL); 1101 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ? 1102 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : 1103 XFS_QMOPT_RES_REGBLKS); 1104 if (error) 1105 goto trans_cancel; 1106 if (XFS_IFORK_Q(ip)) 1107 goto trans_cancel; 1108 if (ip->i_d.di_anextents != 0) { 1109 error = -EFSCORRUPTED; 1110 goto trans_cancel; 1111 } 1112 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { 1113 /* 1114 * For inodes coming from pre-6.2 filesystems. 1115 */ 1116 ASSERT(ip->i_d.di_aformat == 0); 1117 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 1118 } 1119 1120 xfs_trans_ijoin(tp, ip, 0); 1121 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1122 1123 switch (ip->i_d.di_format) { 1124 case XFS_DINODE_FMT_DEV: 1125 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; 1126 break; 1127 case XFS_DINODE_FMT_UUID: 1128 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3; 1129 break; 1130 case XFS_DINODE_FMT_LOCAL: 1131 case XFS_DINODE_FMT_EXTENTS: 1132 case XFS_DINODE_FMT_BTREE: 1133 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size); 1134 if (!ip->i_d.di_forkoff) 1135 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3; 1136 else if (mp->m_flags & XFS_MOUNT_ATTR2) 1137 version = 2; 1138 break; 1139 default: 1140 ASSERT(0); 1141 error = -EINVAL; 1142 goto trans_cancel; 1143 } 1144 1145 ASSERT(ip->i_afp == NULL); 1146 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); 1147 ip->i_afp->if_flags = XFS_IFEXTENTS; 1148 logflags = 0; 1149 xfs_defer_init(&dfops, &firstblock); 1150 switch (ip->i_d.di_format) { 1151 case XFS_DINODE_FMT_LOCAL: 1152 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops, 1153 &logflags); 1154 break; 1155 case XFS_DINODE_FMT_EXTENTS: 1156 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock, 1157 &dfops, &logflags); 1158 break; 1159 case XFS_DINODE_FMT_BTREE: 1160 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops, 1161 &logflags); 1162 break; 1163 default: 1164 error = 0; 1165 break; 1166 } 1167 if (logflags) 1168 xfs_trans_log_inode(tp, ip, logflags); 1169 if (error) 1170 goto bmap_cancel; 1171 if (!xfs_sb_version_hasattr(&mp->m_sb) || 1172 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) { 1173 bool log_sb = false; 1174 1175 spin_lock(&mp->m_sb_lock); 1176 if (!xfs_sb_version_hasattr(&mp->m_sb)) { 1177 xfs_sb_version_addattr(&mp->m_sb); 1178 log_sb = true; 1179 } 1180 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) { 1181 xfs_sb_version_addattr2(&mp->m_sb); 1182 log_sb = true; 1183 } 1184 spin_unlock(&mp->m_sb_lock); 1185 if (log_sb) 1186 xfs_log_sb(tp); 1187 } 1188 1189 error = xfs_defer_finish(&tp, &dfops); 1190 if (error) 1191 goto bmap_cancel; 1192 error = xfs_trans_commit(tp); 1193 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1194 return error; 1195 1196 bmap_cancel: 1197 xfs_defer_cancel(&dfops); 1198 trans_cancel: 1199 xfs_trans_cancel(tp); 1200 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1201 return error; 1202 } 1203 1204 /* 1205 * Internal and external extent tree search functions. 1206 */ 1207 1208 /* 1209 * Read in the extents to if_extents. 1210 * All inode fields are set up by caller, we just traverse the btree 1211 * and copy the records in. If the file system cannot contain unwritten 1212 * extents, the records are checked for no "state" flags. 1213 */ 1214 int /* error */ 1215 xfs_bmap_read_extents( 1216 xfs_trans_t *tp, /* transaction pointer */ 1217 xfs_inode_t *ip, /* incore inode */ 1218 int whichfork) /* data or attr fork */ 1219 { 1220 struct xfs_btree_block *block; /* current btree block */ 1221 xfs_fsblock_t bno; /* block # of "block" */ 1222 xfs_buf_t *bp; /* buffer for "block" */ 1223 int error; /* error return value */ 1224 xfs_extnum_t i, j; /* index into the extents list */ 1225 xfs_ifork_t *ifp; /* fork structure */ 1226 int level; /* btree level, for checking */ 1227 xfs_mount_t *mp; /* file system mount structure */ 1228 __be64 *pp; /* pointer to block address */ 1229 /* REFERENCED */ 1230 xfs_extnum_t room; /* number of entries there's room for */ 1231 1232 mp = ip->i_mount; 1233 ifp = XFS_IFORK_PTR(ip, whichfork); 1234 block = ifp->if_broot; 1235 /* 1236 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. 1237 */ 1238 level = be16_to_cpu(block->bb_level); 1239 ASSERT(level > 0); 1240 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 1241 bno = be64_to_cpu(*pp); 1242 1243 /* 1244 * Go down the tree until leaf level is reached, following the first 1245 * pointer (leftmost) at each level. 1246 */ 1247 while (level-- > 0) { 1248 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, 1249 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops); 1250 if (error) 1251 return error; 1252 block = XFS_BUF_TO_BLOCK(bp); 1253 if (level == 0) 1254 break; 1255 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); 1256 bno = be64_to_cpu(*pp); 1257 XFS_WANT_CORRUPTED_GOTO(mp, 1258 XFS_FSB_SANITY_CHECK(mp, bno), error0); 1259 xfs_trans_brelse(tp, bp); 1260 } 1261 /* 1262 * Here with bp and block set to the leftmost leaf node in the tree. 1263 */ 1264 room = xfs_iext_count(ifp); 1265 i = 0; 1266 /* 1267 * Loop over all leaf nodes. Copy information to the extent records. 1268 */ 1269 for (;;) { 1270 xfs_bmbt_rec_t *frp; 1271 xfs_fsblock_t nextbno; 1272 xfs_extnum_t num_recs; 1273 1274 num_recs = xfs_btree_get_numrecs(block); 1275 if (unlikely(i + num_recs > room)) { 1276 ASSERT(i + num_recs <= room); 1277 xfs_warn(ip->i_mount, 1278 "corrupt dinode %Lu, (btree extents).", 1279 (unsigned long long) ip->i_ino); 1280 XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)", 1281 XFS_ERRLEVEL_LOW, ip->i_mount, block); 1282 goto error0; 1283 } 1284 /* 1285 * Read-ahead the next leaf block, if any. 1286 */ 1287 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); 1288 if (nextbno != NULLFSBLOCK) 1289 xfs_btree_reada_bufl(mp, nextbno, 1, 1290 &xfs_bmbt_buf_ops); 1291 /* 1292 * Copy records into the extent records. 1293 */ 1294 frp = XFS_BMBT_REC_ADDR(mp, block, 1); 1295 for (j = 0; j < num_recs; j++, i++, frp++) { 1296 xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i); 1297 trp->l0 = be64_to_cpu(frp->l0); 1298 trp->l1 = be64_to_cpu(frp->l1); 1299 if (!xfs_bmbt_validate_extent(mp, whichfork, trp)) { 1300 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)", 1301 XFS_ERRLEVEL_LOW, mp); 1302 goto error0; 1303 } 1304 } 1305 xfs_trans_brelse(tp, bp); 1306 bno = nextbno; 1307 /* 1308 * If we've reached the end, stop. 1309 */ 1310 if (bno == NULLFSBLOCK) 1311 break; 1312 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, 1313 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops); 1314 if (error) 1315 return error; 1316 block = XFS_BUF_TO_BLOCK(bp); 1317 } 1318 if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) 1319 return -EFSCORRUPTED; 1320 ASSERT(i == xfs_iext_count(ifp)); 1321 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork); 1322 return 0; 1323 error0: 1324 xfs_trans_brelse(tp, bp); 1325 return -EFSCORRUPTED; 1326 } 1327 1328 /* 1329 * Returns the file-relative block number of the first unused block(s) 1330 * in the file with at least "len" logically contiguous blocks free. 1331 * This is the lowest-address hole if the file has holes, else the first block 1332 * past the end of file. 1333 * Return 0 if the file is currently local (in-inode). 1334 */ 1335 int /* error */ 1336 xfs_bmap_first_unused( 1337 xfs_trans_t *tp, /* transaction pointer */ 1338 xfs_inode_t *ip, /* incore inode */ 1339 xfs_extlen_t len, /* size of hole to find */ 1340 xfs_fileoff_t *first_unused, /* unused block */ 1341 int whichfork) /* data or attr fork */ 1342 { 1343 int error; /* error return value */ 1344 int idx; /* extent record index */ 1345 xfs_ifork_t *ifp; /* inode fork pointer */ 1346 xfs_fileoff_t lastaddr; /* last block number seen */ 1347 xfs_fileoff_t lowest; /* lowest useful block */ 1348 xfs_fileoff_t max; /* starting useful block */ 1349 xfs_extnum_t nextents; /* number of extent entries */ 1350 1351 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE || 1352 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS || 1353 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 1354 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 1355 *first_unused = 0; 1356 return 0; 1357 } 1358 ifp = XFS_IFORK_PTR(ip, whichfork); 1359 if (!(ifp->if_flags & XFS_IFEXTENTS) && 1360 (error = xfs_iread_extents(tp, ip, whichfork))) 1361 return error; 1362 lowest = *first_unused; 1363 nextents = xfs_iext_count(ifp); 1364 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) { 1365 struct xfs_bmbt_irec got; 1366 1367 xfs_iext_get_extent(ifp, idx, &got); 1368 1369 /* 1370 * See if the hole before this extent will work. 1371 */ 1372 if (got.br_startoff >= lowest + len && 1373 got.br_startoff - max >= len) { 1374 *first_unused = max; 1375 return 0; 1376 } 1377 lastaddr = got.br_startoff + got.br_blockcount; 1378 max = XFS_FILEOFF_MAX(lastaddr, lowest); 1379 } 1380 *first_unused = max; 1381 return 0; 1382 } 1383 1384 /* 1385 * Returns the file-relative block number of the last block - 1 before 1386 * last_block (input value) in the file. 1387 * This is not based on i_size, it is based on the extent records. 1388 * Returns 0 for local files, as they do not have extent records. 1389 */ 1390 int /* error */ 1391 xfs_bmap_last_before( 1392 struct xfs_trans *tp, /* transaction pointer */ 1393 struct xfs_inode *ip, /* incore inode */ 1394 xfs_fileoff_t *last_block, /* last block */ 1395 int whichfork) /* data or attr fork */ 1396 { 1397 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 1398 struct xfs_bmbt_irec got; 1399 xfs_extnum_t idx; 1400 int error; 1401 1402 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 1403 case XFS_DINODE_FMT_LOCAL: 1404 *last_block = 0; 1405 return 0; 1406 case XFS_DINODE_FMT_BTREE: 1407 case XFS_DINODE_FMT_EXTENTS: 1408 break; 1409 default: 1410 return -EIO; 1411 } 1412 1413 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 1414 error = xfs_iread_extents(tp, ip, whichfork); 1415 if (error) 1416 return error; 1417 } 1418 1419 if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) { 1420 if (got.br_startoff <= *last_block - 1) 1421 return 0; 1422 } 1423 1424 if (xfs_iext_get_extent(ifp, idx - 1, &got)) { 1425 *last_block = got.br_startoff + got.br_blockcount; 1426 return 0; 1427 } 1428 1429 *last_block = 0; 1430 return 0; 1431 } 1432 1433 int 1434 xfs_bmap_last_extent( 1435 struct xfs_trans *tp, 1436 struct xfs_inode *ip, 1437 int whichfork, 1438 struct xfs_bmbt_irec *rec, 1439 int *is_empty) 1440 { 1441 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 1442 int error; 1443 int nextents; 1444 1445 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 1446 error = xfs_iread_extents(tp, ip, whichfork); 1447 if (error) 1448 return error; 1449 } 1450 1451 nextents = xfs_iext_count(ifp); 1452 if (nextents == 0) { 1453 *is_empty = 1; 1454 return 0; 1455 } 1456 1457 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec); 1458 *is_empty = 0; 1459 return 0; 1460 } 1461 1462 /* 1463 * Check the last inode extent to determine whether this allocation will result 1464 * in blocks being allocated at the end of the file. When we allocate new data 1465 * blocks at the end of the file which do not start at the previous data block, 1466 * we will try to align the new blocks at stripe unit boundaries. 1467 * 1468 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be 1469 * at, or past the EOF. 1470 */ 1471 STATIC int 1472 xfs_bmap_isaeof( 1473 struct xfs_bmalloca *bma, 1474 int whichfork) 1475 { 1476 struct xfs_bmbt_irec rec; 1477 int is_empty; 1478 int error; 1479 1480 bma->aeof = 0; 1481 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, 1482 &is_empty); 1483 if (error) 1484 return error; 1485 1486 if (is_empty) { 1487 bma->aeof = 1; 1488 return 0; 1489 } 1490 1491 /* 1492 * Check if we are allocation or past the last extent, or at least into 1493 * the last delayed allocated extent. 1494 */ 1495 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount || 1496 (bma->offset >= rec.br_startoff && 1497 isnullstartblock(rec.br_startblock)); 1498 return 0; 1499 } 1500 1501 /* 1502 * Returns the file-relative block number of the first block past eof in 1503 * the file. This is not based on i_size, it is based on the extent records. 1504 * Returns 0 for local files, as they do not have extent records. 1505 */ 1506 int 1507 xfs_bmap_last_offset( 1508 struct xfs_inode *ip, 1509 xfs_fileoff_t *last_block, 1510 int whichfork) 1511 { 1512 struct xfs_bmbt_irec rec; 1513 int is_empty; 1514 int error; 1515 1516 *last_block = 0; 1517 1518 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) 1519 return 0; 1520 1521 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && 1522 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 1523 return -EIO; 1524 1525 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); 1526 if (error || is_empty) 1527 return error; 1528 1529 *last_block = rec.br_startoff + rec.br_blockcount; 1530 return 0; 1531 } 1532 1533 /* 1534 * Returns whether the selected fork of the inode has exactly one 1535 * block or not. For the data fork we check this matches di_size, 1536 * implying the file's range is 0..bsize-1. 1537 */ 1538 int /* 1=>1 block, 0=>otherwise */ 1539 xfs_bmap_one_block( 1540 xfs_inode_t *ip, /* incore inode */ 1541 int whichfork) /* data or attr fork */ 1542 { 1543 xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */ 1544 xfs_ifork_t *ifp; /* inode fork pointer */ 1545 int rval; /* return value */ 1546 xfs_bmbt_irec_t s; /* internal version of extent */ 1547 1548 #ifndef DEBUG 1549 if (whichfork == XFS_DATA_FORK) 1550 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize; 1551 #endif /* !DEBUG */ 1552 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1) 1553 return 0; 1554 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 1555 return 0; 1556 ifp = XFS_IFORK_PTR(ip, whichfork); 1557 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 1558 ep = xfs_iext_get_ext(ifp, 0); 1559 xfs_bmbt_get_all(ep, &s); 1560 rval = s.br_startoff == 0 && s.br_blockcount == 1; 1561 if (rval && whichfork == XFS_DATA_FORK) 1562 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize); 1563 return rval; 1564 } 1565 1566 /* 1567 * Extent tree manipulation functions used during allocation. 1568 */ 1569 1570 /* 1571 * Convert a delayed allocation to a real allocation. 1572 */ 1573 STATIC int /* error */ 1574 xfs_bmap_add_extent_delay_real( 1575 struct xfs_bmalloca *bma, 1576 int whichfork) 1577 { 1578 struct xfs_bmbt_irec *new = &bma->got; 1579 int diff; /* temp value */ 1580 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ 1581 int error; /* error return value */ 1582 int i; /* temp state */ 1583 xfs_ifork_t *ifp; /* inode fork pointer */ 1584 xfs_fileoff_t new_endoff; /* end offset of new entry */ 1585 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 1586 /* left is 0, right is 1, prev is 2 */ 1587 int rval=0; /* return value (logging flags) */ 1588 int state = 0;/* state bits, accessed thru macros */ 1589 xfs_filblks_t da_new; /* new count del alloc blocks used */ 1590 xfs_filblks_t da_old; /* old count del alloc blocks used */ 1591 xfs_filblks_t temp=0; /* value for da_new calculations */ 1592 xfs_filblks_t temp2=0;/* value for da_new calculations */ 1593 int tmp_rval; /* partial logging flags */ 1594 struct xfs_mount *mp; 1595 xfs_extnum_t *nextents; 1596 1597 mp = bma->ip->i_mount; 1598 ifp = XFS_IFORK_PTR(bma->ip, whichfork); 1599 ASSERT(whichfork != XFS_ATTR_FORK); 1600 nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents : 1601 &bma->ip->i_d.di_nextents); 1602 1603 ASSERT(bma->idx >= 0); 1604 ASSERT(bma->idx <= xfs_iext_count(ifp)); 1605 ASSERT(!isnullstartblock(new->br_startblock)); 1606 ASSERT(!bma->cur || 1607 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 1608 1609 XFS_STATS_INC(mp, xs_add_exlist); 1610 1611 #define LEFT r[0] 1612 #define RIGHT r[1] 1613 #define PREV r[2] 1614 1615 if (whichfork == XFS_COW_FORK) 1616 state |= BMAP_COWFORK; 1617 1618 /* 1619 * Set up a bunch of variables to make the tests simpler. 1620 */ 1621 ep = xfs_iext_get_ext(ifp, bma->idx); 1622 xfs_bmbt_get_all(ep, &PREV); 1623 new_endoff = new->br_startoff + new->br_blockcount; 1624 ASSERT(PREV.br_startoff <= new->br_startoff); 1625 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 1626 1627 da_old = startblockval(PREV.br_startblock); 1628 da_new = 0; 1629 1630 /* 1631 * Set flags determining what part of the previous delayed allocation 1632 * extent is being replaced by a real allocation. 1633 */ 1634 if (PREV.br_startoff == new->br_startoff) 1635 state |= BMAP_LEFT_FILLING; 1636 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 1637 state |= BMAP_RIGHT_FILLING; 1638 1639 /* 1640 * Check and set flags if this segment has a left neighbor. 1641 * Don't set contiguous if the combined extent would be too large. 1642 */ 1643 if (bma->idx > 0) { 1644 state |= BMAP_LEFT_VALID; 1645 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT); 1646 1647 if (isnullstartblock(LEFT.br_startblock)) 1648 state |= BMAP_LEFT_DELAY; 1649 } 1650 1651 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 1652 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 1653 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 1654 LEFT.br_state == new->br_state && 1655 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 1656 state |= BMAP_LEFT_CONTIG; 1657 1658 /* 1659 * Check and set flags if this segment has a right neighbor. 1660 * Don't set contiguous if the combined extent would be too large. 1661 * Also check for all-three-contiguous being too large. 1662 */ 1663 if (bma->idx < xfs_iext_count(ifp) - 1) { 1664 state |= BMAP_RIGHT_VALID; 1665 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT); 1666 1667 if (isnullstartblock(RIGHT.br_startblock)) 1668 state |= BMAP_RIGHT_DELAY; 1669 } 1670 1671 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 1672 new_endoff == RIGHT.br_startoff && 1673 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 1674 new->br_state == RIGHT.br_state && 1675 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 1676 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 1677 BMAP_RIGHT_FILLING)) != 1678 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 1679 BMAP_RIGHT_FILLING) || 1680 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 1681 <= MAXEXTLEN)) 1682 state |= BMAP_RIGHT_CONTIG; 1683 1684 error = 0; 1685 /* 1686 * Switch out based on the FILLING and CONTIG state bits. 1687 */ 1688 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 1689 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 1690 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 1691 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1692 /* 1693 * Filling in all of a previously delayed allocation extent. 1694 * The left and right neighbors are both contiguous with new. 1695 */ 1696 bma->idx--; 1697 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1698 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 1699 LEFT.br_blockcount + PREV.br_blockcount + 1700 RIGHT.br_blockcount); 1701 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1702 1703 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state); 1704 (*nextents)--; 1705 if (bma->cur == NULL) 1706 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1707 else { 1708 rval = XFS_ILOG_CORE; 1709 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 1710 RIGHT.br_startblock, 1711 RIGHT.br_blockcount, &i); 1712 if (error) 1713 goto done; 1714 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1715 error = xfs_btree_delete(bma->cur, &i); 1716 if (error) 1717 goto done; 1718 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1719 error = xfs_btree_decrement(bma->cur, 0, &i); 1720 if (error) 1721 goto done; 1722 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1723 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 1724 LEFT.br_startblock, 1725 LEFT.br_blockcount + 1726 PREV.br_blockcount + 1727 RIGHT.br_blockcount, LEFT.br_state); 1728 if (error) 1729 goto done; 1730 } 1731 break; 1732 1733 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1734 /* 1735 * Filling in all of a previously delayed allocation extent. 1736 * The left neighbor is contiguous, the right is not. 1737 */ 1738 bma->idx--; 1739 1740 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1741 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 1742 LEFT.br_blockcount + PREV.br_blockcount); 1743 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1744 1745 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); 1746 if (bma->cur == NULL) 1747 rval = XFS_ILOG_DEXT; 1748 else { 1749 rval = 0; 1750 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, 1751 LEFT.br_startblock, LEFT.br_blockcount, 1752 &i); 1753 if (error) 1754 goto done; 1755 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1756 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 1757 LEFT.br_startblock, 1758 LEFT.br_blockcount + 1759 PREV.br_blockcount, LEFT.br_state); 1760 if (error) 1761 goto done; 1762 } 1763 break; 1764 1765 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1766 /* 1767 * Filling in all of a previously delayed allocation extent. 1768 * The right neighbor is contiguous, the left is not. 1769 */ 1770 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1771 xfs_bmbt_set_startblock(ep, new->br_startblock); 1772 xfs_bmbt_set_blockcount(ep, 1773 PREV.br_blockcount + RIGHT.br_blockcount); 1774 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1775 1776 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); 1777 if (bma->cur == NULL) 1778 rval = XFS_ILOG_DEXT; 1779 else { 1780 rval = 0; 1781 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 1782 RIGHT.br_startblock, 1783 RIGHT.br_blockcount, &i); 1784 if (error) 1785 goto done; 1786 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1787 error = xfs_bmbt_update(bma->cur, PREV.br_startoff, 1788 new->br_startblock, 1789 PREV.br_blockcount + 1790 RIGHT.br_blockcount, PREV.br_state); 1791 if (error) 1792 goto done; 1793 } 1794 break; 1795 1796 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 1797 /* 1798 * Filling in all of a previously delayed allocation extent. 1799 * Neither the left nor right neighbors are contiguous with 1800 * the new one. 1801 */ 1802 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1803 xfs_bmbt_set_startblock(ep, new->br_startblock); 1804 xfs_bmbt_set_state(ep, new->br_state); 1805 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1806 1807 (*nextents)++; 1808 if (bma->cur == NULL) 1809 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1810 else { 1811 rval = XFS_ILOG_CORE; 1812 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 1813 new->br_startblock, new->br_blockcount, 1814 &i); 1815 if (error) 1816 goto done; 1817 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1818 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 1819 error = xfs_btree_insert(bma->cur, &i); 1820 if (error) 1821 goto done; 1822 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1823 } 1824 break; 1825 1826 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 1827 /* 1828 * Filling in the first part of a previous delayed allocation. 1829 * The left neighbor is contiguous. 1830 */ 1831 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_); 1832 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1), 1833 LEFT.br_blockcount + new->br_blockcount); 1834 xfs_bmbt_set_startoff(ep, 1835 PREV.br_startoff + new->br_blockcount); 1836 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_); 1837 1838 temp = PREV.br_blockcount - new->br_blockcount; 1839 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1840 xfs_bmbt_set_blockcount(ep, temp); 1841 if (bma->cur == NULL) 1842 rval = XFS_ILOG_DEXT; 1843 else { 1844 rval = 0; 1845 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, 1846 LEFT.br_startblock, LEFT.br_blockcount, 1847 &i); 1848 if (error) 1849 goto done; 1850 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1851 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 1852 LEFT.br_startblock, 1853 LEFT.br_blockcount + 1854 new->br_blockcount, 1855 LEFT.br_state); 1856 if (error) 1857 goto done; 1858 } 1859 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1860 startblockval(PREV.br_startblock)); 1861 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 1862 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1863 1864 bma->idx--; 1865 break; 1866 1867 case BMAP_LEFT_FILLING: 1868 /* 1869 * Filling in the first part of a previous delayed allocation. 1870 * The left neighbor is not contiguous. 1871 */ 1872 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1873 xfs_bmbt_set_startoff(ep, new_endoff); 1874 temp = PREV.br_blockcount - new->br_blockcount; 1875 xfs_bmbt_set_blockcount(ep, temp); 1876 xfs_iext_insert(bma->ip, bma->idx, 1, new, state); 1877 (*nextents)++; 1878 if (bma->cur == NULL) 1879 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1880 else { 1881 rval = XFS_ILOG_CORE; 1882 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 1883 new->br_startblock, new->br_blockcount, 1884 &i); 1885 if (error) 1886 goto done; 1887 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1888 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 1889 error = xfs_btree_insert(bma->cur, &i); 1890 if (error) 1891 goto done; 1892 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1893 } 1894 1895 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1896 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1897 bma->firstblock, bma->dfops, 1898 &bma->cur, 1, &tmp_rval, whichfork); 1899 rval |= tmp_rval; 1900 if (error) 1901 goto done; 1902 } 1903 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1904 startblockval(PREV.br_startblock) - 1905 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 1906 ep = xfs_iext_get_ext(ifp, bma->idx + 1); 1907 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 1908 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 1909 break; 1910 1911 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1912 /* 1913 * Filling in the last part of a previous delayed allocation. 1914 * The right neighbor is contiguous with the new allocation. 1915 */ 1916 temp = PREV.br_blockcount - new->br_blockcount; 1917 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 1918 xfs_bmbt_set_blockcount(ep, temp); 1919 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1), 1920 new->br_startoff, new->br_startblock, 1921 new->br_blockcount + RIGHT.br_blockcount, 1922 RIGHT.br_state); 1923 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 1924 if (bma->cur == NULL) 1925 rval = XFS_ILOG_DEXT; 1926 else { 1927 rval = 0; 1928 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 1929 RIGHT.br_startblock, 1930 RIGHT.br_blockcount, &i); 1931 if (error) 1932 goto done; 1933 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1934 error = xfs_bmbt_update(bma->cur, new->br_startoff, 1935 new->br_startblock, 1936 new->br_blockcount + 1937 RIGHT.br_blockcount, 1938 RIGHT.br_state); 1939 if (error) 1940 goto done; 1941 } 1942 1943 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1944 startblockval(PREV.br_startblock)); 1945 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1946 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 1947 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1948 1949 bma->idx++; 1950 break; 1951 1952 case BMAP_RIGHT_FILLING: 1953 /* 1954 * Filling in the last part of a previous delayed allocation. 1955 * The right neighbor is not contiguous. 1956 */ 1957 temp = PREV.br_blockcount - new->br_blockcount; 1958 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1959 xfs_bmbt_set_blockcount(ep, temp); 1960 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state); 1961 (*nextents)++; 1962 if (bma->cur == NULL) 1963 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1964 else { 1965 rval = XFS_ILOG_CORE; 1966 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 1967 new->br_startblock, new->br_blockcount, 1968 &i); 1969 if (error) 1970 goto done; 1971 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1972 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 1973 error = xfs_btree_insert(bma->cur, &i); 1974 if (error) 1975 goto done; 1976 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1977 } 1978 1979 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1980 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1981 bma->firstblock, bma->dfops, &bma->cur, 1, 1982 &tmp_rval, whichfork); 1983 rval |= tmp_rval; 1984 if (error) 1985 goto done; 1986 } 1987 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1988 startblockval(PREV.br_startblock) - 1989 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 1990 ep = xfs_iext_get_ext(ifp, bma->idx); 1991 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 1992 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1993 1994 bma->idx++; 1995 break; 1996 1997 case 0: 1998 /* 1999 * Filling in the middle part of a previous delayed allocation. 2000 * Contiguity is impossible here. 2001 * This case is avoided almost all the time. 2002 * 2003 * We start with a delayed allocation: 2004 * 2005 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 2006 * PREV @ idx 2007 * 2008 * and we are allocating: 2009 * +rrrrrrrrrrrrrrrrr+ 2010 * new 2011 * 2012 * and we set it up for insertion as: 2013 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 2014 * new 2015 * PREV @ idx LEFT RIGHT 2016 * inserted at idx + 1 2017 */ 2018 temp = new->br_startoff - PREV.br_startoff; 2019 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; 2020 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_); 2021 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ 2022 LEFT = *new; 2023 RIGHT.br_state = PREV.br_state; 2024 RIGHT.br_startblock = nullstartblock( 2025 (int)xfs_bmap_worst_indlen(bma->ip, temp2)); 2026 RIGHT.br_startoff = new_endoff; 2027 RIGHT.br_blockcount = temp2; 2028 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ 2029 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state); 2030 (*nextents)++; 2031 if (bma->cur == NULL) 2032 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2033 else { 2034 rval = XFS_ILOG_CORE; 2035 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 2036 new->br_startblock, new->br_blockcount, 2037 &i); 2038 if (error) 2039 goto done; 2040 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2041 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 2042 error = xfs_btree_insert(bma->cur, &i); 2043 if (error) 2044 goto done; 2045 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2046 } 2047 2048 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 2049 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2050 bma->firstblock, bma->dfops, &bma->cur, 2051 1, &tmp_rval, whichfork); 2052 rval |= tmp_rval; 2053 if (error) 2054 goto done; 2055 } 2056 temp = xfs_bmap_worst_indlen(bma->ip, temp); 2057 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); 2058 diff = (int)(temp + temp2 - 2059 (startblockval(PREV.br_startblock) - 2060 (bma->cur ? 2061 bma->cur->bc_private.b.allocated : 0))); 2062 if (diff > 0) { 2063 error = xfs_mod_fdblocks(bma->ip->i_mount, 2064 -((int64_t)diff), false); 2065 ASSERT(!error); 2066 if (error) 2067 goto done; 2068 } 2069 2070 ep = xfs_iext_get_ext(ifp, bma->idx); 2071 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 2072 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 2073 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_); 2074 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2), 2075 nullstartblock((int)temp2)); 2076 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_); 2077 2078 bma->idx++; 2079 da_new = temp + temp2; 2080 break; 2081 2082 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2083 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2084 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2085 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2086 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2087 case BMAP_LEFT_CONTIG: 2088 case BMAP_RIGHT_CONTIG: 2089 /* 2090 * These cases are all impossible. 2091 */ 2092 ASSERT(0); 2093 } 2094 2095 /* add reverse mapping */ 2096 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new); 2097 if (error) 2098 goto done; 2099 2100 /* convert to a btree if necessary */ 2101 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 2102 int tmp_logflags; /* partial log flag return val */ 2103 2104 ASSERT(bma->cur == NULL); 2105 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2106 bma->firstblock, bma->dfops, &bma->cur, 2107 da_old > 0, &tmp_logflags, whichfork); 2108 bma->logflags |= tmp_logflags; 2109 if (error) 2110 goto done; 2111 } 2112 2113 /* adjust for changes in reserved delayed indirect blocks */ 2114 if (da_old || da_new) { 2115 temp = da_new; 2116 if (bma->cur) 2117 temp += bma->cur->bc_private.b.allocated; 2118 if (temp < da_old) 2119 xfs_mod_fdblocks(bma->ip->i_mount, 2120 (int64_t)(da_old - temp), false); 2121 } 2122 2123 /* clear out the allocated field, done with it now in any case. */ 2124 if (bma->cur) 2125 bma->cur->bc_private.b.allocated = 0; 2126 2127 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 2128 done: 2129 if (whichfork != XFS_COW_FORK) 2130 bma->logflags |= rval; 2131 return error; 2132 #undef LEFT 2133 #undef RIGHT 2134 #undef PREV 2135 } 2136 2137 /* 2138 * Convert an unwritten allocation to a real allocation or vice versa. 2139 */ 2140 STATIC int /* error */ 2141 xfs_bmap_add_extent_unwritten_real( 2142 struct xfs_trans *tp, 2143 xfs_inode_t *ip, /* incore inode pointer */ 2144 int whichfork, 2145 xfs_extnum_t *idx, /* extent number to update/insert */ 2146 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 2147 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 2148 xfs_fsblock_t *first, /* pointer to firstblock variable */ 2149 struct xfs_defer_ops *dfops, /* list of extents to be freed */ 2150 int *logflagsp) /* inode logging flags */ 2151 { 2152 xfs_btree_cur_t *cur; /* btree cursor */ 2153 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ 2154 int error; /* error return value */ 2155 int i; /* temp state */ 2156 xfs_ifork_t *ifp; /* inode fork pointer */ 2157 xfs_fileoff_t new_endoff; /* end offset of new entry */ 2158 xfs_exntst_t newext; /* new extent state */ 2159 xfs_exntst_t oldext; /* old extent state */ 2160 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 2161 /* left is 0, right is 1, prev is 2 */ 2162 int rval=0; /* return value (logging flags) */ 2163 int state = 0;/* state bits, accessed thru macros */ 2164 struct xfs_mount *mp = ip->i_mount; 2165 2166 *logflagsp = 0; 2167 2168 cur = *curp; 2169 ifp = XFS_IFORK_PTR(ip, whichfork); 2170 if (whichfork == XFS_COW_FORK) 2171 state |= BMAP_COWFORK; 2172 2173 ASSERT(*idx >= 0); 2174 ASSERT(*idx <= xfs_iext_count(ifp)); 2175 ASSERT(!isnullstartblock(new->br_startblock)); 2176 2177 XFS_STATS_INC(mp, xs_add_exlist); 2178 2179 #define LEFT r[0] 2180 #define RIGHT r[1] 2181 #define PREV r[2] 2182 2183 /* 2184 * Set up a bunch of variables to make the tests simpler. 2185 */ 2186 error = 0; 2187 ep = xfs_iext_get_ext(ifp, *idx); 2188 xfs_bmbt_get_all(ep, &PREV); 2189 newext = new->br_state; 2190 oldext = (newext == XFS_EXT_UNWRITTEN) ? 2191 XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 2192 ASSERT(PREV.br_state == oldext); 2193 new_endoff = new->br_startoff + new->br_blockcount; 2194 ASSERT(PREV.br_startoff <= new->br_startoff); 2195 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2196 2197 /* 2198 * Set flags determining what part of the previous oldext allocation 2199 * extent is being replaced by a newext allocation. 2200 */ 2201 if (PREV.br_startoff == new->br_startoff) 2202 state |= BMAP_LEFT_FILLING; 2203 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2204 state |= BMAP_RIGHT_FILLING; 2205 2206 /* 2207 * Check and set flags if this segment has a left neighbor. 2208 * Don't set contiguous if the combined extent would be too large. 2209 */ 2210 if (*idx > 0) { 2211 state |= BMAP_LEFT_VALID; 2212 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT); 2213 2214 if (isnullstartblock(LEFT.br_startblock)) 2215 state |= BMAP_LEFT_DELAY; 2216 } 2217 2218 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2219 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2220 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2221 LEFT.br_state == newext && 2222 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2223 state |= BMAP_LEFT_CONTIG; 2224 2225 /* 2226 * Check and set flags if this segment has a right neighbor. 2227 * Don't set contiguous if the combined extent would be too large. 2228 * Also check for all-three-contiguous being too large. 2229 */ 2230 if (*idx < xfs_iext_count(ifp) - 1) { 2231 state |= BMAP_RIGHT_VALID; 2232 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT); 2233 if (isnullstartblock(RIGHT.br_startblock)) 2234 state |= BMAP_RIGHT_DELAY; 2235 } 2236 2237 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2238 new_endoff == RIGHT.br_startoff && 2239 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2240 newext == RIGHT.br_state && 2241 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 2242 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2243 BMAP_RIGHT_FILLING)) != 2244 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2245 BMAP_RIGHT_FILLING) || 2246 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2247 <= MAXEXTLEN)) 2248 state |= BMAP_RIGHT_CONTIG; 2249 2250 /* 2251 * Switch out based on the FILLING and CONTIG state bits. 2252 */ 2253 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2254 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2255 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2256 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2257 /* 2258 * Setting all of a previous oldext extent to newext. 2259 * The left and right neighbors are both contiguous with new. 2260 */ 2261 --*idx; 2262 2263 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2264 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2265 LEFT.br_blockcount + PREV.br_blockcount + 2266 RIGHT.br_blockcount); 2267 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2268 2269 xfs_iext_remove(ip, *idx + 1, 2, state); 2270 XFS_IFORK_NEXT_SET(ip, whichfork, 2271 XFS_IFORK_NEXTENTS(ip, whichfork) - 2); 2272 if (cur == NULL) 2273 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2274 else { 2275 rval = XFS_ILOG_CORE; 2276 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, 2277 RIGHT.br_startblock, 2278 RIGHT.br_blockcount, &i))) 2279 goto done; 2280 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2281 if ((error = xfs_btree_delete(cur, &i))) 2282 goto done; 2283 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2284 if ((error = xfs_btree_decrement(cur, 0, &i))) 2285 goto done; 2286 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2287 if ((error = xfs_btree_delete(cur, &i))) 2288 goto done; 2289 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2290 if ((error = xfs_btree_decrement(cur, 0, &i))) 2291 goto done; 2292 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2293 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 2294 LEFT.br_startblock, 2295 LEFT.br_blockcount + PREV.br_blockcount + 2296 RIGHT.br_blockcount, LEFT.br_state))) 2297 goto done; 2298 } 2299 break; 2300 2301 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2302 /* 2303 * Setting all of a previous oldext extent to newext. 2304 * The left neighbor is contiguous, the right is not. 2305 */ 2306 --*idx; 2307 2308 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2309 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2310 LEFT.br_blockcount + PREV.br_blockcount); 2311 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2312 2313 xfs_iext_remove(ip, *idx + 1, 1, state); 2314 XFS_IFORK_NEXT_SET(ip, whichfork, 2315 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2316 if (cur == NULL) 2317 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2318 else { 2319 rval = XFS_ILOG_CORE; 2320 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2321 PREV.br_startblock, PREV.br_blockcount, 2322 &i))) 2323 goto done; 2324 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2325 if ((error = xfs_btree_delete(cur, &i))) 2326 goto done; 2327 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2328 if ((error = xfs_btree_decrement(cur, 0, &i))) 2329 goto done; 2330 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2331 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 2332 LEFT.br_startblock, 2333 LEFT.br_blockcount + PREV.br_blockcount, 2334 LEFT.br_state))) 2335 goto done; 2336 } 2337 break; 2338 2339 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2340 /* 2341 * Setting all of a previous oldext extent to newext. 2342 * The right neighbor is contiguous, the left is not. 2343 */ 2344 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2345 xfs_bmbt_set_blockcount(ep, 2346 PREV.br_blockcount + RIGHT.br_blockcount); 2347 xfs_bmbt_set_state(ep, newext); 2348 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2349 xfs_iext_remove(ip, *idx + 1, 1, state); 2350 XFS_IFORK_NEXT_SET(ip, whichfork, 2351 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2352 if (cur == NULL) 2353 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2354 else { 2355 rval = XFS_ILOG_CORE; 2356 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, 2357 RIGHT.br_startblock, 2358 RIGHT.br_blockcount, &i))) 2359 goto done; 2360 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2361 if ((error = xfs_btree_delete(cur, &i))) 2362 goto done; 2363 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2364 if ((error = xfs_btree_decrement(cur, 0, &i))) 2365 goto done; 2366 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2367 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2368 new->br_startblock, 2369 new->br_blockcount + RIGHT.br_blockcount, 2370 newext))) 2371 goto done; 2372 } 2373 break; 2374 2375 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2376 /* 2377 * Setting all of a previous oldext extent to newext. 2378 * Neither the left nor right neighbors are contiguous with 2379 * the new one. 2380 */ 2381 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2382 xfs_bmbt_set_state(ep, newext); 2383 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2384 2385 if (cur == NULL) 2386 rval = XFS_ILOG_DEXT; 2387 else { 2388 rval = 0; 2389 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 2390 new->br_startblock, new->br_blockcount, 2391 &i))) 2392 goto done; 2393 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2394 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2395 new->br_startblock, new->br_blockcount, 2396 newext))) 2397 goto done; 2398 } 2399 break; 2400 2401 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2402 /* 2403 * Setting the first part of a previous oldext extent to newext. 2404 * The left neighbor is contiguous. 2405 */ 2406 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_); 2407 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1), 2408 LEFT.br_blockcount + new->br_blockcount); 2409 xfs_bmbt_set_startoff(ep, 2410 PREV.br_startoff + new->br_blockcount); 2411 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_); 2412 2413 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2414 xfs_bmbt_set_startblock(ep, 2415 new->br_startblock + new->br_blockcount); 2416 xfs_bmbt_set_blockcount(ep, 2417 PREV.br_blockcount - new->br_blockcount); 2418 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2419 2420 --*idx; 2421 2422 if (cur == NULL) 2423 rval = XFS_ILOG_DEXT; 2424 else { 2425 rval = 0; 2426 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2427 PREV.br_startblock, PREV.br_blockcount, 2428 &i))) 2429 goto done; 2430 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2431 if ((error = xfs_bmbt_update(cur, 2432 PREV.br_startoff + new->br_blockcount, 2433 PREV.br_startblock + new->br_blockcount, 2434 PREV.br_blockcount - new->br_blockcount, 2435 oldext))) 2436 goto done; 2437 if ((error = xfs_btree_decrement(cur, 0, &i))) 2438 goto done; 2439 error = xfs_bmbt_update(cur, LEFT.br_startoff, 2440 LEFT.br_startblock, 2441 LEFT.br_blockcount + new->br_blockcount, 2442 LEFT.br_state); 2443 if (error) 2444 goto done; 2445 } 2446 break; 2447 2448 case BMAP_LEFT_FILLING: 2449 /* 2450 * Setting the first part of a previous oldext extent to newext. 2451 * The left neighbor is not contiguous. 2452 */ 2453 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2454 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); 2455 xfs_bmbt_set_startoff(ep, new_endoff); 2456 xfs_bmbt_set_blockcount(ep, 2457 PREV.br_blockcount - new->br_blockcount); 2458 xfs_bmbt_set_startblock(ep, 2459 new->br_startblock + new->br_blockcount); 2460 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2461 2462 xfs_iext_insert(ip, *idx, 1, new, state); 2463 XFS_IFORK_NEXT_SET(ip, whichfork, 2464 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2465 if (cur == NULL) 2466 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2467 else { 2468 rval = XFS_ILOG_CORE; 2469 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2470 PREV.br_startblock, PREV.br_blockcount, 2471 &i))) 2472 goto done; 2473 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2474 if ((error = xfs_bmbt_update(cur, 2475 PREV.br_startoff + new->br_blockcount, 2476 PREV.br_startblock + new->br_blockcount, 2477 PREV.br_blockcount - new->br_blockcount, 2478 oldext))) 2479 goto done; 2480 cur->bc_rec.b = *new; 2481 if ((error = xfs_btree_insert(cur, &i))) 2482 goto done; 2483 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2484 } 2485 break; 2486 2487 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2488 /* 2489 * Setting the last part of a previous oldext extent to newext. 2490 * The right neighbor is contiguous with the new allocation. 2491 */ 2492 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2493 xfs_bmbt_set_blockcount(ep, 2494 PREV.br_blockcount - new->br_blockcount); 2495 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2496 2497 ++*idx; 2498 2499 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2500 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), 2501 new->br_startoff, new->br_startblock, 2502 new->br_blockcount + RIGHT.br_blockcount, newext); 2503 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2504 2505 if (cur == NULL) 2506 rval = XFS_ILOG_DEXT; 2507 else { 2508 rval = 0; 2509 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2510 PREV.br_startblock, 2511 PREV.br_blockcount, &i))) 2512 goto done; 2513 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2514 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 2515 PREV.br_startblock, 2516 PREV.br_blockcount - new->br_blockcount, 2517 oldext))) 2518 goto done; 2519 if ((error = xfs_btree_increment(cur, 0, &i))) 2520 goto done; 2521 if ((error = xfs_bmbt_update(cur, new->br_startoff, 2522 new->br_startblock, 2523 new->br_blockcount + RIGHT.br_blockcount, 2524 newext))) 2525 goto done; 2526 } 2527 break; 2528 2529 case BMAP_RIGHT_FILLING: 2530 /* 2531 * Setting the last part of a previous oldext extent to newext. 2532 * The right neighbor is not contiguous. 2533 */ 2534 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2535 xfs_bmbt_set_blockcount(ep, 2536 PREV.br_blockcount - new->br_blockcount); 2537 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2538 2539 ++*idx; 2540 xfs_iext_insert(ip, *idx, 1, new, state); 2541 2542 XFS_IFORK_NEXT_SET(ip, whichfork, 2543 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2544 if (cur == NULL) 2545 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2546 else { 2547 rval = XFS_ILOG_CORE; 2548 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2549 PREV.br_startblock, PREV.br_blockcount, 2550 &i))) 2551 goto done; 2552 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2553 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 2554 PREV.br_startblock, 2555 PREV.br_blockcount - new->br_blockcount, 2556 oldext))) 2557 goto done; 2558 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 2559 new->br_startblock, new->br_blockcount, 2560 &i))) 2561 goto done; 2562 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2563 cur->bc_rec.b.br_state = XFS_EXT_NORM; 2564 if ((error = xfs_btree_insert(cur, &i))) 2565 goto done; 2566 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2567 } 2568 break; 2569 2570 case 0: 2571 /* 2572 * Setting the middle part of a previous oldext extent to 2573 * newext. Contiguity is impossible here. 2574 * One extent becomes three extents. 2575 */ 2576 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2577 xfs_bmbt_set_blockcount(ep, 2578 new->br_startoff - PREV.br_startoff); 2579 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2580 2581 r[0] = *new; 2582 r[1].br_startoff = new_endoff; 2583 r[1].br_blockcount = 2584 PREV.br_startoff + PREV.br_blockcount - new_endoff; 2585 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2586 r[1].br_state = oldext; 2587 2588 ++*idx; 2589 xfs_iext_insert(ip, *idx, 2, &r[0], state); 2590 2591 XFS_IFORK_NEXT_SET(ip, whichfork, 2592 XFS_IFORK_NEXTENTS(ip, whichfork) + 2); 2593 if (cur == NULL) 2594 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2595 else { 2596 rval = XFS_ILOG_CORE; 2597 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 2598 PREV.br_startblock, PREV.br_blockcount, 2599 &i))) 2600 goto done; 2601 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2602 /* new right extent - oldext */ 2603 if ((error = xfs_bmbt_update(cur, r[1].br_startoff, 2604 r[1].br_startblock, r[1].br_blockcount, 2605 r[1].br_state))) 2606 goto done; 2607 /* new left extent - oldext */ 2608 cur->bc_rec.b = PREV; 2609 cur->bc_rec.b.br_blockcount = 2610 new->br_startoff - PREV.br_startoff; 2611 if ((error = xfs_btree_insert(cur, &i))) 2612 goto done; 2613 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2614 /* 2615 * Reset the cursor to the position of the new extent 2616 * we are about to insert as we can't trust it after 2617 * the previous insert. 2618 */ 2619 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 2620 new->br_startblock, new->br_blockcount, 2621 &i))) 2622 goto done; 2623 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2624 /* new middle extent - newext */ 2625 cur->bc_rec.b.br_state = new->br_state; 2626 if ((error = xfs_btree_insert(cur, &i))) 2627 goto done; 2628 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2629 } 2630 break; 2631 2632 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2633 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2634 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2635 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2636 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2637 case BMAP_LEFT_CONTIG: 2638 case BMAP_RIGHT_CONTIG: 2639 /* 2640 * These cases are all impossible. 2641 */ 2642 ASSERT(0); 2643 } 2644 2645 /* update reverse mappings */ 2646 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new); 2647 if (error) 2648 goto done; 2649 2650 /* convert to a btree if necessary */ 2651 if (xfs_bmap_needs_btree(ip, whichfork)) { 2652 int tmp_logflags; /* partial log flag return val */ 2653 2654 ASSERT(cur == NULL); 2655 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur, 2656 0, &tmp_logflags, whichfork); 2657 *logflagsp |= tmp_logflags; 2658 if (error) 2659 goto done; 2660 } 2661 2662 /* clear out the allocated field, done with it now in any case. */ 2663 if (cur) { 2664 cur->bc_private.b.allocated = 0; 2665 *curp = cur; 2666 } 2667 2668 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2669 done: 2670 *logflagsp |= rval; 2671 return error; 2672 #undef LEFT 2673 #undef RIGHT 2674 #undef PREV 2675 } 2676 2677 /* 2678 * Convert a hole to a delayed allocation. 2679 */ 2680 STATIC void 2681 xfs_bmap_add_extent_hole_delay( 2682 xfs_inode_t *ip, /* incore inode pointer */ 2683 int whichfork, 2684 xfs_extnum_t *idx, /* extent number to update/insert */ 2685 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2686 { 2687 xfs_ifork_t *ifp; /* inode fork pointer */ 2688 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2689 xfs_filblks_t newlen=0; /* new indirect size */ 2690 xfs_filblks_t oldlen=0; /* old indirect size */ 2691 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2692 int state; /* state bits, accessed thru macros */ 2693 xfs_filblks_t temp=0; /* temp for indirect calculations */ 2694 2695 ifp = XFS_IFORK_PTR(ip, whichfork); 2696 state = 0; 2697 if (whichfork == XFS_COW_FORK) 2698 state |= BMAP_COWFORK; 2699 ASSERT(isnullstartblock(new->br_startblock)); 2700 2701 /* 2702 * Check and set flags if this segment has a left neighbor 2703 */ 2704 if (*idx > 0) { 2705 state |= BMAP_LEFT_VALID; 2706 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left); 2707 2708 if (isnullstartblock(left.br_startblock)) 2709 state |= BMAP_LEFT_DELAY; 2710 } 2711 2712 /* 2713 * Check and set flags if the current (right) segment exists. 2714 * If it doesn't exist, we're converting the hole at end-of-file. 2715 */ 2716 if (*idx < xfs_iext_count(ifp)) { 2717 state |= BMAP_RIGHT_VALID; 2718 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right); 2719 2720 if (isnullstartblock(right.br_startblock)) 2721 state |= BMAP_RIGHT_DELAY; 2722 } 2723 2724 /* 2725 * Set contiguity flags on the left and right neighbors. 2726 * Don't let extents get too large, even if the pieces are contiguous. 2727 */ 2728 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2729 left.br_startoff + left.br_blockcount == new->br_startoff && 2730 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2731 state |= BMAP_LEFT_CONTIG; 2732 2733 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2734 new->br_startoff + new->br_blockcount == right.br_startoff && 2735 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2736 (!(state & BMAP_LEFT_CONTIG) || 2737 (left.br_blockcount + new->br_blockcount + 2738 right.br_blockcount <= MAXEXTLEN))) 2739 state |= BMAP_RIGHT_CONTIG; 2740 2741 /* 2742 * Switch out based on the contiguity flags. 2743 */ 2744 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2745 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2746 /* 2747 * New allocation is contiguous with delayed allocations 2748 * on the left and on the right. 2749 * Merge all three into a single extent record. 2750 */ 2751 --*idx; 2752 temp = left.br_blockcount + new->br_blockcount + 2753 right.br_blockcount; 2754 2755 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2756 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp); 2757 oldlen = startblockval(left.br_startblock) + 2758 startblockval(new->br_startblock) + 2759 startblockval(right.br_startblock); 2760 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2761 oldlen); 2762 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx), 2763 nullstartblock((int)newlen)); 2764 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2765 2766 xfs_iext_remove(ip, *idx + 1, 1, state); 2767 break; 2768 2769 case BMAP_LEFT_CONTIG: 2770 /* 2771 * New allocation is contiguous with a delayed allocation 2772 * on the left. 2773 * Merge the new allocation with the left neighbor. 2774 */ 2775 --*idx; 2776 temp = left.br_blockcount + new->br_blockcount; 2777 2778 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2779 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp); 2780 oldlen = startblockval(left.br_startblock) + 2781 startblockval(new->br_startblock); 2782 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2783 oldlen); 2784 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx), 2785 nullstartblock((int)newlen)); 2786 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2787 break; 2788 2789 case BMAP_RIGHT_CONTIG: 2790 /* 2791 * New allocation is contiguous with a delayed allocation 2792 * on the right. 2793 * Merge the new allocation with the right neighbor. 2794 */ 2795 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2796 temp = new->br_blockcount + right.br_blockcount; 2797 oldlen = startblockval(new->br_startblock) + 2798 startblockval(right.br_startblock); 2799 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2800 oldlen); 2801 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), 2802 new->br_startoff, 2803 nullstartblock((int)newlen), temp, right.br_state); 2804 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2805 break; 2806 2807 case 0: 2808 /* 2809 * New allocation is not contiguous with another 2810 * delayed allocation. 2811 * Insert a new entry. 2812 */ 2813 oldlen = newlen = 0; 2814 xfs_iext_insert(ip, *idx, 1, new, state); 2815 break; 2816 } 2817 if (oldlen != newlen) { 2818 ASSERT(oldlen > newlen); 2819 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2820 false); 2821 /* 2822 * Nothing to do for disk quota accounting here. 2823 */ 2824 } 2825 } 2826 2827 /* 2828 * Convert a hole to a real allocation. 2829 */ 2830 STATIC int /* error */ 2831 xfs_bmap_add_extent_hole_real( 2832 struct xfs_trans *tp, 2833 struct xfs_inode *ip, 2834 int whichfork, 2835 xfs_extnum_t *idx, 2836 struct xfs_btree_cur **curp, 2837 struct xfs_bmbt_irec *new, 2838 xfs_fsblock_t *first, 2839 struct xfs_defer_ops *dfops, 2840 int *logflagsp) 2841 { 2842 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 2843 struct xfs_mount *mp = ip->i_mount; 2844 struct xfs_btree_cur *cur = *curp; 2845 int error; /* error return value */ 2846 int i; /* temp state */ 2847 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2848 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2849 int rval=0; /* return value (logging flags) */ 2850 int state; /* state bits, accessed thru macros */ 2851 2852 ASSERT(*idx >= 0); 2853 ASSERT(*idx <= xfs_iext_count(ifp)); 2854 ASSERT(!isnullstartblock(new->br_startblock)); 2855 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 2856 2857 XFS_STATS_INC(mp, xs_add_exlist); 2858 2859 state = 0; 2860 if (whichfork == XFS_ATTR_FORK) 2861 state |= BMAP_ATTRFORK; 2862 if (whichfork == XFS_COW_FORK) 2863 state |= BMAP_COWFORK; 2864 2865 /* 2866 * Check and set flags if this segment has a left neighbor. 2867 */ 2868 if (*idx > 0) { 2869 state |= BMAP_LEFT_VALID; 2870 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left); 2871 if (isnullstartblock(left.br_startblock)) 2872 state |= BMAP_LEFT_DELAY; 2873 } 2874 2875 /* 2876 * Check and set flags if this segment has a current value. 2877 * Not true if we're inserting into the "hole" at eof. 2878 */ 2879 if (*idx < xfs_iext_count(ifp)) { 2880 state |= BMAP_RIGHT_VALID; 2881 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right); 2882 if (isnullstartblock(right.br_startblock)) 2883 state |= BMAP_RIGHT_DELAY; 2884 } 2885 2886 /* 2887 * We're inserting a real allocation between "left" and "right". 2888 * Set the contiguity flags. Don't let extents get too large. 2889 */ 2890 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2891 left.br_startoff + left.br_blockcount == new->br_startoff && 2892 left.br_startblock + left.br_blockcount == new->br_startblock && 2893 left.br_state == new->br_state && 2894 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2895 state |= BMAP_LEFT_CONTIG; 2896 2897 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2898 new->br_startoff + new->br_blockcount == right.br_startoff && 2899 new->br_startblock + new->br_blockcount == right.br_startblock && 2900 new->br_state == right.br_state && 2901 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2902 (!(state & BMAP_LEFT_CONTIG) || 2903 left.br_blockcount + new->br_blockcount + 2904 right.br_blockcount <= MAXEXTLEN)) 2905 state |= BMAP_RIGHT_CONTIG; 2906 2907 error = 0; 2908 /* 2909 * Select which case we're in here, and implement it. 2910 */ 2911 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2912 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2913 /* 2914 * New allocation is contiguous with real allocations on the 2915 * left and on the right. 2916 * Merge all three into a single extent record. 2917 */ 2918 --*idx; 2919 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2920 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2921 left.br_blockcount + new->br_blockcount + 2922 right.br_blockcount); 2923 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2924 2925 xfs_iext_remove(ip, *idx + 1, 1, state); 2926 2927 XFS_IFORK_NEXT_SET(ip, whichfork, 2928 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2929 if (cur == NULL) { 2930 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2931 } else { 2932 rval = XFS_ILOG_CORE; 2933 error = xfs_bmbt_lookup_eq(cur, right.br_startoff, 2934 right.br_startblock, right.br_blockcount, 2935 &i); 2936 if (error) 2937 goto done; 2938 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2939 error = xfs_btree_delete(cur, &i); 2940 if (error) 2941 goto done; 2942 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2943 error = xfs_btree_decrement(cur, 0, &i); 2944 if (error) 2945 goto done; 2946 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2947 error = xfs_bmbt_update(cur, left.br_startoff, 2948 left.br_startblock, 2949 left.br_blockcount + 2950 new->br_blockcount + 2951 right.br_blockcount, 2952 left.br_state); 2953 if (error) 2954 goto done; 2955 } 2956 break; 2957 2958 case BMAP_LEFT_CONTIG: 2959 /* 2960 * New allocation is contiguous with a real allocation 2961 * on the left. 2962 * Merge the new allocation with the left neighbor. 2963 */ 2964 --*idx; 2965 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2966 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 2967 left.br_blockcount + new->br_blockcount); 2968 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2969 2970 if (cur == NULL) { 2971 rval = xfs_ilog_fext(whichfork); 2972 } else { 2973 rval = 0; 2974 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, 2975 left.br_startblock, left.br_blockcount, 2976 &i); 2977 if (error) 2978 goto done; 2979 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2980 error = xfs_bmbt_update(cur, left.br_startoff, 2981 left.br_startblock, 2982 left.br_blockcount + 2983 new->br_blockcount, 2984 left.br_state); 2985 if (error) 2986 goto done; 2987 } 2988 break; 2989 2990 case BMAP_RIGHT_CONTIG: 2991 /* 2992 * New allocation is contiguous with a real allocation 2993 * on the right. 2994 * Merge the new allocation with the right neighbor. 2995 */ 2996 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2997 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), 2998 new->br_startoff, new->br_startblock, 2999 new->br_blockcount + right.br_blockcount, 3000 right.br_state); 3001 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 3002 3003 if (cur == NULL) { 3004 rval = xfs_ilog_fext(whichfork); 3005 } else { 3006 rval = 0; 3007 error = xfs_bmbt_lookup_eq(cur, 3008 right.br_startoff, 3009 right.br_startblock, 3010 right.br_blockcount, &i); 3011 if (error) 3012 goto done; 3013 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 3014 error = xfs_bmbt_update(cur, new->br_startoff, 3015 new->br_startblock, 3016 new->br_blockcount + 3017 right.br_blockcount, 3018 right.br_state); 3019 if (error) 3020 goto done; 3021 } 3022 break; 3023 3024 case 0: 3025 /* 3026 * New allocation is not contiguous with another 3027 * real allocation. 3028 * Insert a new entry. 3029 */ 3030 xfs_iext_insert(ip, *idx, 1, new, state); 3031 XFS_IFORK_NEXT_SET(ip, whichfork, 3032 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 3033 if (cur == NULL) { 3034 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 3035 } else { 3036 rval = XFS_ILOG_CORE; 3037 error = xfs_bmbt_lookup_eq(cur, 3038 new->br_startoff, 3039 new->br_startblock, 3040 new->br_blockcount, &i); 3041 if (error) 3042 goto done; 3043 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 3044 cur->bc_rec.b.br_state = new->br_state; 3045 error = xfs_btree_insert(cur, &i); 3046 if (error) 3047 goto done; 3048 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 3049 } 3050 break; 3051 } 3052 3053 /* add reverse mapping */ 3054 error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new); 3055 if (error) 3056 goto done; 3057 3058 /* convert to a btree if necessary */ 3059 if (xfs_bmap_needs_btree(ip, whichfork)) { 3060 int tmp_logflags; /* partial log flag return val */ 3061 3062 ASSERT(cur == NULL); 3063 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp, 3064 0, &tmp_logflags, whichfork); 3065 *logflagsp |= tmp_logflags; 3066 cur = *curp; 3067 if (error) 3068 goto done; 3069 } 3070 3071 /* clear out the allocated field, done with it now in any case. */ 3072 if (cur) 3073 cur->bc_private.b.allocated = 0; 3074 3075 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 3076 done: 3077 *logflagsp |= rval; 3078 return error; 3079 } 3080 3081 /* 3082 * Functions used in the extent read, allocate and remove paths 3083 */ 3084 3085 /* 3086 * Adjust the size of the new extent based on di_extsize and rt extsize. 3087 */ 3088 int 3089 xfs_bmap_extsize_align( 3090 xfs_mount_t *mp, 3091 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 3092 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 3093 xfs_extlen_t extsz, /* align to this extent size */ 3094 int rt, /* is this a realtime inode? */ 3095 int eof, /* is extent at end-of-file? */ 3096 int delay, /* creating delalloc extent? */ 3097 int convert, /* overwriting unwritten extent? */ 3098 xfs_fileoff_t *offp, /* in/out: aligned offset */ 3099 xfs_extlen_t *lenp) /* in/out: aligned length */ 3100 { 3101 xfs_fileoff_t orig_off; /* original offset */ 3102 xfs_extlen_t orig_alen; /* original length */ 3103 xfs_fileoff_t orig_end; /* original off+len */ 3104 xfs_fileoff_t nexto; /* next file offset */ 3105 xfs_fileoff_t prevo; /* previous file offset */ 3106 xfs_fileoff_t align_off; /* temp for offset */ 3107 xfs_extlen_t align_alen; /* temp for length */ 3108 xfs_extlen_t temp; /* temp for calculations */ 3109 3110 if (convert) 3111 return 0; 3112 3113 orig_off = align_off = *offp; 3114 orig_alen = align_alen = *lenp; 3115 orig_end = orig_off + orig_alen; 3116 3117 /* 3118 * If this request overlaps an existing extent, then don't 3119 * attempt to perform any additional alignment. 3120 */ 3121 if (!delay && !eof && 3122 (orig_off >= gotp->br_startoff) && 3123 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 3124 return 0; 3125 } 3126 3127 /* 3128 * If the file offset is unaligned vs. the extent size 3129 * we need to align it. This will be possible unless 3130 * the file was previously written with a kernel that didn't 3131 * perform this alignment, or if a truncate shot us in the 3132 * foot. 3133 */ 3134 temp = do_mod(orig_off, extsz); 3135 if (temp) { 3136 align_alen += temp; 3137 align_off -= temp; 3138 } 3139 3140 /* Same adjustment for the end of the requested area. */ 3141 temp = (align_alen % extsz); 3142 if (temp) 3143 align_alen += extsz - temp; 3144 3145 /* 3146 * For large extent hint sizes, the aligned extent might be larger than 3147 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls 3148 * the length back under MAXEXTLEN. The outer allocation loops handle 3149 * short allocation just fine, so it is safe to do this. We only want to 3150 * do it when we are forced to, though, because it means more allocation 3151 * operations are required. 3152 */ 3153 while (align_alen > MAXEXTLEN) 3154 align_alen -= extsz; 3155 ASSERT(align_alen <= MAXEXTLEN); 3156 3157 /* 3158 * If the previous block overlaps with this proposed allocation 3159 * then move the start forward without adjusting the length. 3160 */ 3161 if (prevp->br_startoff != NULLFILEOFF) { 3162 if (prevp->br_startblock == HOLESTARTBLOCK) 3163 prevo = prevp->br_startoff; 3164 else 3165 prevo = prevp->br_startoff + prevp->br_blockcount; 3166 } else 3167 prevo = 0; 3168 if (align_off != orig_off && align_off < prevo) 3169 align_off = prevo; 3170 /* 3171 * If the next block overlaps with this proposed allocation 3172 * then move the start back without adjusting the length, 3173 * but not before offset 0. 3174 * This may of course make the start overlap previous block, 3175 * and if we hit the offset 0 limit then the next block 3176 * can still overlap too. 3177 */ 3178 if (!eof && gotp->br_startoff != NULLFILEOFF) { 3179 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 3180 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 3181 nexto = gotp->br_startoff + gotp->br_blockcount; 3182 else 3183 nexto = gotp->br_startoff; 3184 } else 3185 nexto = NULLFILEOFF; 3186 if (!eof && 3187 align_off + align_alen != orig_end && 3188 align_off + align_alen > nexto) 3189 align_off = nexto > align_alen ? nexto - align_alen : 0; 3190 /* 3191 * If we're now overlapping the next or previous extent that 3192 * means we can't fit an extsz piece in this hole. Just move 3193 * the start forward to the first valid spot and set 3194 * the length so we hit the end. 3195 */ 3196 if (align_off != orig_off && align_off < prevo) 3197 align_off = prevo; 3198 if (align_off + align_alen != orig_end && 3199 align_off + align_alen > nexto && 3200 nexto != NULLFILEOFF) { 3201 ASSERT(nexto > prevo); 3202 align_alen = nexto - align_off; 3203 } 3204 3205 /* 3206 * If realtime, and the result isn't a multiple of the realtime 3207 * extent size we need to remove blocks until it is. 3208 */ 3209 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 3210 /* 3211 * We're not covering the original request, or 3212 * we won't be able to once we fix the length. 3213 */ 3214 if (orig_off < align_off || 3215 orig_end > align_off + align_alen || 3216 align_alen - temp < orig_alen) 3217 return -EINVAL; 3218 /* 3219 * Try to fix it by moving the start up. 3220 */ 3221 if (align_off + temp <= orig_off) { 3222 align_alen -= temp; 3223 align_off += temp; 3224 } 3225 /* 3226 * Try to fix it by moving the end in. 3227 */ 3228 else if (align_off + align_alen - temp >= orig_end) 3229 align_alen -= temp; 3230 /* 3231 * Set the start to the minimum then trim the length. 3232 */ 3233 else { 3234 align_alen -= orig_off - align_off; 3235 align_off = orig_off; 3236 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3237 } 3238 /* 3239 * Result doesn't cover the request, fail it. 3240 */ 3241 if (orig_off < align_off || orig_end > align_off + align_alen) 3242 return -EINVAL; 3243 } else { 3244 ASSERT(orig_off >= align_off); 3245 /* see MAXEXTLEN handling above */ 3246 ASSERT(orig_end <= align_off + align_alen || 3247 align_alen + extsz > MAXEXTLEN); 3248 } 3249 3250 #ifdef DEBUG 3251 if (!eof && gotp->br_startoff != NULLFILEOFF) 3252 ASSERT(align_off + align_alen <= gotp->br_startoff); 3253 if (prevp->br_startoff != NULLFILEOFF) 3254 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3255 #endif 3256 3257 *lenp = align_alen; 3258 *offp = align_off; 3259 return 0; 3260 } 3261 3262 #define XFS_ALLOC_GAP_UNITS 4 3263 3264 void 3265 xfs_bmap_adjacent( 3266 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3267 { 3268 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3269 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3270 xfs_mount_t *mp; /* mount point structure */ 3271 int nullfb; /* true if ap->firstblock isn't set */ 3272 int rt; /* true if inode is realtime */ 3273 3274 #define ISVALID(x,y) \ 3275 (rt ? \ 3276 (x) < mp->m_sb.sb_rblocks : \ 3277 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3278 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3279 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3280 3281 mp = ap->ip->i_mount; 3282 nullfb = *ap->firstblock == NULLFSBLOCK; 3283 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3284 xfs_alloc_is_userdata(ap->datatype); 3285 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); 3286 /* 3287 * If allocating at eof, and there's a previous real block, 3288 * try to use its last block as our starting point. 3289 */ 3290 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3291 !isnullstartblock(ap->prev.br_startblock) && 3292 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3293 ap->prev.br_startblock)) { 3294 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3295 /* 3296 * Adjust for the gap between prevp and us. 3297 */ 3298 adjust = ap->offset - 3299 (ap->prev.br_startoff + ap->prev.br_blockcount); 3300 if (adjust && 3301 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3302 ap->blkno += adjust; 3303 } 3304 /* 3305 * If not at eof, then compare the two neighbor blocks. 3306 * Figure out whether either one gives us a good starting point, 3307 * and pick the better one. 3308 */ 3309 else if (!ap->eof) { 3310 xfs_fsblock_t gotbno; /* right side block number */ 3311 xfs_fsblock_t gotdiff=0; /* right side difference */ 3312 xfs_fsblock_t prevbno; /* left side block number */ 3313 xfs_fsblock_t prevdiff=0; /* left side difference */ 3314 3315 /* 3316 * If there's a previous (left) block, select a requested 3317 * start block based on it. 3318 */ 3319 if (ap->prev.br_startoff != NULLFILEOFF && 3320 !isnullstartblock(ap->prev.br_startblock) && 3321 (prevbno = ap->prev.br_startblock + 3322 ap->prev.br_blockcount) && 3323 ISVALID(prevbno, ap->prev.br_startblock)) { 3324 /* 3325 * Calculate gap to end of previous block. 3326 */ 3327 adjust = prevdiff = ap->offset - 3328 (ap->prev.br_startoff + 3329 ap->prev.br_blockcount); 3330 /* 3331 * Figure the startblock based on the previous block's 3332 * end and the gap size. 3333 * Heuristic! 3334 * If the gap is large relative to the piece we're 3335 * allocating, or using it gives us an invalid block 3336 * number, then just use the end of the previous block. 3337 */ 3338 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3339 ISVALID(prevbno + prevdiff, 3340 ap->prev.br_startblock)) 3341 prevbno += adjust; 3342 else 3343 prevdiff += adjust; 3344 /* 3345 * If the firstblock forbids it, can't use it, 3346 * must use default. 3347 */ 3348 if (!rt && !nullfb && 3349 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) 3350 prevbno = NULLFSBLOCK; 3351 } 3352 /* 3353 * No previous block or can't follow it, just default. 3354 */ 3355 else 3356 prevbno = NULLFSBLOCK; 3357 /* 3358 * If there's a following (right) block, select a requested 3359 * start block based on it. 3360 */ 3361 if (!isnullstartblock(ap->got.br_startblock)) { 3362 /* 3363 * Calculate gap to start of next block. 3364 */ 3365 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3366 /* 3367 * Figure the startblock based on the next block's 3368 * start and the gap size. 3369 */ 3370 gotbno = ap->got.br_startblock; 3371 /* 3372 * Heuristic! 3373 * If the gap is large relative to the piece we're 3374 * allocating, or using it gives us an invalid block 3375 * number, then just use the start of the next block 3376 * offset by our length. 3377 */ 3378 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3379 ISVALID(gotbno - gotdiff, gotbno)) 3380 gotbno -= adjust; 3381 else if (ISVALID(gotbno - ap->length, gotbno)) { 3382 gotbno -= ap->length; 3383 gotdiff += adjust - ap->length; 3384 } else 3385 gotdiff += adjust; 3386 /* 3387 * If the firstblock forbids it, can't use it, 3388 * must use default. 3389 */ 3390 if (!rt && !nullfb && 3391 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) 3392 gotbno = NULLFSBLOCK; 3393 } 3394 /* 3395 * No next block, just default. 3396 */ 3397 else 3398 gotbno = NULLFSBLOCK; 3399 /* 3400 * If both valid, pick the better one, else the only good 3401 * one, else ap->blkno is already set (to 0 or the inode block). 3402 */ 3403 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3404 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno; 3405 else if (prevbno != NULLFSBLOCK) 3406 ap->blkno = prevbno; 3407 else if (gotbno != NULLFSBLOCK) 3408 ap->blkno = gotbno; 3409 } 3410 #undef ISVALID 3411 } 3412 3413 static int 3414 xfs_bmap_longest_free_extent( 3415 struct xfs_trans *tp, 3416 xfs_agnumber_t ag, 3417 xfs_extlen_t *blen, 3418 int *notinit) 3419 { 3420 struct xfs_mount *mp = tp->t_mountp; 3421 struct xfs_perag *pag; 3422 xfs_extlen_t longest; 3423 int error = 0; 3424 3425 pag = xfs_perag_get(mp, ag); 3426 if (!pag->pagf_init) { 3427 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK); 3428 if (error) 3429 goto out; 3430 3431 if (!pag->pagf_init) { 3432 *notinit = 1; 3433 goto out; 3434 } 3435 } 3436 3437 longest = xfs_alloc_longest_free_extent(mp, pag, 3438 xfs_alloc_min_freelist(mp, pag), 3439 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3440 if (*blen < longest) 3441 *blen = longest; 3442 3443 out: 3444 xfs_perag_put(pag); 3445 return error; 3446 } 3447 3448 static void 3449 xfs_bmap_select_minlen( 3450 struct xfs_bmalloca *ap, 3451 struct xfs_alloc_arg *args, 3452 xfs_extlen_t *blen, 3453 int notinit) 3454 { 3455 if (notinit || *blen < ap->minlen) { 3456 /* 3457 * Since we did a BUF_TRYLOCK above, it is possible that 3458 * there is space for this request. 3459 */ 3460 args->minlen = ap->minlen; 3461 } else if (*blen < args->maxlen) { 3462 /* 3463 * If the best seen length is less than the request length, 3464 * use the best as the minimum. 3465 */ 3466 args->minlen = *blen; 3467 } else { 3468 /* 3469 * Otherwise we've seen an extent as big as maxlen, use that 3470 * as the minimum. 3471 */ 3472 args->minlen = args->maxlen; 3473 } 3474 } 3475 3476 STATIC int 3477 xfs_bmap_btalloc_nullfb( 3478 struct xfs_bmalloca *ap, 3479 struct xfs_alloc_arg *args, 3480 xfs_extlen_t *blen) 3481 { 3482 struct xfs_mount *mp = ap->ip->i_mount; 3483 xfs_agnumber_t ag, startag; 3484 int notinit = 0; 3485 int error; 3486 3487 args->type = XFS_ALLOCTYPE_START_BNO; 3488 args->total = ap->total; 3489 3490 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3491 if (startag == NULLAGNUMBER) 3492 startag = ag = 0; 3493 3494 while (*blen < args->maxlen) { 3495 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3496 ¬init); 3497 if (error) 3498 return error; 3499 3500 if (++ag == mp->m_sb.sb_agcount) 3501 ag = 0; 3502 if (ag == startag) 3503 break; 3504 } 3505 3506 xfs_bmap_select_minlen(ap, args, blen, notinit); 3507 return 0; 3508 } 3509 3510 STATIC int 3511 xfs_bmap_btalloc_filestreams( 3512 struct xfs_bmalloca *ap, 3513 struct xfs_alloc_arg *args, 3514 xfs_extlen_t *blen) 3515 { 3516 struct xfs_mount *mp = ap->ip->i_mount; 3517 xfs_agnumber_t ag; 3518 int notinit = 0; 3519 int error; 3520 3521 args->type = XFS_ALLOCTYPE_NEAR_BNO; 3522 args->total = ap->total; 3523 3524 ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3525 if (ag == NULLAGNUMBER) 3526 ag = 0; 3527 3528 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init); 3529 if (error) 3530 return error; 3531 3532 if (*blen < args->maxlen) { 3533 error = xfs_filestream_new_ag(ap, &ag); 3534 if (error) 3535 return error; 3536 3537 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3538 ¬init); 3539 if (error) 3540 return error; 3541 3542 } 3543 3544 xfs_bmap_select_minlen(ap, args, blen, notinit); 3545 3546 /* 3547 * Set the failure fallback case to look in the selected AG as stream 3548 * may have moved. 3549 */ 3550 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 3551 return 0; 3552 } 3553 3554 STATIC int 3555 xfs_bmap_btalloc( 3556 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3557 { 3558 xfs_mount_t *mp; /* mount point structure */ 3559 xfs_alloctype_t atype = 0; /* type for allocation routines */ 3560 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3561 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3562 xfs_agnumber_t ag; 3563 xfs_alloc_arg_t args; 3564 xfs_extlen_t blen; 3565 xfs_extlen_t nextminlen = 0; 3566 int nullfb; /* true if ap->firstblock isn't set */ 3567 int isaligned; 3568 int tryagain; 3569 int error; 3570 int stripe_align; 3571 3572 ASSERT(ap->length); 3573 3574 mp = ap->ip->i_mount; 3575 3576 /* stripe alignment for allocation is determined by mount parameters */ 3577 stripe_align = 0; 3578 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 3579 stripe_align = mp->m_swidth; 3580 else if (mp->m_dalign) 3581 stripe_align = mp->m_dalign; 3582 3583 if (ap->flags & XFS_BMAPI_COWFORK) 3584 align = xfs_get_cowextsz_hint(ap->ip); 3585 else if (xfs_alloc_is_userdata(ap->datatype)) 3586 align = xfs_get_extsz_hint(ap->ip); 3587 if (align) { 3588 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3589 align, 0, ap->eof, 0, ap->conv, 3590 &ap->offset, &ap->length); 3591 ASSERT(!error); 3592 ASSERT(ap->length); 3593 } 3594 3595 3596 nullfb = *ap->firstblock == NULLFSBLOCK; 3597 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); 3598 if (nullfb) { 3599 if (xfs_alloc_is_userdata(ap->datatype) && 3600 xfs_inode_is_filestream(ap->ip)) { 3601 ag = xfs_filestream_lookup_ag(ap->ip); 3602 ag = (ag != NULLAGNUMBER) ? ag : 0; 3603 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 3604 } else { 3605 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 3606 } 3607 } else 3608 ap->blkno = *ap->firstblock; 3609 3610 xfs_bmap_adjacent(ap); 3611 3612 /* 3613 * If allowed, use ap->blkno; otherwise must use firstblock since 3614 * it's in the right allocation group. 3615 */ 3616 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 3617 ; 3618 else 3619 ap->blkno = *ap->firstblock; 3620 /* 3621 * Normal allocation, done through xfs_alloc_vextent. 3622 */ 3623 tryagain = isaligned = 0; 3624 memset(&args, 0, sizeof(args)); 3625 args.tp = ap->tp; 3626 args.mp = mp; 3627 args.fsbno = ap->blkno; 3628 xfs_rmap_skip_owner_update(&args.oinfo); 3629 3630 /* Trim the allocation back to the maximum an AG can fit. */ 3631 args.maxlen = MIN(ap->length, mp->m_ag_max_usable); 3632 args.firstblock = *ap->firstblock; 3633 blen = 0; 3634 if (nullfb) { 3635 /* 3636 * Search for an allocation group with a single extent large 3637 * enough for the request. If one isn't found, then adjust 3638 * the minimum allocation size to the largest space found. 3639 */ 3640 if (xfs_alloc_is_userdata(ap->datatype) && 3641 xfs_inode_is_filestream(ap->ip)) 3642 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); 3643 else 3644 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 3645 if (error) 3646 return error; 3647 } else if (ap->dfops->dop_low) { 3648 if (xfs_inode_is_filestream(ap->ip)) 3649 args.type = XFS_ALLOCTYPE_FIRST_AG; 3650 else 3651 args.type = XFS_ALLOCTYPE_START_BNO; 3652 args.total = args.minlen = ap->minlen; 3653 } else { 3654 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3655 args.total = ap->total; 3656 args.minlen = ap->minlen; 3657 } 3658 /* apply extent size hints if obtained earlier */ 3659 if (align) { 3660 args.prod = align; 3661 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) 3662 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3663 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3664 args.prod = 1; 3665 args.mod = 0; 3666 } else { 3667 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3668 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) 3669 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3670 } 3671 /* 3672 * If we are not low on available data blocks, and the 3673 * underlying logical volume manager is a stripe, and 3674 * the file offset is zero then try to allocate data 3675 * blocks on stripe unit boundary. 3676 * NOTE: ap->aeof is only set if the allocation length 3677 * is >= the stripe unit and the allocation offset is 3678 * at the end of file. 3679 */ 3680 if (!ap->dfops->dop_low && ap->aeof) { 3681 if (!ap->offset) { 3682 args.alignment = stripe_align; 3683 atype = args.type; 3684 isaligned = 1; 3685 /* 3686 * Adjust for alignment 3687 */ 3688 if (blen > args.alignment && blen <= args.maxlen) 3689 args.minlen = blen - args.alignment; 3690 args.minalignslop = 0; 3691 } else { 3692 /* 3693 * First try an exact bno allocation. 3694 * If it fails then do a near or start bno 3695 * allocation with alignment turned on. 3696 */ 3697 atype = args.type; 3698 tryagain = 1; 3699 args.type = XFS_ALLOCTYPE_THIS_BNO; 3700 args.alignment = 1; 3701 /* 3702 * Compute the minlen+alignment for the 3703 * next case. Set slop so that the value 3704 * of minlen+alignment+slop doesn't go up 3705 * between the calls. 3706 */ 3707 if (blen > stripe_align && blen <= args.maxlen) 3708 nextminlen = blen - stripe_align; 3709 else 3710 nextminlen = args.minlen; 3711 if (nextminlen + stripe_align > args.minlen + 1) 3712 args.minalignslop = 3713 nextminlen + stripe_align - 3714 args.minlen - 1; 3715 else 3716 args.minalignslop = 0; 3717 } 3718 } else { 3719 args.alignment = 1; 3720 args.minalignslop = 0; 3721 } 3722 args.minleft = ap->minleft; 3723 args.wasdel = ap->wasdel; 3724 args.resv = XFS_AG_RESV_NONE; 3725 args.datatype = ap->datatype; 3726 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) 3727 args.ip = ap->ip; 3728 3729 error = xfs_alloc_vextent(&args); 3730 if (error) 3731 return error; 3732 3733 if (tryagain && args.fsbno == NULLFSBLOCK) { 3734 /* 3735 * Exact allocation failed. Now try with alignment 3736 * turned on. 3737 */ 3738 args.type = atype; 3739 args.fsbno = ap->blkno; 3740 args.alignment = stripe_align; 3741 args.minlen = nextminlen; 3742 args.minalignslop = 0; 3743 isaligned = 1; 3744 if ((error = xfs_alloc_vextent(&args))) 3745 return error; 3746 } 3747 if (isaligned && args.fsbno == NULLFSBLOCK) { 3748 /* 3749 * allocation failed, so turn off alignment and 3750 * try again. 3751 */ 3752 args.type = atype; 3753 args.fsbno = ap->blkno; 3754 args.alignment = 0; 3755 if ((error = xfs_alloc_vextent(&args))) 3756 return error; 3757 } 3758 if (args.fsbno == NULLFSBLOCK && nullfb && 3759 args.minlen > ap->minlen) { 3760 args.minlen = ap->minlen; 3761 args.type = XFS_ALLOCTYPE_START_BNO; 3762 args.fsbno = ap->blkno; 3763 if ((error = xfs_alloc_vextent(&args))) 3764 return error; 3765 } 3766 if (args.fsbno == NULLFSBLOCK && nullfb) { 3767 args.fsbno = 0; 3768 args.type = XFS_ALLOCTYPE_FIRST_AG; 3769 args.total = ap->minlen; 3770 if ((error = xfs_alloc_vextent(&args))) 3771 return error; 3772 ap->dfops->dop_low = true; 3773 } 3774 if (args.fsbno != NULLFSBLOCK) { 3775 /* 3776 * check the allocation happened at the same or higher AG than 3777 * the first block that was allocated. 3778 */ 3779 ASSERT(*ap->firstblock == NULLFSBLOCK || 3780 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <= 3781 XFS_FSB_TO_AGNO(mp, args.fsbno)); 3782 3783 ap->blkno = args.fsbno; 3784 if (*ap->firstblock == NULLFSBLOCK) 3785 *ap->firstblock = args.fsbno; 3786 ASSERT(nullfb || fb_agno <= args.agno); 3787 ap->length = args.len; 3788 if (!(ap->flags & XFS_BMAPI_COWFORK)) 3789 ap->ip->i_d.di_nblocks += args.len; 3790 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3791 if (ap->wasdel) 3792 ap->ip->i_delayed_blks -= args.len; 3793 /* 3794 * Adjust the disk quota also. This was reserved 3795 * earlier. 3796 */ 3797 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3798 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : 3799 XFS_TRANS_DQ_BCOUNT, 3800 (long) args.len); 3801 } else { 3802 ap->blkno = NULLFSBLOCK; 3803 ap->length = 0; 3804 } 3805 return 0; 3806 } 3807 3808 /* 3809 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 3810 * It figures out where to ask the underlying allocator to put the new extent. 3811 */ 3812 STATIC int 3813 xfs_bmap_alloc( 3814 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3815 { 3816 if (XFS_IS_REALTIME_INODE(ap->ip) && 3817 xfs_alloc_is_userdata(ap->datatype)) 3818 return xfs_bmap_rtalloc(ap); 3819 return xfs_bmap_btalloc(ap); 3820 } 3821 3822 /* Trim extent to fit a logical block range. */ 3823 void 3824 xfs_trim_extent( 3825 struct xfs_bmbt_irec *irec, 3826 xfs_fileoff_t bno, 3827 xfs_filblks_t len) 3828 { 3829 xfs_fileoff_t distance; 3830 xfs_fileoff_t end = bno + len; 3831 3832 if (irec->br_startoff + irec->br_blockcount <= bno || 3833 irec->br_startoff >= end) { 3834 irec->br_blockcount = 0; 3835 return; 3836 } 3837 3838 if (irec->br_startoff < bno) { 3839 distance = bno - irec->br_startoff; 3840 if (isnullstartblock(irec->br_startblock)) 3841 irec->br_startblock = DELAYSTARTBLOCK; 3842 if (irec->br_startblock != DELAYSTARTBLOCK && 3843 irec->br_startblock != HOLESTARTBLOCK) 3844 irec->br_startblock += distance; 3845 irec->br_startoff += distance; 3846 irec->br_blockcount -= distance; 3847 } 3848 3849 if (end < irec->br_startoff + irec->br_blockcount) { 3850 distance = irec->br_startoff + irec->br_blockcount - end; 3851 irec->br_blockcount -= distance; 3852 } 3853 } 3854 3855 /* 3856 * Trim the returned map to the required bounds 3857 */ 3858 STATIC void 3859 xfs_bmapi_trim_map( 3860 struct xfs_bmbt_irec *mval, 3861 struct xfs_bmbt_irec *got, 3862 xfs_fileoff_t *bno, 3863 xfs_filblks_t len, 3864 xfs_fileoff_t obno, 3865 xfs_fileoff_t end, 3866 int n, 3867 int flags) 3868 { 3869 if ((flags & XFS_BMAPI_ENTIRE) || 3870 got->br_startoff + got->br_blockcount <= obno) { 3871 *mval = *got; 3872 if (isnullstartblock(got->br_startblock)) 3873 mval->br_startblock = DELAYSTARTBLOCK; 3874 return; 3875 } 3876 3877 if (obno > *bno) 3878 *bno = obno; 3879 ASSERT((*bno >= obno) || (n == 0)); 3880 ASSERT(*bno < end); 3881 mval->br_startoff = *bno; 3882 if (isnullstartblock(got->br_startblock)) 3883 mval->br_startblock = DELAYSTARTBLOCK; 3884 else 3885 mval->br_startblock = got->br_startblock + 3886 (*bno - got->br_startoff); 3887 /* 3888 * Return the minimum of what we got and what we asked for for 3889 * the length. We can use the len variable here because it is 3890 * modified below and we could have been there before coming 3891 * here if the first part of the allocation didn't overlap what 3892 * was asked for. 3893 */ 3894 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3895 got->br_blockcount - (*bno - got->br_startoff)); 3896 mval->br_state = got->br_state; 3897 ASSERT(mval->br_blockcount <= len); 3898 return; 3899 } 3900 3901 /* 3902 * Update and validate the extent map to return 3903 */ 3904 STATIC void 3905 xfs_bmapi_update_map( 3906 struct xfs_bmbt_irec **map, 3907 xfs_fileoff_t *bno, 3908 xfs_filblks_t *len, 3909 xfs_fileoff_t obno, 3910 xfs_fileoff_t end, 3911 int *n, 3912 int flags) 3913 { 3914 xfs_bmbt_irec_t *mval = *map; 3915 3916 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3917 ((mval->br_startoff + mval->br_blockcount) <= end)); 3918 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3919 (mval->br_startoff < obno)); 3920 3921 *bno = mval->br_startoff + mval->br_blockcount; 3922 *len = end - *bno; 3923 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3924 /* update previous map with new information */ 3925 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3926 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3927 ASSERT(mval->br_state == mval[-1].br_state); 3928 mval[-1].br_blockcount = mval->br_blockcount; 3929 mval[-1].br_state = mval->br_state; 3930 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3931 mval[-1].br_startblock != DELAYSTARTBLOCK && 3932 mval[-1].br_startblock != HOLESTARTBLOCK && 3933 mval->br_startblock == mval[-1].br_startblock + 3934 mval[-1].br_blockcount && 3935 ((flags & XFS_BMAPI_IGSTATE) || 3936 mval[-1].br_state == mval->br_state)) { 3937 ASSERT(mval->br_startoff == 3938 mval[-1].br_startoff + mval[-1].br_blockcount); 3939 mval[-1].br_blockcount += mval->br_blockcount; 3940 } else if (*n > 0 && 3941 mval->br_startblock == DELAYSTARTBLOCK && 3942 mval[-1].br_startblock == DELAYSTARTBLOCK && 3943 mval->br_startoff == 3944 mval[-1].br_startoff + mval[-1].br_blockcount) { 3945 mval[-1].br_blockcount += mval->br_blockcount; 3946 mval[-1].br_state = mval->br_state; 3947 } else if (!((*n == 0) && 3948 ((mval->br_startoff + mval->br_blockcount) <= 3949 obno))) { 3950 mval++; 3951 (*n)++; 3952 } 3953 *map = mval; 3954 } 3955 3956 /* 3957 * Map file blocks to filesystem blocks without allocation. 3958 */ 3959 int 3960 xfs_bmapi_read( 3961 struct xfs_inode *ip, 3962 xfs_fileoff_t bno, 3963 xfs_filblks_t len, 3964 struct xfs_bmbt_irec *mval, 3965 int *nmap, 3966 int flags) 3967 { 3968 struct xfs_mount *mp = ip->i_mount; 3969 struct xfs_ifork *ifp; 3970 struct xfs_bmbt_irec got; 3971 xfs_fileoff_t obno; 3972 xfs_fileoff_t end; 3973 xfs_extnum_t idx; 3974 int error; 3975 bool eof = false; 3976 int n = 0; 3977 int whichfork = xfs_bmapi_whichfork(flags); 3978 3979 ASSERT(*nmap >= 1); 3980 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| 3981 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK))); 3982 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3983 3984 if (unlikely(XFS_TEST_ERROR( 3985 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 3986 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 3987 mp, XFS_ERRTAG_BMAPIFORMAT))) { 3988 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 3989 return -EFSCORRUPTED; 3990 } 3991 3992 if (XFS_FORCED_SHUTDOWN(mp)) 3993 return -EIO; 3994 3995 XFS_STATS_INC(mp, xs_blk_mapr); 3996 3997 ifp = XFS_IFORK_PTR(ip, whichfork); 3998 3999 /* No CoW fork? Return a hole. */ 4000 if (whichfork == XFS_COW_FORK && !ifp) { 4001 mval->br_startoff = bno; 4002 mval->br_startblock = HOLESTARTBLOCK; 4003 mval->br_blockcount = len; 4004 mval->br_state = XFS_EXT_NORM; 4005 *nmap = 1; 4006 return 0; 4007 } 4008 4009 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4010 error = xfs_iread_extents(NULL, ip, whichfork); 4011 if (error) 4012 return error; 4013 } 4014 4015 if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) 4016 eof = true; 4017 end = bno + len; 4018 obno = bno; 4019 4020 while (bno < end && n < *nmap) { 4021 /* Reading past eof, act as though there's a hole up to end. */ 4022 if (eof) 4023 got.br_startoff = end; 4024 if (got.br_startoff > bno) { 4025 /* Reading in a hole. */ 4026 mval->br_startoff = bno; 4027 mval->br_startblock = HOLESTARTBLOCK; 4028 mval->br_blockcount = 4029 XFS_FILBLKS_MIN(len, got.br_startoff - bno); 4030 mval->br_state = XFS_EXT_NORM; 4031 bno += mval->br_blockcount; 4032 len -= mval->br_blockcount; 4033 mval++; 4034 n++; 4035 continue; 4036 } 4037 4038 /* set up the extent map to return. */ 4039 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); 4040 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4041 4042 /* If we're done, stop now. */ 4043 if (bno >= end || n >= *nmap) 4044 break; 4045 4046 /* Else go on to the next record. */ 4047 if (!xfs_iext_get_extent(ifp, ++idx, &got)) 4048 eof = true; 4049 } 4050 *nmap = n; 4051 return 0; 4052 } 4053 4054 /* 4055 * Add a delayed allocation extent to an inode. Blocks are reserved from the 4056 * global pool and the extent inserted into the inode in-core extent tree. 4057 * 4058 * On entry, got refers to the first extent beyond the offset of the extent to 4059 * allocate or eof is specified if no such extent exists. On return, got refers 4060 * to the extent record that was inserted to the inode fork. 4061 * 4062 * Note that the allocated extent may have been merged with contiguous extents 4063 * during insertion into the inode fork. Thus, got does not reflect the current 4064 * state of the inode fork on return. If necessary, the caller can use lastx to 4065 * look up the updated record in the inode fork. 4066 */ 4067 int 4068 xfs_bmapi_reserve_delalloc( 4069 struct xfs_inode *ip, 4070 int whichfork, 4071 xfs_fileoff_t off, 4072 xfs_filblks_t len, 4073 xfs_filblks_t prealloc, 4074 struct xfs_bmbt_irec *got, 4075 xfs_extnum_t *lastx, 4076 int eof) 4077 { 4078 struct xfs_mount *mp = ip->i_mount; 4079 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4080 xfs_extlen_t alen; 4081 xfs_extlen_t indlen; 4082 char rt = XFS_IS_REALTIME_INODE(ip); 4083 xfs_extlen_t extsz; 4084 int error; 4085 xfs_fileoff_t aoff = off; 4086 4087 /* 4088 * Cap the alloc length. Keep track of prealloc so we know whether to 4089 * tag the inode before we return. 4090 */ 4091 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN); 4092 if (!eof) 4093 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); 4094 if (prealloc && alen >= len) 4095 prealloc = alen - len; 4096 4097 /* Figure out the extent size, adjust alen */ 4098 if (whichfork == XFS_COW_FORK) 4099 extsz = xfs_get_cowextsz_hint(ip); 4100 else 4101 extsz = xfs_get_extsz_hint(ip); 4102 if (extsz) { 4103 struct xfs_bmbt_irec prev; 4104 4105 if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev)) 4106 prev.br_startoff = NULLFILEOFF; 4107 4108 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof, 4109 1, 0, &aoff, &alen); 4110 ASSERT(!error); 4111 } 4112 4113 if (rt) 4114 extsz = alen / mp->m_sb.sb_rextsize; 4115 4116 /* 4117 * Make a transaction-less quota reservation for delayed allocation 4118 * blocks. This number gets adjusted later. We return if we haven't 4119 * allocated blocks already inside this loop. 4120 */ 4121 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0, 4122 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4123 if (error) 4124 return error; 4125 4126 /* 4127 * Split changing sb for alen and indlen since they could be coming 4128 * from different places. 4129 */ 4130 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); 4131 ASSERT(indlen > 0); 4132 4133 if (rt) { 4134 error = xfs_mod_frextents(mp, -((int64_t)extsz)); 4135 } else { 4136 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); 4137 } 4138 4139 if (error) 4140 goto out_unreserve_quota; 4141 4142 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); 4143 if (error) 4144 goto out_unreserve_blocks; 4145 4146 4147 ip->i_delayed_blks += alen; 4148 4149 got->br_startoff = aoff; 4150 got->br_startblock = nullstartblock(indlen); 4151 got->br_blockcount = alen; 4152 got->br_state = XFS_EXT_NORM; 4153 4154 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got); 4155 4156 /* 4157 * Tag the inode if blocks were preallocated. Note that COW fork 4158 * preallocation can occur at the start or end of the extent, even when 4159 * prealloc == 0, so we must also check the aligned offset and length. 4160 */ 4161 if (whichfork == XFS_DATA_FORK && prealloc) 4162 xfs_inode_set_eofblocks_tag(ip); 4163 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 4164 xfs_inode_set_cowblocks_tag(ip); 4165 4166 return 0; 4167 4168 out_unreserve_blocks: 4169 if (rt) 4170 xfs_mod_frextents(mp, extsz); 4171 else 4172 xfs_mod_fdblocks(mp, alen, false); 4173 out_unreserve_quota: 4174 if (XFS_IS_QUOTA_ON(mp)) 4175 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ? 4176 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4177 return error; 4178 } 4179 4180 static int 4181 xfs_bmapi_allocate( 4182 struct xfs_bmalloca *bma) 4183 { 4184 struct xfs_mount *mp = bma->ip->i_mount; 4185 int whichfork = xfs_bmapi_whichfork(bma->flags); 4186 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4187 int tmp_logflags = 0; 4188 int error; 4189 4190 ASSERT(bma->length > 0); 4191 4192 /* 4193 * For the wasdelay case, we could also just allocate the stuff asked 4194 * for in this bmap call but that wouldn't be as good. 4195 */ 4196 if (bma->wasdel) { 4197 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4198 bma->offset = bma->got.br_startoff; 4199 if (bma->idx) { 4200 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), 4201 &bma->prev); 4202 } 4203 } else { 4204 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); 4205 if (!bma->eof) 4206 bma->length = XFS_FILBLKS_MIN(bma->length, 4207 bma->got.br_startoff - bma->offset); 4208 } 4209 4210 /* 4211 * Set the data type being allocated. For the data fork, the first data 4212 * in the file is treated differently to all other allocations. For the 4213 * attribute fork, we only need to ensure the allocated range is not on 4214 * the busy list. 4215 */ 4216 if (!(bma->flags & XFS_BMAPI_METADATA)) { 4217 bma->datatype = XFS_ALLOC_NOBUSY; 4218 if (whichfork == XFS_DATA_FORK) { 4219 if (bma->offset == 0) 4220 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; 4221 else 4222 bma->datatype |= XFS_ALLOC_USERDATA; 4223 } 4224 if (bma->flags & XFS_BMAPI_ZERO) 4225 bma->datatype |= XFS_ALLOC_USERDATA_ZERO; 4226 } 4227 4228 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1; 4229 4230 /* 4231 * Only want to do the alignment at the eof if it is userdata and 4232 * allocation length is larger than a stripe unit. 4233 */ 4234 if (mp->m_dalign && bma->length >= mp->m_dalign && 4235 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4236 error = xfs_bmap_isaeof(bma, whichfork); 4237 if (error) 4238 return error; 4239 } 4240 4241 error = xfs_bmap_alloc(bma); 4242 if (error) 4243 return error; 4244 4245 if (bma->cur) 4246 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4247 if (bma->blkno == NULLFSBLOCK) 4248 return 0; 4249 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4250 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4251 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4252 bma->cur->bc_private.b.dfops = bma->dfops; 4253 } 4254 /* 4255 * Bump the number of extents we've allocated 4256 * in this call. 4257 */ 4258 bma->nallocs++; 4259 4260 if (bma->cur) 4261 bma->cur->bc_private.b.flags = 4262 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4263 4264 bma->got.br_startoff = bma->offset; 4265 bma->got.br_startblock = bma->blkno; 4266 bma->got.br_blockcount = bma->length; 4267 bma->got.br_state = XFS_EXT_NORM; 4268 4269 /* 4270 * In the data fork, a wasdelay extent has been initialized, so 4271 * shouldn't be flagged as unwritten. 4272 * 4273 * For the cow fork, however, we convert delalloc reservations 4274 * (extents allocated for speculative preallocation) to 4275 * allocated unwritten extents, and only convert the unwritten 4276 * extents to real extents when we're about to write the data. 4277 */ 4278 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && 4279 (bma->flags & XFS_BMAPI_PREALLOC) && 4280 xfs_sb_version_hasextflgbit(&mp->m_sb)) 4281 bma->got.br_state = XFS_EXT_UNWRITTEN; 4282 4283 if (bma->wasdel) 4284 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4285 else 4286 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4287 whichfork, &bma->idx, &bma->cur, &bma->got, 4288 bma->firstblock, bma->dfops, &bma->logflags); 4289 4290 bma->logflags |= tmp_logflags; 4291 if (error) 4292 return error; 4293 4294 /* 4295 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4296 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4297 * the neighbouring ones. 4298 */ 4299 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got); 4300 4301 ASSERT(bma->got.br_startoff <= bma->offset); 4302 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4303 bma->offset + bma->length); 4304 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4305 bma->got.br_state == XFS_EXT_UNWRITTEN); 4306 return 0; 4307 } 4308 4309 STATIC int 4310 xfs_bmapi_convert_unwritten( 4311 struct xfs_bmalloca *bma, 4312 struct xfs_bmbt_irec *mval, 4313 xfs_filblks_t len, 4314 int flags) 4315 { 4316 int whichfork = xfs_bmapi_whichfork(flags); 4317 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4318 int tmp_logflags = 0; 4319 int error; 4320 4321 /* check if we need to do unwritten->real conversion */ 4322 if (mval->br_state == XFS_EXT_UNWRITTEN && 4323 (flags & XFS_BMAPI_PREALLOC)) 4324 return 0; 4325 4326 /* check if we need to do real->unwritten conversion */ 4327 if (mval->br_state == XFS_EXT_NORM && 4328 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4329 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4330 return 0; 4331 4332 /* 4333 * Modify (by adding) the state flag, if writing. 4334 */ 4335 ASSERT(mval->br_blockcount <= len); 4336 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4337 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4338 bma->ip, whichfork); 4339 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4340 bma->cur->bc_private.b.dfops = bma->dfops; 4341 } 4342 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4343 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4344 4345 /* 4346 * Before insertion into the bmbt, zero the range being converted 4347 * if required. 4348 */ 4349 if (flags & XFS_BMAPI_ZERO) { 4350 error = xfs_zero_extent(bma->ip, mval->br_startblock, 4351 mval->br_blockcount); 4352 if (error) 4353 return error; 4354 } 4355 4356 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork, 4357 &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops, 4358 &tmp_logflags); 4359 /* 4360 * Log the inode core unconditionally in the unwritten extent conversion 4361 * path because the conversion might not have done so (e.g., if the 4362 * extent count hasn't changed). We need to make sure the inode is dirty 4363 * in the transaction for the sake of fsync(), even if nothing has 4364 * changed, because fsync() will not force the log for this transaction 4365 * unless it sees the inode pinned. 4366 * 4367 * Note: If we're only converting cow fork extents, there aren't 4368 * any on-disk updates to make, so we don't need to log anything. 4369 */ 4370 if (whichfork != XFS_COW_FORK) 4371 bma->logflags |= tmp_logflags | XFS_ILOG_CORE; 4372 if (error) 4373 return error; 4374 4375 /* 4376 * Update our extent pointer, given that 4377 * xfs_bmap_add_extent_unwritten_real might have merged it into one 4378 * of the neighbouring ones. 4379 */ 4380 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got); 4381 4382 /* 4383 * We may have combined previously unwritten space with written space, 4384 * so generate another request. 4385 */ 4386 if (mval->br_blockcount < len) 4387 return -EAGAIN; 4388 return 0; 4389 } 4390 4391 /* 4392 * Map file blocks to filesystem blocks, and allocate blocks or convert the 4393 * extent state if necessary. Details behaviour is controlled by the flags 4394 * parameter. Only allocates blocks from a single allocation group, to avoid 4395 * locking problems. 4396 * 4397 * The returned value in "firstblock" from the first call in a transaction 4398 * must be remembered and presented to subsequent calls in "firstblock". 4399 * An upper bound for the number of blocks to be allocated is supplied to 4400 * the first call in "total"; if no allocation group has that many free 4401 * blocks then the call will fail (return NULLFSBLOCK in "firstblock"). 4402 */ 4403 int 4404 xfs_bmapi_write( 4405 struct xfs_trans *tp, /* transaction pointer */ 4406 struct xfs_inode *ip, /* incore inode */ 4407 xfs_fileoff_t bno, /* starting file offs. mapped */ 4408 xfs_filblks_t len, /* length to map in file */ 4409 int flags, /* XFS_BMAPI_... */ 4410 xfs_fsblock_t *firstblock, /* first allocated block 4411 controls a.g. for allocs */ 4412 xfs_extlen_t total, /* total blocks needed */ 4413 struct xfs_bmbt_irec *mval, /* output: map values */ 4414 int *nmap, /* i/o: mval size/count */ 4415 struct xfs_defer_ops *dfops) /* i/o: list extents to free */ 4416 { 4417 struct xfs_mount *mp = ip->i_mount; 4418 struct xfs_ifork *ifp; 4419 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */ 4420 xfs_fileoff_t end; /* end of mapped file region */ 4421 bool eof = false; /* after the end of extents */ 4422 int error; /* error return */ 4423 int n; /* current extent index */ 4424 xfs_fileoff_t obno; /* old block number (offset) */ 4425 int whichfork; /* data or attr fork */ 4426 4427 #ifdef DEBUG 4428 xfs_fileoff_t orig_bno; /* original block number value */ 4429 int orig_flags; /* original flags arg value */ 4430 xfs_filblks_t orig_len; /* original value of len arg */ 4431 struct xfs_bmbt_irec *orig_mval; /* original value of mval */ 4432 int orig_nmap; /* original value of *nmap */ 4433 4434 orig_bno = bno; 4435 orig_len = len; 4436 orig_flags = flags; 4437 orig_mval = mval; 4438 orig_nmap = *nmap; 4439 #endif 4440 whichfork = xfs_bmapi_whichfork(flags); 4441 4442 ASSERT(*nmap >= 1); 4443 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); 4444 ASSERT(!(flags & XFS_BMAPI_IGSTATE)); 4445 ASSERT(tp != NULL || 4446 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) == 4447 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)); 4448 ASSERT(len > 0); 4449 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); 4450 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4451 ASSERT(!(flags & XFS_BMAPI_REMAP)); 4452 4453 /* zeroing is for currently only for data extents, not metadata */ 4454 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) != 4455 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)); 4456 /* 4457 * we can allocate unwritten extents or pre-zero allocated blocks, 4458 * but it makes no sense to do both at once. This would result in 4459 * zeroing the unwritten extent twice, but it still being an 4460 * unwritten extent.... 4461 */ 4462 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) != 4463 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)); 4464 4465 if (unlikely(XFS_TEST_ERROR( 4466 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4467 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4468 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4469 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); 4470 return -EFSCORRUPTED; 4471 } 4472 4473 if (XFS_FORCED_SHUTDOWN(mp)) 4474 return -EIO; 4475 4476 ifp = XFS_IFORK_PTR(ip, whichfork); 4477 4478 XFS_STATS_INC(mp, xs_blk_mapw); 4479 4480 if (*firstblock == NULLFSBLOCK) { 4481 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) 4482 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1; 4483 else 4484 bma.minleft = 1; 4485 } else { 4486 bma.minleft = 0; 4487 } 4488 4489 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4490 error = xfs_iread_extents(tp, ip, whichfork); 4491 if (error) 4492 goto error0; 4493 } 4494 4495 n = 0; 4496 end = bno + len; 4497 obno = bno; 4498 4499 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got)) 4500 eof = true; 4501 if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev)) 4502 bma.prev.br_startoff = NULLFILEOFF; 4503 bma.tp = tp; 4504 bma.ip = ip; 4505 bma.total = total; 4506 bma.datatype = 0; 4507 bma.dfops = dfops; 4508 bma.firstblock = firstblock; 4509 4510 while (bno < end && n < *nmap) { 4511 bool need_alloc = false, wasdelay = false; 4512 4513 /* in hole or beyoned EOF? */ 4514 if (eof || bma.got.br_startoff > bno) { 4515 if (flags & XFS_BMAPI_DELALLOC) { 4516 /* 4517 * For the COW fork we can reasonably get a 4518 * request for converting an extent that races 4519 * with other threads already having converted 4520 * part of it, as there converting COW to 4521 * regular blocks is not protected using the 4522 * IOLOCK. 4523 */ 4524 ASSERT(flags & XFS_BMAPI_COWFORK); 4525 if (!(flags & XFS_BMAPI_COWFORK)) { 4526 error = -EIO; 4527 goto error0; 4528 } 4529 4530 if (eof || bno >= end) 4531 break; 4532 } else { 4533 need_alloc = true; 4534 } 4535 } else if (isnullstartblock(bma.got.br_startblock)) { 4536 wasdelay = true; 4537 } 4538 4539 /* 4540 * First, deal with the hole before the allocated space 4541 * that we found, if any. 4542 */ 4543 if (need_alloc || wasdelay) { 4544 bma.eof = eof; 4545 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4546 bma.wasdel = wasdelay; 4547 bma.offset = bno; 4548 bma.flags = flags; 4549 4550 /* 4551 * There's a 32/64 bit type mismatch between the 4552 * allocation length request (which can be 64 bits in 4553 * length) and the bma length request, which is 4554 * xfs_extlen_t and therefore 32 bits. Hence we have to 4555 * check for 32-bit overflows and handle them here. 4556 */ 4557 if (len > (xfs_filblks_t)MAXEXTLEN) 4558 bma.length = MAXEXTLEN; 4559 else 4560 bma.length = len; 4561 4562 ASSERT(len > 0); 4563 ASSERT(bma.length > 0); 4564 error = xfs_bmapi_allocate(&bma); 4565 if (error) 4566 goto error0; 4567 if (bma.blkno == NULLFSBLOCK) 4568 break; 4569 4570 /* 4571 * If this is a CoW allocation, record the data in 4572 * the refcount btree for orphan recovery. 4573 */ 4574 if (whichfork == XFS_COW_FORK) { 4575 error = xfs_refcount_alloc_cow_extent(mp, dfops, 4576 bma.blkno, bma.length); 4577 if (error) 4578 goto error0; 4579 } 4580 } 4581 4582 /* Deal with the allocated space we found. */ 4583 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4584 end, n, flags); 4585 4586 /* Execute unwritten extent conversion if necessary */ 4587 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4588 if (error == -EAGAIN) 4589 continue; 4590 if (error) 4591 goto error0; 4592 4593 /* update the extent map to return */ 4594 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4595 4596 /* 4597 * If we're done, stop now. Stop when we've allocated 4598 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4599 * the transaction may get too big. 4600 */ 4601 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4602 break; 4603 4604 /* Else go on to the next record. */ 4605 bma.prev = bma.got; 4606 if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got)) 4607 eof = true; 4608 } 4609 *nmap = n; 4610 4611 /* 4612 * Transform from btree to extents, give it cur. 4613 */ 4614 if (xfs_bmap_wants_extents(ip, whichfork)) { 4615 int tmp_logflags = 0; 4616 4617 ASSERT(bma.cur); 4618 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, 4619 &tmp_logflags, whichfork); 4620 bma.logflags |= tmp_logflags; 4621 if (error) 4622 goto error0; 4623 } 4624 4625 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || 4626 XFS_IFORK_NEXTENTS(ip, whichfork) > 4627 XFS_IFORK_MAXEXT(ip, whichfork)); 4628 error = 0; 4629 error0: 4630 /* 4631 * Log everything. Do this after conversion, there's no point in 4632 * logging the extent records if we've converted to btree format. 4633 */ 4634 if ((bma.logflags & xfs_ilog_fext(whichfork)) && 4635 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 4636 bma.logflags &= ~xfs_ilog_fext(whichfork); 4637 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) && 4638 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 4639 bma.logflags &= ~xfs_ilog_fbroot(whichfork); 4640 /* 4641 * Log whatever the flags say, even if error. Otherwise we might miss 4642 * detecting a case where the data is changed, there's an error, 4643 * and it's not logged so we don't shutdown when we should. 4644 */ 4645 if (bma.logflags) 4646 xfs_trans_log_inode(tp, ip, bma.logflags); 4647 4648 if (bma.cur) { 4649 if (!error) { 4650 ASSERT(*firstblock == NULLFSBLOCK || 4651 XFS_FSB_TO_AGNO(mp, *firstblock) <= 4652 XFS_FSB_TO_AGNO(mp, 4653 bma.cur->bc_private.b.firstblock)); 4654 *firstblock = bma.cur->bc_private.b.firstblock; 4655 } 4656 xfs_btree_del_cursor(bma.cur, 4657 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 4658 } 4659 if (!error) 4660 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4661 orig_nmap, *nmap); 4662 return error; 4663 } 4664 4665 static int 4666 xfs_bmapi_remap( 4667 struct xfs_trans *tp, 4668 struct xfs_inode *ip, 4669 xfs_fileoff_t bno, 4670 xfs_filblks_t len, 4671 xfs_fsblock_t startblock, 4672 struct xfs_defer_ops *dfops) 4673 { 4674 struct xfs_mount *mp = ip->i_mount; 4675 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 4676 struct xfs_btree_cur *cur = NULL; 4677 xfs_fsblock_t firstblock = NULLFSBLOCK; 4678 struct xfs_bmbt_irec got; 4679 xfs_extnum_t idx; 4680 int logflags = 0, error; 4681 4682 ASSERT(len > 0); 4683 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN); 4684 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4685 4686 if (unlikely(XFS_TEST_ERROR( 4687 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS && 4688 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE), 4689 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4690 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp); 4691 return -EFSCORRUPTED; 4692 } 4693 4694 if (XFS_FORCED_SHUTDOWN(mp)) 4695 return -EIO; 4696 4697 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4698 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); 4699 if (error) 4700 return error; 4701 } 4702 4703 if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) { 4704 /* make sure we only reflink into a hole. */ 4705 ASSERT(got.br_startoff > bno); 4706 ASSERT(got.br_startoff - bno >= len); 4707 } 4708 4709 ip->i_d.di_nblocks += len; 4710 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4711 4712 if (ifp->if_flags & XFS_IFBROOT) { 4713 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); 4714 cur->bc_private.b.firstblock = firstblock; 4715 cur->bc_private.b.dfops = dfops; 4716 cur->bc_private.b.flags = 0; 4717 } 4718 4719 got.br_startoff = bno; 4720 got.br_startblock = startblock; 4721 got.br_blockcount = len; 4722 got.br_state = XFS_EXT_NORM; 4723 4724 error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur, 4725 &got, &firstblock, dfops, &logflags); 4726 if (error) 4727 goto error0; 4728 4729 if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) { 4730 int tmp_logflags = 0; 4731 4732 error = xfs_bmap_btree_to_extents(tp, ip, cur, 4733 &tmp_logflags, XFS_DATA_FORK); 4734 logflags |= tmp_logflags; 4735 } 4736 4737 error0: 4738 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) 4739 logflags &= ~XFS_ILOG_DEXT; 4740 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) 4741 logflags &= ~XFS_ILOG_DBROOT; 4742 4743 if (logflags) 4744 xfs_trans_log_inode(tp, ip, logflags); 4745 if (cur) { 4746 xfs_btree_del_cursor(cur, 4747 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 4748 } 4749 return error; 4750 } 4751 4752 /* 4753 * When a delalloc extent is split (e.g., due to a hole punch), the original 4754 * indlen reservation must be shared across the two new extents that are left 4755 * behind. 4756 * 4757 * Given the original reservation and the worst case indlen for the two new 4758 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4759 * reservation fairly across the two new extents. If necessary, steal available 4760 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4761 * ores == 1). The number of stolen blocks is returned. The availability and 4762 * subsequent accounting of stolen blocks is the responsibility of the caller. 4763 */ 4764 static xfs_filblks_t 4765 xfs_bmap_split_indlen( 4766 xfs_filblks_t ores, /* original res. */ 4767 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4768 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4769 xfs_filblks_t avail) /* stealable blocks */ 4770 { 4771 xfs_filblks_t len1 = *indlen1; 4772 xfs_filblks_t len2 = *indlen2; 4773 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4774 xfs_filblks_t stolen = 0; 4775 xfs_filblks_t resfactor; 4776 4777 /* 4778 * Steal as many blocks as we can to try and satisfy the worst case 4779 * indlen for both new extents. 4780 */ 4781 if (ores < nres && avail) 4782 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4783 ores += stolen; 4784 4785 /* nothing else to do if we've satisfied the new reservation */ 4786 if (ores >= nres) 4787 return stolen; 4788 4789 /* 4790 * We can't meet the total required reservation for the two extents. 4791 * Calculate the percent of the overall shortage between both extents 4792 * and apply this percentage to each of the requested indlen values. 4793 * This distributes the shortage fairly and reduces the chances that one 4794 * of the two extents is left with nothing when extents are repeatedly 4795 * split. 4796 */ 4797 resfactor = (ores * 100); 4798 do_div(resfactor, nres); 4799 len1 *= resfactor; 4800 do_div(len1, 100); 4801 len2 *= resfactor; 4802 do_div(len2, 100); 4803 ASSERT(len1 + len2 <= ores); 4804 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4805 4806 /* 4807 * Hand out the remainder to each extent. If one of the two reservations 4808 * is zero, we want to make sure that one gets a block first. The loop 4809 * below starts with len1, so hand len2 a block right off the bat if it 4810 * is zero. 4811 */ 4812 ores -= (len1 + len2); 4813 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4814 if (ores && !len2 && *indlen2) { 4815 len2++; 4816 ores--; 4817 } 4818 while (ores) { 4819 if (len1 < *indlen1) { 4820 len1++; 4821 ores--; 4822 } 4823 if (!ores) 4824 break; 4825 if (len2 < *indlen2) { 4826 len2++; 4827 ores--; 4828 } 4829 } 4830 4831 *indlen1 = len1; 4832 *indlen2 = len2; 4833 4834 return stolen; 4835 } 4836 4837 int 4838 xfs_bmap_del_extent_delay( 4839 struct xfs_inode *ip, 4840 int whichfork, 4841 xfs_extnum_t *idx, 4842 struct xfs_bmbt_irec *got, 4843 struct xfs_bmbt_irec *del) 4844 { 4845 struct xfs_mount *mp = ip->i_mount; 4846 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4847 struct xfs_bmbt_irec new; 4848 int64_t da_old, da_new, da_diff = 0; 4849 xfs_fileoff_t del_endoff, got_endoff; 4850 xfs_filblks_t got_indlen, new_indlen, stolen; 4851 int error = 0, state = 0; 4852 bool isrt; 4853 4854 XFS_STATS_INC(mp, xs_del_exlist); 4855 4856 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4857 del_endoff = del->br_startoff + del->br_blockcount; 4858 got_endoff = got->br_startoff + got->br_blockcount; 4859 da_old = startblockval(got->br_startblock); 4860 da_new = 0; 4861 4862 ASSERT(*idx >= 0); 4863 ASSERT(*idx <= xfs_iext_count(ifp)); 4864 ASSERT(del->br_blockcount > 0); 4865 ASSERT(got->br_startoff <= del->br_startoff); 4866 ASSERT(got_endoff >= del_endoff); 4867 4868 if (isrt) { 4869 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4870 4871 do_div(rtexts, mp->m_sb.sb_rextsize); 4872 xfs_mod_frextents(mp, rtexts); 4873 } 4874 4875 /* 4876 * Update the inode delalloc counter now and wait to update the 4877 * sb counters as we might have to borrow some blocks for the 4878 * indirect block accounting. 4879 */ 4880 error = xfs_trans_reserve_quota_nblks(NULL, ip, 4881 -((long)del->br_blockcount), 0, 4882 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4883 if (error) 4884 return error; 4885 ip->i_delayed_blks -= del->br_blockcount; 4886 4887 if (whichfork == XFS_COW_FORK) 4888 state |= BMAP_COWFORK; 4889 4890 if (got->br_startoff == del->br_startoff) 4891 state |= BMAP_LEFT_CONTIG; 4892 if (got_endoff == del_endoff) 4893 state |= BMAP_RIGHT_CONTIG; 4894 4895 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 4896 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 4897 /* 4898 * Matches the whole extent. Delete the entry. 4899 */ 4900 xfs_iext_remove(ip, *idx, 1, state); 4901 --*idx; 4902 break; 4903 case BMAP_LEFT_CONTIG: 4904 /* 4905 * Deleting the first part of the extent. 4906 */ 4907 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4908 got->br_startoff = del_endoff; 4909 got->br_blockcount -= del->br_blockcount; 4910 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4911 got->br_blockcount), da_old); 4912 got->br_startblock = nullstartblock((int)da_new); 4913 xfs_iext_update_extent(ifp, *idx, got); 4914 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 4915 break; 4916 case BMAP_RIGHT_CONTIG: 4917 /* 4918 * Deleting the last part of the extent. 4919 */ 4920 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4921 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4922 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4923 got->br_blockcount), da_old); 4924 got->br_startblock = nullstartblock((int)da_new); 4925 xfs_iext_update_extent(ifp, *idx, got); 4926 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 4927 break; 4928 case 0: 4929 /* 4930 * Deleting the middle of the extent. 4931 * 4932 * Distribute the original indlen reservation across the two new 4933 * extents. Steal blocks from the deleted extent if necessary. 4934 * Stealing blocks simply fudges the fdblocks accounting below. 4935 * Warn if either of the new indlen reservations is zero as this 4936 * can lead to delalloc problems. 4937 */ 4938 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 4939 4940 got->br_blockcount = del->br_startoff - got->br_startoff; 4941 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4942 4943 new.br_blockcount = got_endoff - del_endoff; 4944 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4945 4946 WARN_ON_ONCE(!got_indlen || !new_indlen); 4947 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4948 del->br_blockcount); 4949 4950 got->br_startblock = nullstartblock((int)got_indlen); 4951 xfs_iext_update_extent(ifp, *idx, got); 4952 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_); 4953 4954 new.br_startoff = del_endoff; 4955 new.br_state = got->br_state; 4956 new.br_startblock = nullstartblock((int)new_indlen); 4957 4958 ++*idx; 4959 xfs_iext_insert(ip, *idx, 1, &new, state); 4960 4961 da_new = got_indlen + new_indlen - stolen; 4962 del->br_blockcount -= stolen; 4963 break; 4964 } 4965 4966 ASSERT(da_old >= da_new); 4967 da_diff = da_old - da_new; 4968 if (!isrt) 4969 da_diff += del->br_blockcount; 4970 if (da_diff) 4971 xfs_mod_fdblocks(mp, da_diff, false); 4972 return error; 4973 } 4974 4975 void 4976 xfs_bmap_del_extent_cow( 4977 struct xfs_inode *ip, 4978 xfs_extnum_t *idx, 4979 struct xfs_bmbt_irec *got, 4980 struct xfs_bmbt_irec *del) 4981 { 4982 struct xfs_mount *mp = ip->i_mount; 4983 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4984 struct xfs_bmbt_irec new; 4985 xfs_fileoff_t del_endoff, got_endoff; 4986 int state = BMAP_COWFORK; 4987 4988 XFS_STATS_INC(mp, xs_del_exlist); 4989 4990 del_endoff = del->br_startoff + del->br_blockcount; 4991 got_endoff = got->br_startoff + got->br_blockcount; 4992 4993 ASSERT(*idx >= 0); 4994 ASSERT(*idx <= xfs_iext_count(ifp)); 4995 ASSERT(del->br_blockcount > 0); 4996 ASSERT(got->br_startoff <= del->br_startoff); 4997 ASSERT(got_endoff >= del_endoff); 4998 ASSERT(!isnullstartblock(got->br_startblock)); 4999 5000 if (got->br_startoff == del->br_startoff) 5001 state |= BMAP_LEFT_CONTIG; 5002 if (got_endoff == del_endoff) 5003 state |= BMAP_RIGHT_CONTIG; 5004 5005 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 5006 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 5007 /* 5008 * Matches the whole extent. Delete the entry. 5009 */ 5010 xfs_iext_remove(ip, *idx, 1, state); 5011 --*idx; 5012 break; 5013 case BMAP_LEFT_CONTIG: 5014 /* 5015 * Deleting the first part of the extent. 5016 */ 5017 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5018 got->br_startoff = del_endoff; 5019 got->br_blockcount -= del->br_blockcount; 5020 got->br_startblock = del->br_startblock + del->br_blockcount; 5021 xfs_iext_update_extent(ifp, *idx, got); 5022 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5023 break; 5024 case BMAP_RIGHT_CONTIG: 5025 /* 5026 * Deleting the last part of the extent. 5027 */ 5028 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5029 got->br_blockcount -= del->br_blockcount; 5030 xfs_iext_update_extent(ifp, *idx, got); 5031 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5032 break; 5033 case 0: 5034 /* 5035 * Deleting the middle of the extent. 5036 */ 5037 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5038 got->br_blockcount = del->br_startoff - got->br_startoff; 5039 xfs_iext_update_extent(ifp, *idx, got); 5040 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5041 5042 new.br_startoff = del_endoff; 5043 new.br_blockcount = got_endoff - del_endoff; 5044 new.br_state = got->br_state; 5045 new.br_startblock = del->br_startblock + del->br_blockcount; 5046 5047 ++*idx; 5048 xfs_iext_insert(ip, *idx, 1, &new, state); 5049 break; 5050 } 5051 } 5052 5053 /* 5054 * Called by xfs_bmapi to update file extent records and the btree 5055 * after removing space (or undoing a delayed allocation). 5056 */ 5057 STATIC int /* error */ 5058 xfs_bmap_del_extent( 5059 xfs_inode_t *ip, /* incore inode pointer */ 5060 xfs_trans_t *tp, /* current transaction pointer */ 5061 xfs_extnum_t *idx, /* extent number to update/delete */ 5062 struct xfs_defer_ops *dfops, /* list of extents to be freed */ 5063 xfs_btree_cur_t *cur, /* if null, not a btree */ 5064 xfs_bmbt_irec_t *del, /* data to remove from extents */ 5065 int *logflagsp, /* inode logging flags */ 5066 int whichfork, /* data or attr fork */ 5067 int bflags) /* bmapi flags */ 5068 { 5069 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */ 5070 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */ 5071 xfs_fsblock_t del_endblock=0; /* first block past del */ 5072 xfs_fileoff_t del_endoff; /* first offset past del */ 5073 int delay; /* current block is delayed allocated */ 5074 int do_fx; /* free extent at end of routine */ 5075 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */ 5076 int error; /* error return value */ 5077 int flags; /* inode logging flags */ 5078 xfs_bmbt_irec_t got; /* current extent entry */ 5079 xfs_fileoff_t got_endoff; /* first offset past got */ 5080 int i; /* temp state */ 5081 xfs_ifork_t *ifp; /* inode fork pointer */ 5082 xfs_mount_t *mp; /* mount structure */ 5083 xfs_filblks_t nblks; /* quota/sb block count */ 5084 xfs_bmbt_irec_t new; /* new record to be inserted */ 5085 /* REFERENCED */ 5086 uint qfield; /* quota field to update */ 5087 xfs_filblks_t temp; /* for indirect length calculations */ 5088 xfs_filblks_t temp2; /* for indirect length calculations */ 5089 int state = 0; 5090 5091 mp = ip->i_mount; 5092 XFS_STATS_INC(mp, xs_del_exlist); 5093 5094 if (whichfork == XFS_ATTR_FORK) 5095 state |= BMAP_ATTRFORK; 5096 else if (whichfork == XFS_COW_FORK) 5097 state |= BMAP_COWFORK; 5098 5099 ifp = XFS_IFORK_PTR(ip, whichfork); 5100 ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp))); 5101 ASSERT(del->br_blockcount > 0); 5102 ep = xfs_iext_get_ext(ifp, *idx); 5103 xfs_bmbt_get_all(ep, &got); 5104 ASSERT(got.br_startoff <= del->br_startoff); 5105 del_endoff = del->br_startoff + del->br_blockcount; 5106 got_endoff = got.br_startoff + got.br_blockcount; 5107 ASSERT(got_endoff >= del_endoff); 5108 delay = isnullstartblock(got.br_startblock); 5109 ASSERT(isnullstartblock(del->br_startblock) == delay); 5110 flags = 0; 5111 qfield = 0; 5112 error = 0; 5113 /* 5114 * If deleting a real allocation, must free up the disk space. 5115 */ 5116 if (!delay) { 5117 flags = XFS_ILOG_CORE; 5118 /* 5119 * Realtime allocation. Free it and record di_nblocks update. 5120 */ 5121 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 5122 xfs_fsblock_t bno; 5123 xfs_filblks_t len; 5124 5125 ASSERT(do_mod(del->br_blockcount, 5126 mp->m_sb.sb_rextsize) == 0); 5127 ASSERT(do_mod(del->br_startblock, 5128 mp->m_sb.sb_rextsize) == 0); 5129 bno = del->br_startblock; 5130 len = del->br_blockcount; 5131 do_div(bno, mp->m_sb.sb_rextsize); 5132 do_div(len, mp->m_sb.sb_rextsize); 5133 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 5134 if (error) 5135 goto done; 5136 do_fx = 0; 5137 nblks = len * mp->m_sb.sb_rextsize; 5138 qfield = XFS_TRANS_DQ_RTBCOUNT; 5139 } 5140 /* 5141 * Ordinary allocation. 5142 */ 5143 else { 5144 do_fx = 1; 5145 nblks = del->br_blockcount; 5146 qfield = XFS_TRANS_DQ_BCOUNT; 5147 } 5148 /* 5149 * Set up del_endblock and cur for later. 5150 */ 5151 del_endblock = del->br_startblock + del->br_blockcount; 5152 if (cur) { 5153 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff, 5154 got.br_startblock, got.br_blockcount, 5155 &i))) 5156 goto done; 5157 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5158 } 5159 da_old = da_new = 0; 5160 } else { 5161 da_old = startblockval(got.br_startblock); 5162 da_new = 0; 5163 nblks = 0; 5164 do_fx = 0; 5165 } 5166 5167 /* 5168 * Set flag value to use in switch statement. 5169 * Left-contig is 2, right-contig is 1. 5170 */ 5171 switch (((got.br_startoff == del->br_startoff) << 1) | 5172 (got_endoff == del_endoff)) { 5173 case 3: 5174 /* 5175 * Matches the whole extent. Delete the entry. 5176 */ 5177 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5178 xfs_iext_remove(ip, *idx, 1, 5179 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); 5180 --*idx; 5181 if (delay) 5182 break; 5183 5184 XFS_IFORK_NEXT_SET(ip, whichfork, 5185 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5186 flags |= XFS_ILOG_CORE; 5187 if (!cur) { 5188 flags |= xfs_ilog_fext(whichfork); 5189 break; 5190 } 5191 if ((error = xfs_btree_delete(cur, &i))) 5192 goto done; 5193 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5194 break; 5195 5196 case 2: 5197 /* 5198 * Deleting the first part of the extent. 5199 */ 5200 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5201 xfs_bmbt_set_startoff(ep, del_endoff); 5202 temp = got.br_blockcount - del->br_blockcount; 5203 xfs_bmbt_set_blockcount(ep, temp); 5204 if (delay) { 5205 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 5206 da_old); 5207 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5208 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5209 da_new = temp; 5210 break; 5211 } 5212 xfs_bmbt_set_startblock(ep, del_endblock); 5213 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5214 if (!cur) { 5215 flags |= xfs_ilog_fext(whichfork); 5216 break; 5217 } 5218 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, 5219 got.br_blockcount - del->br_blockcount, 5220 got.br_state))) 5221 goto done; 5222 break; 5223 5224 case 1: 5225 /* 5226 * Deleting the last part of the extent. 5227 */ 5228 temp = got.br_blockcount - del->br_blockcount; 5229 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5230 xfs_bmbt_set_blockcount(ep, temp); 5231 if (delay) { 5232 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 5233 da_old); 5234 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5235 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5236 da_new = temp; 5237 break; 5238 } 5239 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5240 if (!cur) { 5241 flags |= xfs_ilog_fext(whichfork); 5242 break; 5243 } 5244 if ((error = xfs_bmbt_update(cur, got.br_startoff, 5245 got.br_startblock, 5246 got.br_blockcount - del->br_blockcount, 5247 got.br_state))) 5248 goto done; 5249 break; 5250 5251 case 0: 5252 /* 5253 * Deleting the middle of the extent. 5254 */ 5255 temp = del->br_startoff - got.br_startoff; 5256 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 5257 xfs_bmbt_set_blockcount(ep, temp); 5258 new.br_startoff = del_endoff; 5259 temp2 = got_endoff - del_endoff; 5260 new.br_blockcount = temp2; 5261 new.br_state = got.br_state; 5262 if (!delay) { 5263 new.br_startblock = del_endblock; 5264 flags |= XFS_ILOG_CORE; 5265 if (cur) { 5266 if ((error = xfs_bmbt_update(cur, 5267 got.br_startoff, 5268 got.br_startblock, temp, 5269 got.br_state))) 5270 goto done; 5271 if ((error = xfs_btree_increment(cur, 0, &i))) 5272 goto done; 5273 cur->bc_rec.b = new; 5274 error = xfs_btree_insert(cur, &i); 5275 if (error && error != -ENOSPC) 5276 goto done; 5277 /* 5278 * If get no-space back from btree insert, 5279 * it tried a split, and we have a zero 5280 * block reservation. 5281 * Fix up our state and return the error. 5282 */ 5283 if (error == -ENOSPC) { 5284 /* 5285 * Reset the cursor, don't trust 5286 * it after any insert operation. 5287 */ 5288 if ((error = xfs_bmbt_lookup_eq(cur, 5289 got.br_startoff, 5290 got.br_startblock, 5291 temp, &i))) 5292 goto done; 5293 XFS_WANT_CORRUPTED_GOTO(mp, 5294 i == 1, done); 5295 /* 5296 * Update the btree record back 5297 * to the original value. 5298 */ 5299 if ((error = xfs_bmbt_update(cur, 5300 got.br_startoff, 5301 got.br_startblock, 5302 got.br_blockcount, 5303 got.br_state))) 5304 goto done; 5305 /* 5306 * Reset the extent record back 5307 * to the original value. 5308 */ 5309 xfs_bmbt_set_blockcount(ep, 5310 got.br_blockcount); 5311 flags = 0; 5312 error = -ENOSPC; 5313 goto done; 5314 } 5315 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5316 } else 5317 flags |= xfs_ilog_fext(whichfork); 5318 XFS_IFORK_NEXT_SET(ip, whichfork, 5319 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5320 } else { 5321 xfs_filblks_t stolen; 5322 ASSERT(whichfork == XFS_DATA_FORK); 5323 5324 /* 5325 * Distribute the original indlen reservation across the 5326 * two new extents. Steal blocks from the deleted extent 5327 * if necessary. Stealing blocks simply fudges the 5328 * fdblocks accounting in xfs_bunmapi(). 5329 */ 5330 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount); 5331 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount); 5332 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2, 5333 del->br_blockcount); 5334 da_new = temp + temp2 - stolen; 5335 del->br_blockcount -= stolen; 5336 5337 /* 5338 * Set the reservation for each extent. Warn if either 5339 * is zero as this can lead to delalloc problems. 5340 */ 5341 WARN_ON_ONCE(!temp || !temp2); 5342 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5343 new.br_startblock = nullstartblock((int)temp2); 5344 } 5345 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5346 xfs_iext_insert(ip, *idx + 1, 1, &new, state); 5347 ++*idx; 5348 break; 5349 } 5350 5351 /* remove reverse mapping */ 5352 if (!delay) { 5353 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del); 5354 if (error) 5355 goto done; 5356 } 5357 5358 /* 5359 * If we need to, add to list of extents to delete. 5360 */ 5361 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { 5362 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5363 error = xfs_refcount_decrease_extent(mp, dfops, del); 5364 if (error) 5365 goto done; 5366 } else 5367 xfs_bmap_add_free(mp, dfops, del->br_startblock, 5368 del->br_blockcount, NULL); 5369 } 5370 5371 /* 5372 * Adjust inode # blocks in the file. 5373 */ 5374 if (nblks) 5375 ip->i_d.di_nblocks -= nblks; 5376 /* 5377 * Adjust quota data. 5378 */ 5379 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5380 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5381 5382 /* 5383 * Account for change in delayed indirect blocks. 5384 * Nothing to do for disk quota accounting here. 5385 */ 5386 ASSERT(da_old >= da_new); 5387 if (da_old > da_new) 5388 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false); 5389 done: 5390 *logflagsp = flags; 5391 return error; 5392 } 5393 5394 /* 5395 * Unmap (remove) blocks from a file. 5396 * If nexts is nonzero then the number of extents to remove is limited to 5397 * that value. If not all extents in the block range can be removed then 5398 * *done is set. 5399 */ 5400 int /* error */ 5401 __xfs_bunmapi( 5402 xfs_trans_t *tp, /* transaction pointer */ 5403 struct xfs_inode *ip, /* incore inode */ 5404 xfs_fileoff_t bno, /* starting offset to unmap */ 5405 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5406 int flags, /* misc flags */ 5407 xfs_extnum_t nexts, /* number of extents max */ 5408 xfs_fsblock_t *firstblock, /* first allocated block 5409 controls a.g. for allocs */ 5410 struct xfs_defer_ops *dfops) /* i/o: deferred updates */ 5411 { 5412 xfs_btree_cur_t *cur; /* bmap btree cursor */ 5413 xfs_bmbt_irec_t del; /* extent being deleted */ 5414 int error; /* error return value */ 5415 xfs_extnum_t extno; /* extent number in list */ 5416 xfs_bmbt_irec_t got; /* current extent record */ 5417 xfs_ifork_t *ifp; /* inode fork pointer */ 5418 int isrt; /* freeing in rt area */ 5419 xfs_extnum_t lastx; /* last extent index used */ 5420 int logflags; /* transaction logging flags */ 5421 xfs_extlen_t mod; /* rt extent offset */ 5422 xfs_mount_t *mp; /* mount structure */ 5423 xfs_fileoff_t start; /* first file offset deleted */ 5424 int tmp_logflags; /* partial logging flags */ 5425 int wasdel; /* was a delayed alloc extent */ 5426 int whichfork; /* data or attribute fork */ 5427 xfs_fsblock_t sum; 5428 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5429 xfs_fileoff_t max_len; 5430 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno; 5431 5432 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); 5433 5434 whichfork = xfs_bmapi_whichfork(flags); 5435 ASSERT(whichfork != XFS_COW_FORK); 5436 ifp = XFS_IFORK_PTR(ip, whichfork); 5437 if (unlikely( 5438 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5439 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5440 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5441 ip->i_mount); 5442 return -EFSCORRUPTED; 5443 } 5444 mp = ip->i_mount; 5445 if (XFS_FORCED_SHUTDOWN(mp)) 5446 return -EIO; 5447 5448 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5449 ASSERT(len > 0); 5450 ASSERT(nexts >= 0); 5451 5452 /* 5453 * Guesstimate how many blocks we can unmap without running the risk of 5454 * blowing out the transaction with a mix of EFIs and reflink 5455 * adjustments. 5456 */ 5457 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) 5458 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res)); 5459 else 5460 max_len = len; 5461 5462 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5463 (error = xfs_iread_extents(tp, ip, whichfork))) 5464 return error; 5465 if (xfs_iext_count(ifp) == 0) { 5466 *rlen = 0; 5467 return 0; 5468 } 5469 XFS_STATS_INC(mp, xs_blk_unmap); 5470 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5471 start = bno; 5472 bno = start + len - 1; 5473 5474 /* 5475 * Check to see if the given block number is past the end of the 5476 * file, back up to the last block if so... 5477 */ 5478 if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) { 5479 ASSERT(lastx > 0); 5480 xfs_iext_get_extent(ifp, --lastx, &got); 5481 bno = got.br_startoff + got.br_blockcount - 1; 5482 } 5483 5484 logflags = 0; 5485 if (ifp->if_flags & XFS_IFBROOT) { 5486 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5487 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5488 cur->bc_private.b.firstblock = *firstblock; 5489 cur->bc_private.b.dfops = dfops; 5490 cur->bc_private.b.flags = 0; 5491 } else 5492 cur = NULL; 5493 5494 if (isrt) { 5495 /* 5496 * Synchronize by locking the bitmap inode. 5497 */ 5498 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5499 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5500 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5501 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5502 } 5503 5504 extno = 0; 5505 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 && 5506 (nexts == 0 || extno < nexts) && max_len > 0) { 5507 /* 5508 * Is the found extent after a hole in which bno lives? 5509 * Just back up to the previous extent, if so. 5510 */ 5511 if (got.br_startoff > bno) { 5512 if (--lastx < 0) 5513 break; 5514 xfs_iext_get_extent(ifp, lastx, &got); 5515 } 5516 /* 5517 * Is the last block of this extent before the range 5518 * we're supposed to delete? If so, we're done. 5519 */ 5520 bno = XFS_FILEOFF_MIN(bno, 5521 got.br_startoff + got.br_blockcount - 1); 5522 if (bno < start) 5523 break; 5524 /* 5525 * Then deal with the (possibly delayed) allocated space 5526 * we found. 5527 */ 5528 del = got; 5529 wasdel = isnullstartblock(del.br_startblock); 5530 5531 /* 5532 * Make sure we don't touch multiple AGF headers out of order 5533 * in a single transaction, as that could cause AB-BA deadlocks. 5534 */ 5535 if (!wasdel) { 5536 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock); 5537 if (prev_agno != NULLAGNUMBER && prev_agno > agno) 5538 break; 5539 prev_agno = agno; 5540 } 5541 if (got.br_startoff < start) { 5542 del.br_startoff = start; 5543 del.br_blockcount -= start - got.br_startoff; 5544 if (!wasdel) 5545 del.br_startblock += start - got.br_startoff; 5546 } 5547 if (del.br_startoff + del.br_blockcount > bno + 1) 5548 del.br_blockcount = bno + 1 - del.br_startoff; 5549 5550 /* How much can we safely unmap? */ 5551 if (max_len < del.br_blockcount) { 5552 del.br_startoff += del.br_blockcount - max_len; 5553 if (!wasdel) 5554 del.br_startblock += del.br_blockcount - max_len; 5555 del.br_blockcount = max_len; 5556 } 5557 5558 sum = del.br_startblock + del.br_blockcount; 5559 if (isrt && 5560 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) { 5561 /* 5562 * Realtime extent not lined up at the end. 5563 * The extent could have been split into written 5564 * and unwritten pieces, or we could just be 5565 * unmapping part of it. But we can't really 5566 * get rid of part of a realtime extent. 5567 */ 5568 if (del.br_state == XFS_EXT_UNWRITTEN || 5569 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5570 /* 5571 * This piece is unwritten, or we're not 5572 * using unwritten extents. Skip over it. 5573 */ 5574 ASSERT(bno >= mod); 5575 bno -= mod > del.br_blockcount ? 5576 del.br_blockcount : mod; 5577 if (bno < got.br_startoff) { 5578 if (--lastx >= 0) 5579 xfs_bmbt_get_all(xfs_iext_get_ext( 5580 ifp, lastx), &got); 5581 } 5582 continue; 5583 } 5584 /* 5585 * It's written, turn it unwritten. 5586 * This is better than zeroing it. 5587 */ 5588 ASSERT(del.br_state == XFS_EXT_NORM); 5589 ASSERT(tp->t_blk_res > 0); 5590 /* 5591 * If this spans a realtime extent boundary, 5592 * chop it back to the start of the one we end at. 5593 */ 5594 if (del.br_blockcount > mod) { 5595 del.br_startoff += del.br_blockcount - mod; 5596 del.br_startblock += del.br_blockcount - mod; 5597 del.br_blockcount = mod; 5598 } 5599 del.br_state = XFS_EXT_UNWRITTEN; 5600 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5601 whichfork, &lastx, &cur, &del, 5602 firstblock, dfops, &logflags); 5603 if (error) 5604 goto error0; 5605 goto nodelete; 5606 } 5607 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) { 5608 /* 5609 * Realtime extent is lined up at the end but not 5610 * at the front. We'll get rid of full extents if 5611 * we can. 5612 */ 5613 mod = mp->m_sb.sb_rextsize - mod; 5614 if (del.br_blockcount > mod) { 5615 del.br_blockcount -= mod; 5616 del.br_startoff += mod; 5617 del.br_startblock += mod; 5618 } else if ((del.br_startoff == start && 5619 (del.br_state == XFS_EXT_UNWRITTEN || 5620 tp->t_blk_res == 0)) || 5621 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5622 /* 5623 * Can't make it unwritten. There isn't 5624 * a full extent here so just skip it. 5625 */ 5626 ASSERT(bno >= del.br_blockcount); 5627 bno -= del.br_blockcount; 5628 if (got.br_startoff > bno && --lastx >= 0) 5629 xfs_iext_get_extent(ifp, lastx, &got); 5630 continue; 5631 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5632 struct xfs_bmbt_irec prev; 5633 5634 /* 5635 * This one is already unwritten. 5636 * It must have a written left neighbor. 5637 * Unwrite the killed part of that one and 5638 * try again. 5639 */ 5640 ASSERT(lastx > 0); 5641 xfs_iext_get_extent(ifp, lastx - 1, &prev); 5642 ASSERT(prev.br_state == XFS_EXT_NORM); 5643 ASSERT(!isnullstartblock(prev.br_startblock)); 5644 ASSERT(del.br_startblock == 5645 prev.br_startblock + prev.br_blockcount); 5646 if (prev.br_startoff < start) { 5647 mod = start - prev.br_startoff; 5648 prev.br_blockcount -= mod; 5649 prev.br_startblock += mod; 5650 prev.br_startoff = start; 5651 } 5652 prev.br_state = XFS_EXT_UNWRITTEN; 5653 lastx--; 5654 error = xfs_bmap_add_extent_unwritten_real(tp, 5655 ip, whichfork, &lastx, &cur, 5656 &prev, firstblock, dfops, 5657 &logflags); 5658 if (error) 5659 goto error0; 5660 goto nodelete; 5661 } else { 5662 ASSERT(del.br_state == XFS_EXT_NORM); 5663 del.br_state = XFS_EXT_UNWRITTEN; 5664 error = xfs_bmap_add_extent_unwritten_real(tp, 5665 ip, whichfork, &lastx, &cur, 5666 &del, firstblock, dfops, 5667 &logflags); 5668 if (error) 5669 goto error0; 5670 goto nodelete; 5671 } 5672 } 5673 5674 /* 5675 * If it's the case where the directory code is running 5676 * with no block reservation, and the deleted block is in 5677 * the middle of its extent, and the resulting insert 5678 * of an extent would cause transformation to btree format, 5679 * then reject it. The calling code will then swap 5680 * blocks around instead. 5681 * We have to do this now, rather than waiting for the 5682 * conversion to btree format, since the transaction 5683 * will be dirty. 5684 */ 5685 if (!wasdel && tp->t_blk_res == 0 && 5686 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 5687 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */ 5688 XFS_IFORK_MAXEXT(ip, whichfork) && 5689 del.br_startoff > got.br_startoff && 5690 del.br_startoff + del.br_blockcount < 5691 got.br_startoff + got.br_blockcount) { 5692 error = -ENOSPC; 5693 goto error0; 5694 } 5695 5696 /* 5697 * Unreserve quota and update realtime free space, if 5698 * appropriate. If delayed allocation, update the inode delalloc 5699 * counter now and wait to update the sb counters as 5700 * xfs_bmap_del_extent() might need to borrow some blocks. 5701 */ 5702 if (wasdel) { 5703 ASSERT(startblockval(del.br_startblock) > 0); 5704 if (isrt) { 5705 xfs_filblks_t rtexts; 5706 5707 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); 5708 do_div(rtexts, mp->m_sb.sb_rextsize); 5709 xfs_mod_frextents(mp, (int64_t)rtexts); 5710 (void)xfs_trans_reserve_quota_nblks(NULL, 5711 ip, -((long)del.br_blockcount), 0, 5712 XFS_QMOPT_RES_RTBLKS); 5713 } else { 5714 (void)xfs_trans_reserve_quota_nblks(NULL, 5715 ip, -((long)del.br_blockcount), 0, 5716 XFS_QMOPT_RES_REGBLKS); 5717 } 5718 ip->i_delayed_blks -= del.br_blockcount; 5719 if (cur) 5720 cur->bc_private.b.flags |= 5721 XFS_BTCUR_BPRV_WASDEL; 5722 } else if (cur) 5723 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL; 5724 5725 error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del, 5726 &tmp_logflags, whichfork, flags); 5727 logflags |= tmp_logflags; 5728 if (error) 5729 goto error0; 5730 5731 if (!isrt && wasdel) 5732 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false); 5733 5734 max_len -= del.br_blockcount; 5735 bno = del.br_startoff - 1; 5736 nodelete: 5737 /* 5738 * If not done go on to the next (previous) record. 5739 */ 5740 if (bno != (xfs_fileoff_t)-1 && bno >= start) { 5741 if (lastx >= 0) { 5742 xfs_iext_get_extent(ifp, lastx, &got); 5743 if (got.br_startoff > bno && --lastx >= 0) 5744 xfs_iext_get_extent(ifp, lastx, &got); 5745 } 5746 extno++; 5747 } 5748 } 5749 if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0) 5750 *rlen = 0; 5751 else 5752 *rlen = bno - start + 1; 5753 5754 /* 5755 * Convert to a btree if necessary. 5756 */ 5757 if (xfs_bmap_needs_btree(ip, whichfork)) { 5758 ASSERT(cur == NULL); 5759 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, 5760 &cur, 0, &tmp_logflags, whichfork); 5761 logflags |= tmp_logflags; 5762 if (error) 5763 goto error0; 5764 } 5765 /* 5766 * transform from btree to extents, give it cur 5767 */ 5768 else if (xfs_bmap_wants_extents(ip, whichfork)) { 5769 ASSERT(cur != NULL); 5770 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, 5771 whichfork); 5772 logflags |= tmp_logflags; 5773 if (error) 5774 goto error0; 5775 } 5776 /* 5777 * transform from extents to local? 5778 */ 5779 error = 0; 5780 error0: 5781 /* 5782 * Log everything. Do this after conversion, there's no point in 5783 * logging the extent records if we've converted to btree format. 5784 */ 5785 if ((logflags & xfs_ilog_fext(whichfork)) && 5786 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5787 logflags &= ~xfs_ilog_fext(whichfork); 5788 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5789 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5790 logflags &= ~xfs_ilog_fbroot(whichfork); 5791 /* 5792 * Log inode even in the error case, if the transaction 5793 * is dirty we'll need to shut down the filesystem. 5794 */ 5795 if (logflags) 5796 xfs_trans_log_inode(tp, ip, logflags); 5797 if (cur) { 5798 if (!error) { 5799 *firstblock = cur->bc_private.b.firstblock; 5800 cur->bc_private.b.allocated = 0; 5801 } 5802 xfs_btree_del_cursor(cur, 5803 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 5804 } 5805 return error; 5806 } 5807 5808 /* Unmap a range of a file. */ 5809 int 5810 xfs_bunmapi( 5811 xfs_trans_t *tp, 5812 struct xfs_inode *ip, 5813 xfs_fileoff_t bno, 5814 xfs_filblks_t len, 5815 int flags, 5816 xfs_extnum_t nexts, 5817 xfs_fsblock_t *firstblock, 5818 struct xfs_defer_ops *dfops, 5819 int *done) 5820 { 5821 int error; 5822 5823 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock, 5824 dfops); 5825 *done = (len == 0); 5826 return error; 5827 } 5828 5829 /* 5830 * Determine whether an extent shift can be accomplished by a merge with the 5831 * extent that precedes the target hole of the shift. 5832 */ 5833 STATIC bool 5834 xfs_bmse_can_merge( 5835 struct xfs_bmbt_irec *left, /* preceding extent */ 5836 struct xfs_bmbt_irec *got, /* current extent to shift */ 5837 xfs_fileoff_t shift) /* shift fsb */ 5838 { 5839 xfs_fileoff_t startoff; 5840 5841 startoff = got->br_startoff - shift; 5842 5843 /* 5844 * The extent, once shifted, must be adjacent in-file and on-disk with 5845 * the preceding extent. 5846 */ 5847 if ((left->br_startoff + left->br_blockcount != startoff) || 5848 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5849 (left->br_state != got->br_state) || 5850 (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) 5851 return false; 5852 5853 return true; 5854 } 5855 5856 /* 5857 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5858 * hole in the file. If an extent shift would result in the extent being fully 5859 * adjacent to the extent that currently precedes the hole, we can merge with 5860 * the preceding extent rather than do the shift. 5861 * 5862 * This function assumes the caller has verified a shift-by-merge is possible 5863 * with the provided extents via xfs_bmse_can_merge(). 5864 */ 5865 STATIC int 5866 xfs_bmse_merge( 5867 struct xfs_inode *ip, 5868 int whichfork, 5869 xfs_fileoff_t shift, /* shift fsb */ 5870 int current_ext, /* idx of gotp */ 5871 struct xfs_bmbt_irec *got, /* extent to shift */ 5872 struct xfs_bmbt_irec *left, /* preceding extent */ 5873 struct xfs_btree_cur *cur, 5874 int *logflags, /* output */ 5875 struct xfs_defer_ops *dfops) 5876 { 5877 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 5878 struct xfs_bmbt_irec new; 5879 xfs_filblks_t blockcount; 5880 int error, i; 5881 struct xfs_mount *mp = ip->i_mount; 5882 5883 blockcount = left->br_blockcount + got->br_blockcount; 5884 5885 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5886 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5887 ASSERT(xfs_bmse_can_merge(left, got, shift)); 5888 5889 new = *left; 5890 new.br_blockcount = blockcount; 5891 5892 /* 5893 * Update the on-disk extent count, the btree if necessary and log the 5894 * inode. 5895 */ 5896 XFS_IFORK_NEXT_SET(ip, whichfork, 5897 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5898 *logflags |= XFS_ILOG_CORE; 5899 if (!cur) { 5900 *logflags |= XFS_ILOG_DEXT; 5901 goto done; 5902 } 5903 5904 /* lookup and remove the extent to merge */ 5905 error = xfs_bmbt_lookup_eq(cur, got->br_startoff, got->br_startblock, 5906 got->br_blockcount, &i); 5907 if (error) 5908 return error; 5909 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5910 5911 error = xfs_btree_delete(cur, &i); 5912 if (error) 5913 return error; 5914 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5915 5916 /* lookup and update size of the previous extent */ 5917 error = xfs_bmbt_lookup_eq(cur, left->br_startoff, left->br_startblock, 5918 left->br_blockcount, &i); 5919 if (error) 5920 return error; 5921 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5922 5923 error = xfs_bmbt_update(cur, new.br_startoff, new.br_startblock, 5924 new.br_blockcount, new.br_state); 5925 if (error) 5926 return error; 5927 5928 done: 5929 xfs_iext_update_extent(ifp, current_ext - 1, &new); 5930 xfs_iext_remove(ip, current_ext, 1, 0); 5931 5932 /* update reverse mapping. rmap functions merge the rmaps for us */ 5933 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got); 5934 if (error) 5935 return error; 5936 memcpy(&new, got, sizeof(new)); 5937 new.br_startoff = left->br_startoff + left->br_blockcount; 5938 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new); 5939 } 5940 5941 /* 5942 * Shift a single extent. 5943 */ 5944 STATIC int 5945 xfs_bmse_shift_one( 5946 struct xfs_inode *ip, 5947 int whichfork, 5948 xfs_fileoff_t offset_shift_fsb, 5949 int *current_ext, 5950 struct xfs_bmbt_irec *got, 5951 struct xfs_btree_cur *cur, 5952 int *logflags, 5953 enum shift_direction direction, 5954 struct xfs_defer_ops *dfops) 5955 { 5956 struct xfs_ifork *ifp; 5957 struct xfs_mount *mp; 5958 xfs_fileoff_t startoff; 5959 struct xfs_bmbt_irec adj_irec, new; 5960 int error; 5961 int i; 5962 int total_extents; 5963 5964 mp = ip->i_mount; 5965 ifp = XFS_IFORK_PTR(ip, whichfork); 5966 total_extents = xfs_iext_count(ifp); 5967 5968 /* delalloc extents should be prevented by caller */ 5969 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got->br_startblock)); 5970 5971 if (direction == SHIFT_LEFT) { 5972 startoff = got->br_startoff - offset_shift_fsb; 5973 5974 /* 5975 * Check for merge if we've got an extent to the left, 5976 * otherwise make sure there's enough room at the start 5977 * of the file for the shift. 5978 */ 5979 if (!*current_ext) { 5980 if (got->br_startoff < offset_shift_fsb) 5981 return -EINVAL; 5982 goto update_current_ext; 5983 } 5984 5985 /* 5986 * grab the left extent and check for a large enough hole. 5987 */ 5988 xfs_iext_get_extent(ifp, *current_ext - 1, &adj_irec); 5989 if (startoff < adj_irec.br_startoff + adj_irec.br_blockcount) 5990 return -EINVAL; 5991 5992 /* check whether to merge the extent or shift it down */ 5993 if (xfs_bmse_can_merge(&adj_irec, got, offset_shift_fsb)) { 5994 return xfs_bmse_merge(ip, whichfork, offset_shift_fsb, 5995 *current_ext, got, &adj_irec, 5996 cur, logflags, dfops); 5997 } 5998 } else { 5999 startoff = got->br_startoff + offset_shift_fsb; 6000 /* nothing to move if this is the last extent */ 6001 if (*current_ext >= (total_extents - 1)) 6002 goto update_current_ext; 6003 6004 /* 6005 * If this is not the last extent in the file, make sure there 6006 * is enough room between current extent and next extent for 6007 * accommodating the shift. 6008 */ 6009 xfs_iext_get_extent(ifp, *current_ext + 1, &adj_irec); 6010 if (startoff + got->br_blockcount > adj_irec.br_startoff) 6011 return -EINVAL; 6012 6013 /* 6014 * Unlike a left shift (which involves a hole punch), 6015 * a right shift does not modify extent neighbors 6016 * in any way. We should never find mergeable extents 6017 * in this scenario. Check anyways and warn if we 6018 * encounter two extents that could be one. 6019 */ 6020 if (xfs_bmse_can_merge(got, &adj_irec, offset_shift_fsb)) 6021 WARN_ON_ONCE(1); 6022 } 6023 6024 /* 6025 * Increment the extent index for the next iteration, update the start 6026 * offset of the in-core extent and update the btree if applicable. 6027 */ 6028 update_current_ext: 6029 *logflags |= XFS_ILOG_CORE; 6030 6031 new = *got; 6032 new.br_startoff = startoff; 6033 6034 if (cur) { 6035 error = xfs_bmbt_lookup_eq(cur, got->br_startoff, 6036 got->br_startblock, got->br_blockcount, &i); 6037 if (error) 6038 return error; 6039 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 6040 6041 error = xfs_bmbt_update(cur, new.br_startoff, 6042 new.br_startblock, new.br_blockcount, 6043 new.br_state); 6044 if (error) 6045 return error; 6046 } else { 6047 *logflags |= XFS_ILOG_DEXT; 6048 } 6049 6050 xfs_iext_update_extent(ifp, *current_ext, &new); 6051 6052 if (direction == SHIFT_LEFT) 6053 (*current_ext)++; 6054 else 6055 (*current_ext)--; 6056 6057 /* update reverse mapping */ 6058 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got); 6059 if (error) 6060 return error; 6061 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new); 6062 } 6063 6064 /* 6065 * Shift extent records to the left/right to cover/create a hole. 6066 * 6067 * The maximum number of extents to be shifted in a single operation is 6068 * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the 6069 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb 6070 * is the length by which each extent is shifted. If there is no hole to shift 6071 * the extents into, this will be considered invalid operation and we abort 6072 * immediately. 6073 */ 6074 int 6075 xfs_bmap_shift_extents( 6076 struct xfs_trans *tp, 6077 struct xfs_inode *ip, 6078 xfs_fileoff_t *next_fsb, 6079 xfs_fileoff_t offset_shift_fsb, 6080 int *done, 6081 xfs_fileoff_t stop_fsb, 6082 xfs_fsblock_t *firstblock, 6083 struct xfs_defer_ops *dfops, 6084 enum shift_direction direction, 6085 int num_exts) 6086 { 6087 struct xfs_btree_cur *cur = NULL; 6088 struct xfs_bmbt_irec got; 6089 struct xfs_mount *mp = ip->i_mount; 6090 struct xfs_ifork *ifp; 6091 xfs_extnum_t nexts = 0; 6092 xfs_extnum_t current_ext; 6093 xfs_extnum_t total_extents; 6094 xfs_extnum_t stop_extent; 6095 int error = 0; 6096 int whichfork = XFS_DATA_FORK; 6097 int logflags = 0; 6098 6099 if (unlikely(XFS_TEST_ERROR( 6100 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 6101 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 6102 mp, XFS_ERRTAG_BMAPIFORMAT))) { 6103 XFS_ERROR_REPORT("xfs_bmap_shift_extents", 6104 XFS_ERRLEVEL_LOW, mp); 6105 return -EFSCORRUPTED; 6106 } 6107 6108 if (XFS_FORCED_SHUTDOWN(mp)) 6109 return -EIO; 6110 6111 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 6112 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 6113 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT); 6114 6115 ifp = XFS_IFORK_PTR(ip, whichfork); 6116 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 6117 /* Read in all the extents */ 6118 error = xfs_iread_extents(tp, ip, whichfork); 6119 if (error) 6120 return error; 6121 } 6122 6123 if (ifp->if_flags & XFS_IFBROOT) { 6124 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 6125 cur->bc_private.b.firstblock = *firstblock; 6126 cur->bc_private.b.dfops = dfops; 6127 cur->bc_private.b.flags = 0; 6128 } 6129 6130 /* 6131 * There may be delalloc extents in the data fork before the range we 6132 * are collapsing out, so we cannot use the count of real extents here. 6133 * Instead we have to calculate it from the incore fork. 6134 */ 6135 total_extents = xfs_iext_count(ifp); 6136 if (total_extents == 0) { 6137 *done = 1; 6138 goto del_cursor; 6139 } 6140 6141 /* 6142 * In case of first right shift, we need to initialize next_fsb 6143 */ 6144 if (*next_fsb == NULLFSBLOCK) { 6145 ASSERT(direction == SHIFT_RIGHT); 6146 6147 current_ext = total_extents - 1; 6148 xfs_iext_get_extent(ifp, current_ext, &got); 6149 if (stop_fsb > got.br_startoff) { 6150 *done = 1; 6151 goto del_cursor; 6152 } 6153 *next_fsb = got.br_startoff; 6154 } else { 6155 /* 6156 * Look up the extent index for the fsb where we start shifting. We can 6157 * henceforth iterate with current_ext as extent list changes are locked 6158 * out via ilock. 6159 * 6160 * If next_fsb lies in a hole beyond which there are no extents we are 6161 * done. 6162 */ 6163 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, ¤t_ext, 6164 &got)) { 6165 *done = 1; 6166 goto del_cursor; 6167 } 6168 } 6169 6170 /* Lookup the extent index at which we have to stop */ 6171 if (direction == SHIFT_RIGHT) { 6172 struct xfs_bmbt_irec s; 6173 6174 xfs_iext_lookup_extent(ip, ifp, stop_fsb, &stop_extent, &s); 6175 /* Make stop_extent exclusive of shift range */ 6176 stop_extent--; 6177 if (current_ext <= stop_extent) { 6178 error = -EIO; 6179 goto del_cursor; 6180 } 6181 } else { 6182 stop_extent = total_extents; 6183 if (current_ext >= stop_extent) { 6184 error = -EIO; 6185 goto del_cursor; 6186 } 6187 } 6188 6189 while (nexts++ < num_exts) { 6190 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb, 6191 ¤t_ext, &got, cur, &logflags, 6192 direction, dfops); 6193 if (error) 6194 goto del_cursor; 6195 /* 6196 * If there was an extent merge during the shift, the extent 6197 * count can change. Update the total and grade the next record. 6198 */ 6199 if (direction == SHIFT_LEFT) { 6200 total_extents = xfs_iext_count(ifp); 6201 stop_extent = total_extents; 6202 } 6203 6204 if (current_ext == stop_extent) { 6205 *done = 1; 6206 *next_fsb = NULLFSBLOCK; 6207 break; 6208 } 6209 xfs_iext_get_extent(ifp, current_ext, &got); 6210 } 6211 6212 if (!*done) 6213 *next_fsb = got.br_startoff; 6214 6215 del_cursor: 6216 if (cur) 6217 xfs_btree_del_cursor(cur, 6218 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 6219 6220 if (logflags) 6221 xfs_trans_log_inode(tp, ip, logflags); 6222 6223 return error; 6224 } 6225 6226 /* 6227 * Splits an extent into two extents at split_fsb block such that it is 6228 * the first block of the current_ext. @current_ext is a target extent 6229 * to be split. @split_fsb is a block where the extents is split. 6230 * If split_fsb lies in a hole or the first block of extents, just return 0. 6231 */ 6232 STATIC int 6233 xfs_bmap_split_extent_at( 6234 struct xfs_trans *tp, 6235 struct xfs_inode *ip, 6236 xfs_fileoff_t split_fsb, 6237 xfs_fsblock_t *firstfsb, 6238 struct xfs_defer_ops *dfops) 6239 { 6240 int whichfork = XFS_DATA_FORK; 6241 struct xfs_btree_cur *cur = NULL; 6242 struct xfs_bmbt_irec got; 6243 struct xfs_bmbt_irec new; /* split extent */ 6244 struct xfs_mount *mp = ip->i_mount; 6245 struct xfs_ifork *ifp; 6246 xfs_fsblock_t gotblkcnt; /* new block count for got */ 6247 xfs_extnum_t current_ext; 6248 int error = 0; 6249 int logflags = 0; 6250 int i = 0; 6251 6252 if (unlikely(XFS_TEST_ERROR( 6253 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 6254 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 6255 mp, XFS_ERRTAG_BMAPIFORMAT))) { 6256 XFS_ERROR_REPORT("xfs_bmap_split_extent_at", 6257 XFS_ERRLEVEL_LOW, mp); 6258 return -EFSCORRUPTED; 6259 } 6260 6261 if (XFS_FORCED_SHUTDOWN(mp)) 6262 return -EIO; 6263 6264 ifp = XFS_IFORK_PTR(ip, whichfork); 6265 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 6266 /* Read in all the extents */ 6267 error = xfs_iread_extents(tp, ip, whichfork); 6268 if (error) 6269 return error; 6270 } 6271 6272 /* 6273 * If there are not extents, or split_fsb lies in a hole we are done. 6274 */ 6275 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, ¤t_ext, &got) || 6276 got.br_startoff >= split_fsb) 6277 return 0; 6278 6279 gotblkcnt = split_fsb - got.br_startoff; 6280 new.br_startoff = split_fsb; 6281 new.br_startblock = got.br_startblock + gotblkcnt; 6282 new.br_blockcount = got.br_blockcount - gotblkcnt; 6283 new.br_state = got.br_state; 6284 6285 if (ifp->if_flags & XFS_IFBROOT) { 6286 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 6287 cur->bc_private.b.firstblock = *firstfsb; 6288 cur->bc_private.b.dfops = dfops; 6289 cur->bc_private.b.flags = 0; 6290 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, 6291 got.br_startblock, 6292 got.br_blockcount, 6293 &i); 6294 if (error) 6295 goto del_cursor; 6296 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); 6297 } 6298 6299 got.br_blockcount = gotblkcnt; 6300 xfs_iext_update_extent(ifp, current_ext, &got); 6301 6302 logflags = XFS_ILOG_CORE; 6303 if (cur) { 6304 error = xfs_bmbt_update(cur, got.br_startoff, 6305 got.br_startblock, 6306 got.br_blockcount, 6307 got.br_state); 6308 if (error) 6309 goto del_cursor; 6310 } else 6311 logflags |= XFS_ILOG_DEXT; 6312 6313 /* Add new extent */ 6314 current_ext++; 6315 xfs_iext_insert(ip, current_ext, 1, &new, 0); 6316 XFS_IFORK_NEXT_SET(ip, whichfork, 6317 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 6318 6319 if (cur) { 6320 error = xfs_bmbt_lookup_eq(cur, new.br_startoff, 6321 new.br_startblock, new.br_blockcount, 6322 &i); 6323 if (error) 6324 goto del_cursor; 6325 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor); 6326 cur->bc_rec.b.br_state = new.br_state; 6327 6328 error = xfs_btree_insert(cur, &i); 6329 if (error) 6330 goto del_cursor; 6331 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); 6332 } 6333 6334 /* 6335 * Convert to a btree if necessary. 6336 */ 6337 if (xfs_bmap_needs_btree(ip, whichfork)) { 6338 int tmp_logflags; /* partial log flag return val */ 6339 6340 ASSERT(cur == NULL); 6341 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops, 6342 &cur, 0, &tmp_logflags, whichfork); 6343 logflags |= tmp_logflags; 6344 } 6345 6346 del_cursor: 6347 if (cur) { 6348 cur->bc_private.b.allocated = 0; 6349 xfs_btree_del_cursor(cur, 6350 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 6351 } 6352 6353 if (logflags) 6354 xfs_trans_log_inode(tp, ip, logflags); 6355 return error; 6356 } 6357 6358 int 6359 xfs_bmap_split_extent( 6360 struct xfs_inode *ip, 6361 xfs_fileoff_t split_fsb) 6362 { 6363 struct xfs_mount *mp = ip->i_mount; 6364 struct xfs_trans *tp; 6365 struct xfs_defer_ops dfops; 6366 xfs_fsblock_t firstfsb; 6367 int error; 6368 6369 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 6370 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp); 6371 if (error) 6372 return error; 6373 6374 xfs_ilock(ip, XFS_ILOCK_EXCL); 6375 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 6376 6377 xfs_defer_init(&dfops, &firstfsb); 6378 6379 error = xfs_bmap_split_extent_at(tp, ip, split_fsb, 6380 &firstfsb, &dfops); 6381 if (error) 6382 goto out; 6383 6384 error = xfs_defer_finish(&tp, &dfops); 6385 if (error) 6386 goto out; 6387 6388 return xfs_trans_commit(tp); 6389 6390 out: 6391 xfs_defer_cancel(&dfops); 6392 xfs_trans_cancel(tp); 6393 return error; 6394 } 6395 6396 /* Deferred mapping is only for real extents in the data fork. */ 6397 static bool 6398 xfs_bmap_is_update_needed( 6399 struct xfs_bmbt_irec *bmap) 6400 { 6401 return bmap->br_startblock != HOLESTARTBLOCK && 6402 bmap->br_startblock != DELAYSTARTBLOCK; 6403 } 6404 6405 /* Record a bmap intent. */ 6406 static int 6407 __xfs_bmap_add( 6408 struct xfs_mount *mp, 6409 struct xfs_defer_ops *dfops, 6410 enum xfs_bmap_intent_type type, 6411 struct xfs_inode *ip, 6412 int whichfork, 6413 struct xfs_bmbt_irec *bmap) 6414 { 6415 int error; 6416 struct xfs_bmap_intent *bi; 6417 6418 trace_xfs_bmap_defer(mp, 6419 XFS_FSB_TO_AGNO(mp, bmap->br_startblock), 6420 type, 6421 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock), 6422 ip->i_ino, whichfork, 6423 bmap->br_startoff, 6424 bmap->br_blockcount, 6425 bmap->br_state); 6426 6427 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS); 6428 INIT_LIST_HEAD(&bi->bi_list); 6429 bi->bi_type = type; 6430 bi->bi_owner = ip; 6431 bi->bi_whichfork = whichfork; 6432 bi->bi_bmap = *bmap; 6433 6434 error = xfs_defer_ijoin(dfops, bi->bi_owner); 6435 if (error) { 6436 kmem_free(bi); 6437 return error; 6438 } 6439 6440 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list); 6441 return 0; 6442 } 6443 6444 /* Map an extent into a file. */ 6445 int 6446 xfs_bmap_map_extent( 6447 struct xfs_mount *mp, 6448 struct xfs_defer_ops *dfops, 6449 struct xfs_inode *ip, 6450 struct xfs_bmbt_irec *PREV) 6451 { 6452 if (!xfs_bmap_is_update_needed(PREV)) 6453 return 0; 6454 6455 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip, 6456 XFS_DATA_FORK, PREV); 6457 } 6458 6459 /* Unmap an extent out of a file. */ 6460 int 6461 xfs_bmap_unmap_extent( 6462 struct xfs_mount *mp, 6463 struct xfs_defer_ops *dfops, 6464 struct xfs_inode *ip, 6465 struct xfs_bmbt_irec *PREV) 6466 { 6467 if (!xfs_bmap_is_update_needed(PREV)) 6468 return 0; 6469 6470 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip, 6471 XFS_DATA_FORK, PREV); 6472 } 6473 6474 /* 6475 * Process one of the deferred bmap operations. We pass back the 6476 * btree cursor to maintain our lock on the bmapbt between calls. 6477 */ 6478 int 6479 xfs_bmap_finish_one( 6480 struct xfs_trans *tp, 6481 struct xfs_defer_ops *dfops, 6482 struct xfs_inode *ip, 6483 enum xfs_bmap_intent_type type, 6484 int whichfork, 6485 xfs_fileoff_t startoff, 6486 xfs_fsblock_t startblock, 6487 xfs_filblks_t *blockcount, 6488 xfs_exntst_t state) 6489 { 6490 xfs_fsblock_t firstfsb; 6491 int error = 0; 6492 6493 /* 6494 * firstfsb is tied to the transaction lifetime and is used to 6495 * ensure correct AG locking order and schedule work item 6496 * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us 6497 * to only making one bmap call per transaction, so it should 6498 * be safe to have it as a local variable here. 6499 */ 6500 firstfsb = NULLFSBLOCK; 6501 6502 trace_xfs_bmap_deferred(tp->t_mountp, 6503 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type, 6504 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock), 6505 ip->i_ino, whichfork, startoff, *blockcount, state); 6506 6507 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK)) 6508 return -EFSCORRUPTED; 6509 6510 if (XFS_TEST_ERROR(false, tp->t_mountp, 6511 XFS_ERRTAG_BMAP_FINISH_ONE)) 6512 return -EIO; 6513 6514 switch (type) { 6515 case XFS_BMAP_MAP: 6516 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount, 6517 startblock, dfops); 6518 *blockcount = 0; 6519 break; 6520 case XFS_BMAP_UNMAP: 6521 error = __xfs_bunmapi(tp, ip, startoff, blockcount, 6522 XFS_BMAPI_REMAP, 1, &firstfsb, dfops); 6523 break; 6524 default: 6525 ASSERT(0); 6526 error = -EFSCORRUPTED; 6527 } 6528 6529 return error; 6530 } 6531