// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"


kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
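
/*
 * Worked example for the computation above (illustrative numbers, not
 * tied to any particular filesystem geometry): with 4k blocks a bmbt
 * leaf or node holds on the order of 250 records, so minleafrecs and
 * minnoderecs are roughly 125.  For the data fork, maxleafents is
 * MAXEXTNUM (2^31 - 1):
 *
 *	maxblocks = ceil(2^31 / 125)  ~ 17.2M leaf blocks	level 1
 *	maxblocks = ceil(17.2M / 125) ~ 137k node blocks	level 2
 *	maxblocks = ceil(137k / 125)  ~ 1100 node blocks	level 3
 *	maxblocks = ceil(1100 / 125)  ~ 9 node blocks		level 4
 *	9 <= maxrootrecs, so the in-inode root absorbs them	level 5
 *
 * which is why m_bm_maxlevels[XFS_DATA_FORK] typically works out to 5.
 */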

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
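
/*
 * Worked example for the estimate above (illustrative geometry: 250
 * records per leaf and node block, XFS_BM_MAXLEVELS == 5).  For a
 * delayed extent of len = 70000 blocks:
 *
 *	level 0: len = ceil(70000/250) = 280	rval = 280 (leaves)
 *	level 1: len = ceil(280/250)   = 2	rval = 282 (nodes)
 *	level 2: len = ceil(2/250)     = 1	rval = 283 (node)
 *
 * len hit 1, so we return 283 + 5 - 2 - 1 = 285: one extra block per
 * remaining level up to the root, on the pessimistic assumption that
 * each of those is also needed.
 */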

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}
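
/*
 * Example of what the duplicate scan above catches (hypothetical corrupt
 * node): bb_numrecs = 3 with child pointers {100, 57, 100}.  The keys can
 * still be in ascending order, so only the O(n^2) pointer comparison
 * trips, at (i = 1, j = 3), and the filesystem is shut down before the
 * aliased child block can be reached through two different paths.
 */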

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	struct xfs_ifork	*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					xfs_verify_fsbno(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
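
/*
 * Usage sketch (assumes the xfs_bmap_add_free() wrapper in xfs_bmap.h,
 * which passes skip_discard = false): callers that give back a block
 * typically tag it with an rmap owner and defer the free to transaction
 * commit, as xfs_bmap_btree_to_extents() does below:
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(tp, cbno, 1, &oinfo);
 *
 * Nothing is freed here; xfs_defer_add() queues the work so the free
 * happens (and is logged) when the deferred ops are finished.
 */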

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
			xfs_btree_check_lptr(cur, cbno, 1));
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	if (!abp) {
		error = -EFSCORRUPTED;
		goto out_unreserve_dquot;
	}

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
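
/*
 * Sketch of the result of the conversion above (illustrative extents):
 * an extents-format fork whose in-core list has just exceeded
 * XFS_IFORK_MAXEXT() records, say [0,10), [20,5), [40,8), becomes a
 * two-level tree:
 *
 *	inode fork (root, level 1):	key 0 -> ptr to child block
 *	child block (leaf, level 0):	[0,10) [20,5) [40,8)
 *
 * Only the single child block is allocated on disk; delalloc records
 * (isnullstartblock()) are skipped when filling it, since they have no
 * on-disk mapping yet.
 */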

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}


STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
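
/*
 * The shape an init_fn callout takes, sketched from the symlink case
 * (xfs_symlink_local_to_remote() is the real instance, used by
 * xfs_bmap_add_attrfork_local() below); example_local_to_remote is a
 * hypothetical name.  The callout sets the buffer log item type so log
 * recovery can tell what the buffer contains, copies the inline fork
 * data out, and logs the range it wrote:
 *
 *	static void
 *	example_local_to_remote(
 *		struct xfs_trans	*tp,
 *		struct xfs_buf		*bp,
 *		struct xfs_inode	*ip,
 *		struct xfs_ifork	*ifp)
 *	{
 *		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
 *		memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
 *		xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
 *	}
 */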

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised they
 * handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/* Set an inode attr fork offset based on the format */
int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}
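
/*
 * Worked example of the XFS_DINODE_FMT_DEV case above: sizeof(xfs_dev_t)
 * is 4 bytes, roundup(4, 8) = 8, and di_forkoff is kept in 8-byte units,
 * so 8 >> 3 = 1.  The attribute fork therefore starts 8 bytes into the
 * inode literal area, immediately after the device number.  For the
 * other formats xfs_attr_shortform_bytesfit() tries for a tighter fit;
 * a return of 0 means "no better answer" and the mount-time default from
 * xfs_default_attroffset() is used instead.
 */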

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
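
/*
 * Caller's-eye view of the function above (a sketch; the attr set path
 * is the real caller): before the first extended attribute can be
 * stored, the inode must grow an attr fork, so the caller does roughly
 *
 *	if (!XFS_IFORK_Q(ip)) {
 *		error = xfs_bmap_add_attrfork(ip, attr_space, rsvd);
 *		if (error)
 *			return error;
 *	}
 *
 * with the inode unlocked, since the function allocates and commits its
 * own transaction.
 */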

/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extnum_t		nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	struct xfs_btree_block	*block = ifp->if_broot;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	new;
	xfs_fsblock_t		bno;
	struct xfs_buf		*bp;
	xfs_extnum_t		i, j;
	int			level;
	__be64			*pp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			xfs_verify_fsbno(mp, bno), out_brelse);
		xfs_trans_brelse(tp, bp);
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;
	xfs_iext_first(ifp, &icur);

	/*
	 * Loop over all leaf nodes. Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > nextents)) {
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					__func__, block, sizeof(*block),
					__this_address);
			error = -EFSCORRUPTED;
			goto out_brelse;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, frp++, i++) {
			xfs_failaddr_t	fa;

			xfs_bmbt_disk_get_all(frp, &new);
			fa = xfs_bmap_validate_extent(ip, whichfork, &new);
			if (fa) {
				error = -EFSCORRUPTED;
				xfs_inode_verifier_error(ip, error,
						"xfs_iread_extents(2)",
						frp, sizeof(*frp), fa);
				goto out_brelse;
			}
			xfs_iext_insert(ip, &icur, &new, state);
			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
			xfs_iext_next(ifp, &icur);
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
	}

	if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(i == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;

out_brelse:
	xfs_trans_brelse(tp, bp);
out:
	xfs_iext_destroy(ifp);
	return error;
}
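
/*
 * The canonical calling pattern, used throughout this file (see
 * xfs_bmap_first_unused() below): the in-core extent list is demand
 * loaded, so check XFS_IFEXTENTS before walking the fork:
 *
 *	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 *		error = xfs_iread_extents(tp, ip, whichfork);
 *		if (error)
 *			return error;
 *	}
 *
 * On success every on-disk record has been validated and inserted, and
 * the flag is set so subsequent callers skip the read.
 */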

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free.  This is the
 * lowest-address hole if the fork has holes, else the first block past the
 * end of the fork.  Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
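
/*
 * Worked example for the search above: a fork mapping [0, 10) and
 * [20, 5), searched with *first_unused = 0 and len = 8.  The first
 * extent starts at 0, so no hole precedes it; max advances to 10.
 * The second extent starts at 20 and 20 - 10 >= 8, so the loop breaks
 * and *first_unused = 10: the hole [10, 20) holds the 8 blocks.  Had
 * len been 16, the loop would have run off the end and returned 25,
 * the first block past the last extent.
 */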

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past, the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
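
/*
 * Example of the aeof decision above (illustrative numbers): if the
 * last extent maps [100, 20), so EOF in extent terms is block 120, an
 * allocation at bma->offset = 120 or beyond is "at eof" and eligible
 * for stripe alignment.  An allocation at offset 110 only counts as
 * aeof if that last extent is still delalloc (isnullstartblock()),
 * because its real blocks have not been placed yet and can still be
 * aligned.
 */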

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	struct xfs_ifork *ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	struct xfs_bmbt_irec	old;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
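	/*
	 * Example of how the state bits combine (illustrative numbers):
	 * suppose PREV is the delalloc record [50, 8), new is the real
	 * allocation [50, 8), and the written extent LEFT ends at offset
	 * 50 with a startblock that abuts new's.  BMAP_LEFT_FILLING and
	 * BMAP_RIGHT_FILLING are both set because new covers all of PREV,
	 * plus BMAP_LEFT_CONTIG, so the switch below extends LEFT by
	 * eight blocks and deletes PREV outright.  If RIGHT merged too,
	 * three incore records would collapse into one.  Combinations
	 * with a CONTIG bit but no FILLING bit on that side cannot occur,
	 * since new must lie within PREV.
	 */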
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		(*nextents)++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;
1867 */ 1868 xfs_iext_update_extent(bma->ip, state, &bma->icur, new); 1869 (*nextents)++; 1870 if (bma->cur == NULL) 1871 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1872 else { 1873 rval = XFS_ILOG_CORE; 1874 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1875 if (error) 1876 goto done; 1877 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1878 error = xfs_btree_insert(bma->cur, &i); 1879 if (error) 1880 goto done; 1881 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1882 } 1883 1884 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1885 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1886 &bma->cur, 1, &tmp_rval, whichfork); 1887 rval |= tmp_rval; 1888 if (error) 1889 goto done; 1890 } 1891 1892 temp = PREV.br_blockcount - new->br_blockcount; 1893 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1894 startblockval(PREV.br_startblock) - 1895 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 1896 1897 PREV.br_startblock = nullstartblock(da_new); 1898 PREV.br_blockcount = temp; 1899 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state); 1900 xfs_iext_next(ifp, &bma->icur); 1901 break; 1902 1903 case 0: 1904 /* 1905 * Filling in the middle part of a previous delayed allocation. 1906 * Contiguity is impossible here. 1907 * This case is avoided almost all the time. 1908 * 1909 * We start with a delayed allocation: 1910 * 1911 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 1912 * PREV @ idx 1913 * 1914 * and we are allocating: 1915 * +rrrrrrrrrrrrrrrrr+ 1916 * new 1917 * 1918 * and we set it up for insertion as: 1919 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 1920 * new 1921 * PREV @ idx LEFT RIGHT 1922 * inserted at idx + 1 1923 */ 1924 old = PREV; 1925 1926 /* LEFT is the new middle */ 1927 LEFT = *new; 1928 1929 /* RIGHT is the new right */ 1930 RIGHT.br_state = PREV.br_state; 1931 RIGHT.br_startoff = new_endoff; 1932 RIGHT.br_blockcount = 1933 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1934 RIGHT.br_startblock = 1935 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1936 RIGHT.br_blockcount)); 1937 1938 /* truncate PREV */ 1939 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 1940 PREV.br_startblock = 1941 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1942 PREV.br_blockcount)); 1943 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1944 1945 xfs_iext_next(ifp, &bma->icur); 1946 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state); 1947 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state); 1948 (*nextents)++; 1949 1950 if (bma->cur == NULL) 1951 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1952 else { 1953 rval = XFS_ILOG_CORE; 1954 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1955 if (error) 1956 goto done; 1957 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1958 error = xfs_btree_insert(bma->cur, &i); 1959 if (error) 1960 goto done; 1961 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1962 } 1963 1964 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1965 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1966 &bma->cur, 1, &tmp_rval, whichfork); 1967 rval |= tmp_rval; 1968 if (error) 1969 goto done; 1970 } 1971 1972 da_new = startblockval(PREV.br_startblock) + 1973 startblockval(RIGHT.br_startblock); 1974 break; 1975 1976 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1977 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1978 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 1979 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1980 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1981 case BMAP_LEFT_CONTIG: 1982 case BMAP_RIGHT_CONTIG: 1983 
/* 1984 * These cases are all impossible. 1985 */ 1986 ASSERT(0); 1987 } 1988 1989 /* add reverse mapping unless caller opted out */ 1990 if (!(bma->flags & XFS_BMAPI_NORMAP)) { 1991 error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); 1992 if (error) 1993 goto done; 1994 } 1995 1996 /* convert to a btree if necessary */ 1997 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1998 int tmp_logflags; /* partial log flag return val */ 1999 2000 ASSERT(bma->cur == NULL); 2001 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2002 &bma->cur, da_old > 0, &tmp_logflags, 2003 whichfork); 2004 bma->logflags |= tmp_logflags; 2005 if (error) 2006 goto done; 2007 } 2008 2009 if (bma->cur) { 2010 da_new += bma->cur->bc_private.b.allocated; 2011 bma->cur->bc_private.b.allocated = 0; 2012 } 2013 2014 /* adjust for changes in reserved delayed indirect blocks */ 2015 if (da_new != da_old) { 2016 ASSERT(state == 0 || da_new < da_old); 2017 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), 2018 false); 2019 } 2020 2021 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 2022 done: 2023 if (whichfork != XFS_COW_FORK) 2024 bma->logflags |= rval; 2025 return error; 2026 #undef LEFT 2027 #undef RIGHT 2028 #undef PREV 2029 } 2030 2031 /* 2032 * Convert an unwritten allocation to a real allocation or vice versa. 2033 */ 2034 int /* error */ 2035 xfs_bmap_add_extent_unwritten_real( 2036 struct xfs_trans *tp, 2037 xfs_inode_t *ip, /* incore inode pointer */ 2038 int whichfork, 2039 struct xfs_iext_cursor *icur, 2040 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 2041 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 2042 int *logflagsp) /* inode logging flags */ 2043 { 2044 xfs_btree_cur_t *cur; /* btree cursor */ 2045 int error; /* error return value */ 2046 int i; /* temp state */ 2047 struct xfs_ifork *ifp; /* inode fork pointer */ 2048 xfs_fileoff_t new_endoff; /* end offset of new entry */ 2049 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 2050 /* left is 0, right is 1, prev is 2 */ 2051 int rval=0; /* return value (logging flags) */ 2052 int state = xfs_bmap_fork_to_state(whichfork); 2053 struct xfs_mount *mp = ip->i_mount; 2054 struct xfs_bmbt_irec old; 2055 2056 *logflagsp = 0; 2057 2058 cur = *curp; 2059 ifp = XFS_IFORK_PTR(ip, whichfork); 2060 2061 ASSERT(!isnullstartblock(new->br_startblock)); 2062 2063 XFS_STATS_INC(mp, xs_add_exlist); 2064 2065 #define LEFT r[0] 2066 #define RIGHT r[1] 2067 #define PREV r[2] 2068 2069 /* 2070 * Set up a bunch of variables to make the tests simpler. 2071 */ 2072 error = 0; 2073 xfs_iext_get_extent(ifp, icur, &PREV); 2074 ASSERT(new->br_state != PREV.br_state); 2075 new_endoff = new->br_startoff + new->br_blockcount; 2076 ASSERT(PREV.br_startoff <= new->br_startoff); 2077 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2078 2079 /* 2080 * Set flags determining what part of the previous oldext allocation 2081 * extent is being replaced by a newext allocation. 2082 */ 2083 if (PREV.br_startoff == new->br_startoff) 2084 state |= BMAP_LEFT_FILLING; 2085 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2086 state |= BMAP_RIGHT_FILLING; 2087 2088 /* 2089 * Check and set flags if this segment has a left neighbor. 2090 * Don't set contiguous if the combined extent would be too large. 
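* "Too large" means the merged length would exceed MAXEXTLEN (2^21 - 1 blocks),
* the largest count a single on-disk bmbt record can encode in its 21-bit
* length field.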
2091 */ 2092 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { 2093 state |= BMAP_LEFT_VALID; 2094 if (isnullstartblock(LEFT.br_startblock)) 2095 state |= BMAP_LEFT_DELAY; 2096 } 2097 2098 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2099 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2100 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2101 LEFT.br_state == new->br_state && 2102 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2103 state |= BMAP_LEFT_CONTIG; 2104 2105 /* 2106 * Check and set flags if this segment has a right neighbor. 2107 * Don't set contiguous if the combined extent would be too large. 2108 * Also check for all-three-contiguous being too large. 2109 */ 2110 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { 2111 state |= BMAP_RIGHT_VALID; 2112 if (isnullstartblock(RIGHT.br_startblock)) 2113 state |= BMAP_RIGHT_DELAY; 2114 } 2115 2116 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2117 new_endoff == RIGHT.br_startoff && 2118 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2119 new->br_state == RIGHT.br_state && 2120 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 2121 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2122 BMAP_RIGHT_FILLING)) != 2123 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2124 BMAP_RIGHT_FILLING) || 2125 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2126 <= MAXEXTLEN)) 2127 state |= BMAP_RIGHT_CONTIG; 2128 2129 /* 2130 * Switch out based on the FILLING and CONTIG state bits. 2131 */ 2132 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2133 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2134 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2135 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2136 /* 2137 * Setting all of a previous oldext extent to newext. 2138 * The left and right neighbors are both contiguous with new. 2139 */ 2140 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 2141 2142 xfs_iext_remove(ip, icur, state); 2143 xfs_iext_remove(ip, icur, state); 2144 xfs_iext_prev(ifp, icur); 2145 xfs_iext_update_extent(ip, state, icur, &LEFT); 2146 XFS_IFORK_NEXT_SET(ip, whichfork, 2147 XFS_IFORK_NEXTENTS(ip, whichfork) - 2); 2148 if (cur == NULL) 2149 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2150 else { 2151 rval = XFS_ILOG_CORE; 2152 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2153 if (error) 2154 goto done; 2155 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2156 if ((error = xfs_btree_delete(cur, &i))) 2157 goto done; 2158 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2159 if ((error = xfs_btree_decrement(cur, 0, &i))) 2160 goto done; 2161 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2162 if ((error = xfs_btree_delete(cur, &i))) 2163 goto done; 2164 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2165 if ((error = xfs_btree_decrement(cur, 0, &i))) 2166 goto done; 2167 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2168 error = xfs_bmbt_update(cur, &LEFT); 2169 if (error) 2170 goto done; 2171 } 2172 break; 2173 2174 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2175 /* 2176 * Setting all of a previous oldext extent to newext. 2177 * The left neighbor is contiguous, the right is not. 
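* PREV's bmbt record is deleted and LEFT's record is rewritten to cover both
* ranges, so the fork's extent count drops by one.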
2178 */ 2179 LEFT.br_blockcount += PREV.br_blockcount; 2180 2181 xfs_iext_remove(ip, icur, state); 2182 xfs_iext_prev(ifp, icur); 2183 xfs_iext_update_extent(ip, state, icur, &LEFT); 2184 XFS_IFORK_NEXT_SET(ip, whichfork, 2185 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2186 if (cur == NULL) 2187 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2188 else { 2189 rval = XFS_ILOG_CORE; 2190 error = xfs_bmbt_lookup_eq(cur, &PREV, &i); 2191 if (error) 2192 goto done; 2193 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2194 if ((error = xfs_btree_delete(cur, &i))) 2195 goto done; 2196 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2197 if ((error = xfs_btree_decrement(cur, 0, &i))) 2198 goto done; 2199 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2200 error = xfs_bmbt_update(cur, &LEFT); 2201 if (error) 2202 goto done; 2203 } 2204 break; 2205 2206 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2207 /* 2208 * Setting all of a previous oldext extent to newext. 2209 * The right neighbor is contiguous, the left is not. 2210 */ 2211 PREV.br_blockcount += RIGHT.br_blockcount; 2212 PREV.br_state = new->br_state; 2213 2214 xfs_iext_next(ifp, icur); 2215 xfs_iext_remove(ip, icur, state); 2216 xfs_iext_prev(ifp, icur); 2217 xfs_iext_update_extent(ip, state, icur, &PREV); 2218 2219 XFS_IFORK_NEXT_SET(ip, whichfork, 2220 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2221 if (cur == NULL) 2222 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2223 else { 2224 rval = XFS_ILOG_CORE; 2225 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2226 if (error) 2227 goto done; 2228 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2229 if ((error = xfs_btree_delete(cur, &i))) 2230 goto done; 2231 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2232 if ((error = xfs_btree_decrement(cur, 0, &i))) 2233 goto done; 2234 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2235 error = xfs_bmbt_update(cur, &PREV); 2236 if (error) 2237 goto done; 2238 } 2239 break; 2240 2241 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2242 /* 2243 * Setting all of a previous oldext extent to newext. 2244 * Neither the left nor right neighbors are contiguous with 2245 * the new one. 2246 */ 2247 PREV.br_state = new->br_state; 2248 xfs_iext_update_extent(ip, state, icur, &PREV); 2249 2250 if (cur == NULL) 2251 rval = XFS_ILOG_DEXT; 2252 else { 2253 rval = 0; 2254 error = xfs_bmbt_lookup_eq(cur, new, &i); 2255 if (error) 2256 goto done; 2257 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2258 error = xfs_bmbt_update(cur, &PREV); 2259 if (error) 2260 goto done; 2261 } 2262 break; 2263 2264 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2265 /* 2266 * Setting the first part of a previous oldext extent to newext. 2267 * The left neighbor is contiguous. 
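* Both records survive: LEFT grows by new->br_blockcount while PREV is trimmed
* from the front, so two in-place bmbt updates suffice and the extent count is
* unchanged.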
2268 */ 2269 LEFT.br_blockcount += new->br_blockcount; 2270 2271 old = PREV; 2272 PREV.br_startoff += new->br_blockcount; 2273 PREV.br_startblock += new->br_blockcount; 2274 PREV.br_blockcount -= new->br_blockcount; 2275 2276 xfs_iext_update_extent(ip, state, icur, &PREV); 2277 xfs_iext_prev(ifp, icur); 2278 xfs_iext_update_extent(ip, state, icur, &LEFT); 2279 2280 if (cur == NULL) 2281 rval = XFS_ILOG_DEXT; 2282 else { 2283 rval = 0; 2284 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2285 if (error) 2286 goto done; 2287 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2288 error = xfs_bmbt_update(cur, &PREV); 2289 if (error) 2290 goto done; 2291 error = xfs_btree_decrement(cur, 0, &i); 2292 if (error) 2293 goto done; 2294 error = xfs_bmbt_update(cur, &LEFT); 2295 if (error) 2296 goto done; 2297 } 2298 break; 2299 2300 case BMAP_LEFT_FILLING: 2301 /* 2302 * Setting the first part of a previous oldext extent to newext. 2303 * The left neighbor is not contiguous. 2304 */ 2305 old = PREV; 2306 PREV.br_startoff += new->br_blockcount; 2307 PREV.br_startblock += new->br_blockcount; 2308 PREV.br_blockcount -= new->br_blockcount; 2309 2310 xfs_iext_update_extent(ip, state, icur, &PREV); 2311 xfs_iext_insert(ip, icur, new, state); 2312 XFS_IFORK_NEXT_SET(ip, whichfork, 2313 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2314 if (cur == NULL) 2315 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2316 else { 2317 rval = XFS_ILOG_CORE; 2318 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2319 if (error) 2320 goto done; 2321 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2322 error = xfs_bmbt_update(cur, &PREV); 2323 if (error) 2324 goto done; 2325 cur->bc_rec.b = *new; 2326 if ((error = xfs_btree_insert(cur, &i))) 2327 goto done; 2328 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2329 } 2330 break; 2331 2332 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2333 /* 2334 * Setting the last part of a previous oldext extent to newext. 2335 * The right neighbor is contiguous with the new allocation. 2336 */ 2337 old = PREV; 2338 PREV.br_blockcount -= new->br_blockcount; 2339 2340 RIGHT.br_startoff = new->br_startoff; 2341 RIGHT.br_startblock = new->br_startblock; 2342 RIGHT.br_blockcount += new->br_blockcount; 2343 2344 xfs_iext_update_extent(ip, state, icur, &PREV); 2345 xfs_iext_next(ifp, icur); 2346 xfs_iext_update_extent(ip, state, icur, &RIGHT); 2347 2348 if (cur == NULL) 2349 rval = XFS_ILOG_DEXT; 2350 else { 2351 rval = 0; 2352 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2353 if (error) 2354 goto done; 2355 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2356 error = xfs_bmbt_update(cur, &PREV); 2357 if (error) 2358 goto done; 2359 error = xfs_btree_increment(cur, 0, &i); 2360 if (error) 2361 goto done; 2362 error = xfs_bmbt_update(cur, &RIGHT); 2363 if (error) 2364 goto done; 2365 } 2366 break; 2367 2368 case BMAP_RIGHT_FILLING: 2369 /* 2370 * Setting the last part of a previous oldext extent to newext. 2371 * The right neighbor is not contiguous. 
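* PREV is trimmed from the back and new is inserted as a record of its own,
* so the lookup below must fail (i == 0) before the insert can proceed.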
2372 */ 2373 old = PREV; 2374 PREV.br_blockcount -= new->br_blockcount; 2375 2376 xfs_iext_update_extent(ip, state, icur, &PREV); 2377 xfs_iext_next(ifp, icur); 2378 xfs_iext_insert(ip, icur, new, state); 2379 2380 XFS_IFORK_NEXT_SET(ip, whichfork, 2381 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2382 if (cur == NULL) 2383 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2384 else { 2385 rval = XFS_ILOG_CORE; 2386 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2387 if (error) 2388 goto done; 2389 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2390 error = xfs_bmbt_update(cur, &PREV); 2391 if (error) 2392 goto done; 2393 error = xfs_bmbt_lookup_eq(cur, new, &i); 2394 if (error) 2395 goto done; 2396 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2397 if ((error = xfs_btree_insert(cur, &i))) 2398 goto done; 2399 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2400 } 2401 break; 2402 2403 case 0: 2404 /* 2405 * Setting the middle part of a previous oldext extent to 2406 * newext. Contiguity is impossible here. 2407 * One extent becomes three extents. 2408 */ 2409 old = PREV; 2410 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 2411 2412 r[0] = *new; 2413 r[1].br_startoff = new_endoff; 2414 r[1].br_blockcount = 2415 old.br_startoff + old.br_blockcount - new_endoff; 2416 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2417 r[1].br_state = PREV.br_state; 2418 2419 xfs_iext_update_extent(ip, state, icur, &PREV); 2420 xfs_iext_next(ifp, icur); 2421 xfs_iext_insert(ip, icur, &r[1], state); 2422 xfs_iext_insert(ip, icur, &r[0], state); 2423 2424 XFS_IFORK_NEXT_SET(ip, whichfork, 2425 XFS_IFORK_NEXTENTS(ip, whichfork) + 2); 2426 if (cur == NULL) 2427 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2428 else { 2429 rval = XFS_ILOG_CORE; 2430 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2431 if (error) 2432 goto done; 2433 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2434 /* new right extent - oldext */ 2435 error = xfs_bmbt_update(cur, &r[1]); 2436 if (error) 2437 goto done; 2438 /* new left extent - oldext */ 2439 cur->bc_rec.b = PREV; 2440 if ((error = xfs_btree_insert(cur, &i))) 2441 goto done; 2442 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2443 /* 2444 * Reset the cursor to the position of the new extent 2445 * we are about to insert as we can't trust it after 2446 * the previous insert. 2447 */ 2448 error = xfs_bmbt_lookup_eq(cur, new, &i); 2449 if (error) 2450 goto done; 2451 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2452 /* new middle extent - newext */ 2453 if ((error = xfs_btree_insert(cur, &i))) 2454 goto done; 2455 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2456 } 2457 break; 2458 2459 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2460 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2461 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2462 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2463 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2464 case BMAP_LEFT_CONTIG: 2465 case BMAP_RIGHT_CONTIG: 2466 /* 2467 * These cases are all impossible. 
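* A neighbor can only be contiguous with new along an edge that new actually
* fills, so a CONTIG bit can never be set without its matching FILLING bit.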
2468 */ 2469 ASSERT(0); 2470 } 2471 2472 /* update reverse mappings */ 2473 error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); 2474 if (error) 2475 goto done; 2476 2477 /* convert to a btree if necessary */ 2478 if (xfs_bmap_needs_btree(ip, whichfork)) { 2479 int tmp_logflags; /* partial log flag return val */ 2480 2481 ASSERT(cur == NULL); 2482 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 2483 &tmp_logflags, whichfork); 2484 *logflagsp |= tmp_logflags; 2485 if (error) 2486 goto done; 2487 } 2488 2489 /* clear out the allocated field, done with it now in any case. */ 2490 if (cur) { 2491 cur->bc_private.b.allocated = 0; 2492 *curp = cur; 2493 } 2494 2495 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2496 done: 2497 *logflagsp |= rval; 2498 return error; 2499 #undef LEFT 2500 #undef RIGHT 2501 #undef PREV 2502 } 2503 2504 /* 2505 * Convert a hole to a delayed allocation. 2506 */ 2507 STATIC void 2508 xfs_bmap_add_extent_hole_delay( 2509 xfs_inode_t *ip, /* incore inode pointer */ 2510 int whichfork, 2511 struct xfs_iext_cursor *icur, 2512 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2513 { 2514 struct xfs_ifork *ifp; /* inode fork pointer */ 2515 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2516 xfs_filblks_t newlen=0; /* new indirect size */ 2517 xfs_filblks_t oldlen=0; /* old indirect size */ 2518 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2519 int state = xfs_bmap_fork_to_state(whichfork); 2520 xfs_filblks_t temp; /* temp for indirect calculations */ 2521 2522 ifp = XFS_IFORK_PTR(ip, whichfork); 2523 ASSERT(isnullstartblock(new->br_startblock)); 2524 2525 /* 2526 * Check and set flags if this segment has a left neighbor 2527 */ 2528 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2529 state |= BMAP_LEFT_VALID; 2530 if (isnullstartblock(left.br_startblock)) 2531 state |= BMAP_LEFT_DELAY; 2532 } 2533 2534 /* 2535 * Check and set flags if the current (right) segment exists. 2536 * If it doesn't exist, we're converting the hole at end-of-file. 2537 */ 2538 if (xfs_iext_get_extent(ifp, icur, &right)) { 2539 state |= BMAP_RIGHT_VALID; 2540 if (isnullstartblock(right.br_startblock)) 2541 state |= BMAP_RIGHT_DELAY; 2542 } 2543 2544 /* 2545 * Set contiguity flags on the left and right neighbors. 2546 * Don't let extents get too large, even if the pieces are contiguous. 2547 */ 2548 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2549 left.br_startoff + left.br_blockcount == new->br_startoff && 2550 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2551 state |= BMAP_LEFT_CONTIG; 2552 2553 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2554 new->br_startoff + new->br_blockcount == right.br_startoff && 2555 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2556 (!(state & BMAP_LEFT_CONTIG) || 2557 (left.br_blockcount + new->br_blockcount + 2558 right.br_blockcount <= MAXEXTLEN))) 2559 state |= BMAP_RIGHT_CONTIG; 2560 2561 /* 2562 * Switch out based on the contiguity flags. 2563 */ 2564 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2565 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2566 /* 2567 * New allocation is contiguous with delayed allocations 2568 * on the left and on the right. 2569 * Merge all three into a single extent record. 
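* The merged record keeps the smaller of the summed old reservations and the
* worst-case indlen for the combined length; any excess is returned to the
* free-block counter at the bottom of this function.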
2570 */ 2571 temp = left.br_blockcount + new->br_blockcount + 2572 right.br_blockcount; 2573 2574 oldlen = startblockval(left.br_startblock) + 2575 startblockval(new->br_startblock) + 2576 startblockval(right.br_startblock); 2577 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2578 oldlen); 2579 left.br_startblock = nullstartblock(newlen); 2580 left.br_blockcount = temp; 2581 2582 xfs_iext_remove(ip, icur, state); 2583 xfs_iext_prev(ifp, icur); 2584 xfs_iext_update_extent(ip, state, icur, &left); 2585 break; 2586 2587 case BMAP_LEFT_CONTIG: 2588 /* 2589 * New allocation is contiguous with a delayed allocation 2590 * on the left. 2591 * Merge the new allocation with the left neighbor. 2592 */ 2593 temp = left.br_blockcount + new->br_blockcount; 2594 2595 oldlen = startblockval(left.br_startblock) + 2596 startblockval(new->br_startblock); 2597 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2598 oldlen); 2599 left.br_blockcount = temp; 2600 left.br_startblock = nullstartblock(newlen); 2601 2602 xfs_iext_prev(ifp, icur); 2603 xfs_iext_update_extent(ip, state, icur, &left); 2604 break; 2605 2606 case BMAP_RIGHT_CONTIG: 2607 /* 2608 * New allocation is contiguous with a delayed allocation 2609 * on the right. 2610 * Merge the new allocation with the right neighbor. 2611 */ 2612 temp = new->br_blockcount + right.br_blockcount; 2613 oldlen = startblockval(new->br_startblock) + 2614 startblockval(right.br_startblock); 2615 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2616 oldlen); 2617 right.br_startoff = new->br_startoff; 2618 right.br_startblock = nullstartblock(newlen); 2619 right.br_blockcount = temp; 2620 xfs_iext_update_extent(ip, state, icur, &right); 2621 break; 2622 2623 case 0: 2624 /* 2625 * New allocation is not contiguous with another 2626 * delayed allocation. 2627 * Insert a new entry. 2628 */ 2629 oldlen = newlen = 0; 2630 xfs_iext_insert(ip, icur, new, state); 2631 break; 2632 } 2633 if (oldlen != newlen) { 2634 ASSERT(oldlen > newlen); 2635 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2636 false); 2637 /* 2638 * Nothing to do for disk quota accounting here. 2639 */ 2640 } 2641 } 2642 2643 /* 2644 * Convert a hole to a real allocation. 2645 */ 2646 STATIC int /* error */ 2647 xfs_bmap_add_extent_hole_real( 2648 struct xfs_trans *tp, 2649 struct xfs_inode *ip, 2650 int whichfork, 2651 struct xfs_iext_cursor *icur, 2652 struct xfs_btree_cur **curp, 2653 struct xfs_bmbt_irec *new, 2654 int *logflagsp, 2655 int flags) 2656 { 2657 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 2658 struct xfs_mount *mp = ip->i_mount; 2659 struct xfs_btree_cur *cur = *curp; 2660 int error; /* error return value */ 2661 int i; /* temp state */ 2662 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2663 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2664 int rval=0; /* return value (logging flags) */ 2665 int state = xfs_bmap_fork_to_state(whichfork); 2666 struct xfs_bmbt_irec old; 2667 2668 ASSERT(!isnullstartblock(new->br_startblock)); 2669 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 2670 2671 XFS_STATS_INC(mp, xs_add_exlist); 2672 2673 /* 2674 * Check and set flags if this segment has a left neighbor. 2675 */ 2676 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2677 state |= BMAP_LEFT_VALID; 2678 if (isnullstartblock(left.br_startblock)) 2679 state |= BMAP_LEFT_DELAY; 2680 } 2681 2682 /* 2683 * Check and set flags if this segment has a current value. 2684 * Not true if we're inserting into the "hole" at eof. 
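* (at eof the cursor points past the last record, so xfs_iext_get_extent
* fails and no RIGHT flags get set).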
2685 */ 2686 if (xfs_iext_get_extent(ifp, icur, &right)) { 2687 state |= BMAP_RIGHT_VALID; 2688 if (isnullstartblock(right.br_startblock)) 2689 state |= BMAP_RIGHT_DELAY; 2690 } 2691 2692 /* 2693 * We're inserting a real allocation between "left" and "right". 2694 * Set the contiguity flags. Don't let extents get too large. 2695 */ 2696 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2697 left.br_startoff + left.br_blockcount == new->br_startoff && 2698 left.br_startblock + left.br_blockcount == new->br_startblock && 2699 left.br_state == new->br_state && 2700 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2701 state |= BMAP_LEFT_CONTIG; 2702 2703 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2704 new->br_startoff + new->br_blockcount == right.br_startoff && 2705 new->br_startblock + new->br_blockcount == right.br_startblock && 2706 new->br_state == right.br_state && 2707 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2708 (!(state & BMAP_LEFT_CONTIG) || 2709 left.br_blockcount + new->br_blockcount + 2710 right.br_blockcount <= MAXEXTLEN)) 2711 state |= BMAP_RIGHT_CONTIG; 2712 2713 error = 0; 2714 /* 2715 * Select which case we're in here, and implement it. 2716 */ 2717 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2718 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2719 /* 2720 * New allocation is contiguous with real allocations on the 2721 * left and on the right. 2722 * Merge all three into a single extent record. 2723 */ 2724 left.br_blockcount += new->br_blockcount + right.br_blockcount; 2725 2726 xfs_iext_remove(ip, icur, state); 2727 xfs_iext_prev(ifp, icur); 2728 xfs_iext_update_extent(ip, state, icur, &left); 2729 2730 XFS_IFORK_NEXT_SET(ip, whichfork, 2731 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2732 if (cur == NULL) { 2733 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2734 } else { 2735 rval = XFS_ILOG_CORE; 2736 error = xfs_bmbt_lookup_eq(cur, &right, &i); 2737 if (error) 2738 goto done; 2739 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2740 error = xfs_btree_delete(cur, &i); 2741 if (error) 2742 goto done; 2743 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2744 error = xfs_btree_decrement(cur, 0, &i); 2745 if (error) 2746 goto done; 2747 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2748 error = xfs_bmbt_update(cur, &left); 2749 if (error) 2750 goto done; 2751 } 2752 break; 2753 2754 case BMAP_LEFT_CONTIG: 2755 /* 2756 * New allocation is contiguous with a real allocation 2757 * on the left. 2758 * Merge the new allocation with the left neighbor. 2759 */ 2760 old = left; 2761 left.br_blockcount += new->br_blockcount; 2762 2763 xfs_iext_prev(ifp, icur); 2764 xfs_iext_update_extent(ip, state, icur, &left); 2765 2766 if (cur == NULL) { 2767 rval = xfs_ilog_fext(whichfork); 2768 } else { 2769 rval = 0; 2770 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2771 if (error) 2772 goto done; 2773 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2774 error = xfs_bmbt_update(cur, &left); 2775 if (error) 2776 goto done; 2777 } 2778 break; 2779 2780 case BMAP_RIGHT_CONTIG: 2781 /* 2782 * New allocation is contiguous with a real allocation 2783 * on the right. 2784 * Merge the new allocation with the right neighbor. 
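* Only the right record changes: its start moves back to new->br_startoff and
* its length grows, so a single bmbt update suffices.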
2785 */ 2786 old = right; 2787 2788 right.br_startoff = new->br_startoff; 2789 right.br_startblock = new->br_startblock; 2790 right.br_blockcount += new->br_blockcount; 2791 xfs_iext_update_extent(ip, state, icur, &right); 2792 2793 if (cur == NULL) { 2794 rval = xfs_ilog_fext(whichfork); 2795 } else { 2796 rval = 0; 2797 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2798 if (error) 2799 goto done; 2800 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2801 error = xfs_bmbt_update(cur, &right); 2802 if (error) 2803 goto done; 2804 } 2805 break; 2806 2807 case 0: 2808 /* 2809 * New allocation is not contiguous with another 2810 * real allocation. 2811 * Insert a new entry. 2812 */ 2813 xfs_iext_insert(ip, icur, new, state); 2814 XFS_IFORK_NEXT_SET(ip, whichfork, 2815 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2816 if (cur == NULL) { 2817 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2818 } else { 2819 rval = XFS_ILOG_CORE; 2820 error = xfs_bmbt_lookup_eq(cur, new, &i); 2821 if (error) 2822 goto done; 2823 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2824 error = xfs_btree_insert(cur, &i); 2825 if (error) 2826 goto done; 2827 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2828 } 2829 break; 2830 } 2831 2832 /* add reverse mapping unless caller opted out */ 2833 if (!(flags & XFS_BMAPI_NORMAP)) { 2834 error = xfs_rmap_map_extent(tp, ip, whichfork, new); 2835 if (error) 2836 goto done; 2837 } 2838 2839 /* convert to a btree if necessary */ 2840 if (xfs_bmap_needs_btree(ip, whichfork)) { 2841 int tmp_logflags; /* partial log flag return val */ 2842 2843 ASSERT(cur == NULL); 2844 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2845 &tmp_logflags, whichfork); 2846 *logflagsp |= tmp_logflags; 2847 cur = *curp; 2848 if (error) 2849 goto done; 2850 } 2851 2852 /* clear out the allocated field, done with it now in any case. */ 2853 if (cur) 2854 cur->bc_private.b.allocated = 0; 2855 2856 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 2857 done: 2858 *logflagsp |= rval; 2859 return error; 2860 } 2861 2862 /* 2863 * Functions used in the extent read, allocate and remove paths 2864 */ 2865 2866 /* 2867 * Adjust the size of the new extent based on di_extsize and rt extsize. 2868 */ 2869 int 2870 xfs_bmap_extsize_align( 2871 xfs_mount_t *mp, 2872 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2873 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2874 xfs_extlen_t extsz, /* align to this extent size */ 2875 int rt, /* is this a realtime inode? */ 2876 int eof, /* is extent at end-of-file? */ 2877 int delay, /* creating delalloc extent? */ 2878 int convert, /* overwriting unwritten extent? */ 2879 xfs_fileoff_t *offp, /* in/out: aligned offset */ 2880 xfs_extlen_t *lenp) /* in/out: aligned length */ 2881 { 2882 xfs_fileoff_t orig_off; /* original offset */ 2883 xfs_extlen_t orig_alen; /* original length */ 2884 xfs_fileoff_t orig_end; /* original off+len */ 2885 xfs_fileoff_t nexto; /* next file offset */ 2886 xfs_fileoff_t prevo; /* previous file offset */ 2887 xfs_fileoff_t align_off; /* temp for offset */ 2888 xfs_extlen_t align_alen; /* temp for length */ 2889 xfs_extlen_t temp; /* temp for calculations */ 2890 2891 if (convert) 2892 return 0; 2893 2894 orig_off = align_off = *offp; 2895 orig_alen = align_alen = *lenp; 2896 orig_end = orig_off + orig_alen; 2897 2898 /* 2899 * If this request overlaps an existing extent, then don't 2900 * attempt to perform any additional alignment. 
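* Otherwise both ends are rounded out to extsz multiples below; e.g., with
* extsz = 16, a request at offset 7 for 4 blocks becomes the aligned
* request [0,16).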
2901 */ 2902 if (!delay && !eof && 2903 (orig_off >= gotp->br_startoff) && 2904 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 2905 return 0; 2906 } 2907 2908 /* 2909 * If the file offset is unaligned vs. the extent size 2910 * we need to align it. This will be possible unless 2911 * the file was previously written with a kernel that didn't 2912 * perform this alignment, or if a truncate shot us in the 2913 * foot. 2914 */ 2915 div_u64_rem(orig_off, extsz, &temp); 2916 if (temp) { 2917 align_alen += temp; 2918 align_off -= temp; 2919 } 2920 2921 /* Same adjustment for the end of the requested area. */ 2922 temp = (align_alen % extsz); 2923 if (temp) 2924 align_alen += extsz - temp; 2925 2926 /* 2927 * For large extent hint sizes, the aligned extent might be larger than 2928 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls 2929 * the length back under MAXEXTLEN. The outer allocation loops handle 2930 * short allocation just fine, so it is safe to do this. We only want to 2931 * do it when we are forced to, though, because it means more allocation 2932 * operations are required. 2933 */ 2934 while (align_alen > MAXEXTLEN) 2935 align_alen -= extsz; 2936 ASSERT(align_alen <= MAXEXTLEN); 2937 2938 /* 2939 * If the previous block overlaps with this proposed allocation 2940 * then move the start forward without adjusting the length. 2941 */ 2942 if (prevp->br_startoff != NULLFILEOFF) { 2943 if (prevp->br_startblock == HOLESTARTBLOCK) 2944 prevo = prevp->br_startoff; 2945 else 2946 prevo = prevp->br_startoff + prevp->br_blockcount; 2947 } else 2948 prevo = 0; 2949 if (align_off != orig_off && align_off < prevo) 2950 align_off = prevo; 2951 /* 2952 * If the next block overlaps with this proposed allocation 2953 * then move the start back without adjusting the length, 2954 * but not before offset 0. 2955 * This may of course make the start overlap previous block, 2956 * and if we hit the offset 0 limit then the next block 2957 * can still overlap too. 2958 */ 2959 if (!eof && gotp->br_startoff != NULLFILEOFF) { 2960 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 2961 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 2962 nexto = gotp->br_startoff + gotp->br_blockcount; 2963 else 2964 nexto = gotp->br_startoff; 2965 } else 2966 nexto = NULLFILEOFF; 2967 if (!eof && 2968 align_off + align_alen != orig_end && 2969 align_off + align_alen > nexto) 2970 align_off = nexto > align_alen ? nexto - align_alen : 0; 2971 /* 2972 * If we're now overlapping the next or previous extent that 2973 * means we can't fit an extsz piece in this hole. Just move 2974 * the start forward to the first valid spot and set 2975 * the length so we hit the end. 2976 */ 2977 if (align_off != orig_off && align_off < prevo) 2978 align_off = prevo; 2979 if (align_off + align_alen != orig_end && 2980 align_off + align_alen > nexto && 2981 nexto != NULLFILEOFF) { 2982 ASSERT(nexto > prevo); 2983 align_alen = nexto - align_off; 2984 } 2985 2986 /* 2987 * If realtime, and the result isn't a multiple of the realtime 2988 * extent size we need to remove blocks until it is. 2989 */ 2990 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 2991 /* 2992 * We're not covering the original request, or 2993 * we won't be able to once we fix the length. 2994 */ 2995 if (orig_off < align_off || 2996 orig_end > align_off + align_alen || 2997 align_alen - temp < orig_alen) 2998 return -EINVAL; 2999 /* 3000 * Try to fix it by moving the start up. 
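* (legal only while the raised start still covers orig_off, i.e. while
* align_off + temp <= orig_off).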
3001 */ 3002 if (align_off + temp <= orig_off) { 3003 align_alen -= temp; 3004 align_off += temp; 3005 } 3006 /* 3007 * Try to fix it by moving the end in. 3008 */ 3009 else if (align_off + align_alen - temp >= orig_end) 3010 align_alen -= temp; 3011 /* 3012 * Set the start to the minimum then trim the length. 3013 */ 3014 else { 3015 align_alen -= orig_off - align_off; 3016 align_off = orig_off; 3017 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3018 } 3019 /* 3020 * Result doesn't cover the request, fail it. 3021 */ 3022 if (orig_off < align_off || orig_end > align_off + align_alen) 3023 return -EINVAL; 3024 } else { 3025 ASSERT(orig_off >= align_off); 3026 /* see MAXEXTLEN handling above */ 3027 ASSERT(orig_end <= align_off + align_alen || 3028 align_alen + extsz > MAXEXTLEN); 3029 } 3030 3031 #ifdef DEBUG 3032 if (!eof && gotp->br_startoff != NULLFILEOFF) 3033 ASSERT(align_off + align_alen <= gotp->br_startoff); 3034 if (prevp->br_startoff != NULLFILEOFF) 3035 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3036 #endif 3037 3038 *lenp = align_alen; 3039 *offp = align_off; 3040 return 0; 3041 } 3042 3043 #define XFS_ALLOC_GAP_UNITS 4 3044 3045 void 3046 xfs_bmap_adjacent( 3047 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3048 { 3049 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3050 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3051 xfs_mount_t *mp; /* mount point structure */ 3052 int nullfb; /* true if ap->firstblock isn't set */ 3053 int rt; /* true if inode is realtime */ 3054 3055 #define ISVALID(x,y) \ 3056 (rt ? \ 3057 (x) < mp->m_sb.sb_rblocks : \ 3058 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3059 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3060 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3061 3062 mp = ap->ip->i_mount; 3063 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3064 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3065 xfs_alloc_is_userdata(ap->datatype); 3066 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3067 ap->tp->t_firstblock); 3068 /* 3069 * If allocating at eof, and there's a previous real block, 3070 * try to use its last block as our starting point. 3071 */ 3072 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3073 !isnullstartblock(ap->prev.br_startblock) && 3074 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3075 ap->prev.br_startblock)) { 3076 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3077 /* 3078 * Adjust for the gap between prevp and us. 3079 */ 3080 adjust = ap->offset - 3081 (ap->prev.br_startoff + ap->prev.br_blockcount); 3082 if (adjust && 3083 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3084 ap->blkno += adjust; 3085 } 3086 /* 3087 * If not at eof, then compare the two neighbor blocks. 3088 * Figure out whether either one gives us a good starting point, 3089 * and pick the better one. 3090 */ 3091 else if (!ap->eof) { 3092 xfs_fsblock_t gotbno; /* right side block number */ 3093 xfs_fsblock_t gotdiff=0; /* right side difference */ 3094 xfs_fsblock_t prevbno; /* left side block number */ 3095 xfs_fsblock_t prevdiff=0; /* left side difference */ 3096 3097 /* 3098 * If there's a previous (left) block, select a requested 3099 * start block based on it. 3100 */ 3101 if (ap->prev.br_startoff != NULLFILEOFF && 3102 !isnullstartblock(ap->prev.br_startblock) && 3103 (prevbno = ap->prev.br_startblock + 3104 ap->prev.br_blockcount) && 3105 ISVALID(prevbno, ap->prev.br_startblock)) { 3106 /* 3107 * Calculate gap to end of previous block. 
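* e.g., if the previous extent ends at file offset 100 and this request is at
* offset 105, the gap (prevdiff) is 5 blocks.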
3108 */ 3109 adjust = prevdiff = ap->offset - 3110 (ap->prev.br_startoff + 3111 ap->prev.br_blockcount); 3112 /* 3113 * Figure the startblock based on the previous block's 3114 * end and the gap size. 3115 * Heuristic! 3116 * If the gap is large relative to the piece we're 3117 * allocating, or using it gives us an invalid block 3118 * number, then just use the end of the previous block. 3119 */ 3120 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3121 ISVALID(prevbno + prevdiff, 3122 ap->prev.br_startblock)) 3123 prevbno += adjust; 3124 else 3125 prevdiff += adjust; 3126 /* 3127 * If the firstblock forbids it, can't use it, 3128 * must use default. 3129 */ 3130 if (!rt && !nullfb && 3131 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) 3132 prevbno = NULLFSBLOCK; 3133 } 3134 /* 3135 * No previous block or can't follow it, just default. 3136 */ 3137 else 3138 prevbno = NULLFSBLOCK; 3139 /* 3140 * If there's a following (right) block, select a requested 3141 * start block based on it. 3142 */ 3143 if (!isnullstartblock(ap->got.br_startblock)) { 3144 /* 3145 * Calculate gap to start of next block. 3146 */ 3147 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3148 /* 3149 * Figure the startblock based on the next block's 3150 * start and the gap size. 3151 */ 3152 gotbno = ap->got.br_startblock; 3153 /* 3154 * Heuristic! 3155 * If the gap is large relative to the piece we're 3156 * allocating, or using it gives us an invalid block 3157 * number, then just use the start of the next block 3158 * offset by our length. 3159 */ 3160 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3161 ISVALID(gotbno - gotdiff, gotbno)) 3162 gotbno -= adjust; 3163 else if (ISVALID(gotbno - ap->length, gotbno)) { 3164 gotbno -= ap->length; 3165 gotdiff += adjust - ap->length; 3166 } else 3167 gotdiff += adjust; 3168 /* 3169 * If the firstblock forbids it, can't use it, 3170 * must use default. 3171 */ 3172 if (!rt && !nullfb && 3173 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) 3174 gotbno = NULLFSBLOCK; 3175 } 3176 /* 3177 * No next block, just default. 3178 */ 3179 else 3180 gotbno = NULLFSBLOCK; 3181 /* 3182 * If both valid, pick the better one, else the only good 3183 * one, else ap->blkno is already set (to 0 or the inode block). 3184 */ 3185 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3186 ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno; 3187 else if (prevbno != NULLFSBLOCK) 3188 ap->blkno = prevbno; 3189 else if (gotbno != NULLFSBLOCK) 3190 ap->blkno = gotbno; 3191 } 3192 #undef ISVALID 3193 } 3194 3195 static int 3196 xfs_bmap_longest_free_extent( 3197 struct xfs_trans *tp, 3198 xfs_agnumber_t ag, 3199 xfs_extlen_t *blen, 3200 int *notinit) 3201 { 3202 struct xfs_mount *mp = tp->t_mountp; 3203 struct xfs_perag *pag; 3204 xfs_extlen_t longest; 3205 int error = 0; 3206 3207 pag = xfs_perag_get(mp, ag); 3208 if (!pag->pagf_init) { 3209 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK); 3210 if (error) 3211 goto out; 3212 3213 if (!pag->pagf_init) { 3214 *notinit = 1; 3215 goto out; 3216 } 3217 } 3218 3219 longest = xfs_alloc_longest_free_extent(pag, 3220 xfs_alloc_min_freelist(mp, pag), 3221 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3222 if (*blen < longest) 3223 *blen = longest; 3224 3225 out: 3226 xfs_perag_put(pag); 3227 return error; 3228 } 3229 3230 static void 3231 xfs_bmap_select_minlen( 3232 struct xfs_bmalloca *ap, 3233 struct xfs_alloc_arg *args, 3234 xfs_extlen_t *blen, 3235 int notinit) 3236 { 3237 if (notinit || *blen < ap->minlen) { 3238 /* 3239 * Since we did a BUF_TRYLOCK above, it is possible that 3240 * there is space for this request. 3241 */ 3242 args->minlen = ap->minlen; 3243 } else if (*blen < args->maxlen) { 3244 /* 3245 * If the best seen length is less than the request length, 3246 * use the best as the minimum. 3247 */ 3248 args->minlen = *blen; 3249 } else { 3250 /* 3251 * Otherwise we've seen an extent as big as maxlen, use that 3252 * as the minimum. 3253 */ 3254 args->minlen = args->maxlen; 3255 } 3256 } 3257 3258 STATIC int 3259 xfs_bmap_btalloc_nullfb( 3260 struct xfs_bmalloca *ap, 3261 struct xfs_alloc_arg *args, 3262 xfs_extlen_t *blen) 3263 { 3264 struct xfs_mount *mp = ap->ip->i_mount; 3265 xfs_agnumber_t ag, startag; 3266 int notinit = 0; 3267 int error; 3268 3269 args->type = XFS_ALLOCTYPE_START_BNO; 3270 args->total = ap->total; 3271 3272 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3273 if (startag == NULLAGNUMBER) 3274 startag = ag = 0; 3275 3276 while (*blen < args->maxlen) { 3277 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3278 ¬init); 3279 if (error) 3280 return error; 3281 3282 if (++ag == mp->m_sb.sb_agcount) 3283 ag = 0; 3284 if (ag == startag) 3285 break; 3286 } 3287 3288 xfs_bmap_select_minlen(ap, args, blen, notinit); 3289 return 0; 3290 } 3291 3292 STATIC int 3293 xfs_bmap_btalloc_filestreams( 3294 struct xfs_bmalloca *ap, 3295 struct xfs_alloc_arg *args, 3296 xfs_extlen_t *blen) 3297 { 3298 struct xfs_mount *mp = ap->ip->i_mount; 3299 xfs_agnumber_t ag; 3300 int notinit = 0; 3301 int error; 3302 3303 args->type = XFS_ALLOCTYPE_NEAR_BNO; 3304 args->total = ap->total; 3305 3306 ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3307 if (ag == NULLAGNUMBER) 3308 ag = 0; 3309 3310 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init); 3311 if (error) 3312 return error; 3313 3314 if (*blen < args->maxlen) { 3315 error = xfs_filestream_new_ag(ap, &ag); 3316 if (error) 3317 return error; 3318 3319 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3320 ¬init); 3321 if (error) 3322 return error; 3323 3324 } 3325 3326 xfs_bmap_select_minlen(ap, args, blen, notinit); 3327 3328 /* 3329 * Set the failure fallback case to look in the selected AG as stream 3330 * may have moved. 
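* (block 0 of the chosen AG; the NEAR_BNO allocation performed later by the
* caller then searches outward from there).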
3331 */ 3332 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 3333 return 0; 3334 } 3335 3336 /* Update all inode and quota accounting for the allocation we just did. */ 3337 static void 3338 xfs_bmap_btalloc_accounting( 3339 struct xfs_bmalloca *ap, 3340 struct xfs_alloc_arg *args) 3341 { 3342 if (ap->flags & XFS_BMAPI_COWFORK) { 3343 /* 3344 * COW fork blocks are in-core only and thus are treated as 3345 * in-core quota reservation (like delalloc blocks) even when 3346 * converted to real blocks. The quota reservation is not 3347 * accounted to disk until blocks are remapped to the data 3348 * fork. So if these blocks were previously delalloc, we 3349 * already have quota reservation and there's nothing to do 3350 * yet. 3351 */ 3352 if (ap->wasdel) 3353 return; 3354 3355 /* 3356 * Otherwise, we've allocated blocks in a hole. The transaction 3357 * has acquired in-core quota reservation for this extent. 3358 * Rather than account these as real blocks, however, we reduce 3359 * the transaction quota reservation based on the allocation. 3360 * This essentially transfers the transaction quota reservation 3361 * to that of a delalloc extent. 3362 */ 3363 ap->ip->i_delayed_blks += args->len; 3364 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 3365 -(long)args->len); 3366 return; 3367 } 3368 3369 /* data/attr fork only */ 3370 ap->ip->i_d.di_nblocks += args->len; 3371 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3372 if (ap->wasdel) 3373 ap->ip->i_delayed_blks -= args->len; 3374 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3375 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3376 args->len); 3377 } 3378 3379 STATIC int 3380 xfs_bmap_btalloc( 3381 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3382 { 3383 xfs_mount_t *mp; /* mount point structure */ 3384 xfs_alloctype_t atype = 0; /* type for allocation routines */ 3385 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3386 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3387 xfs_agnumber_t ag; 3388 xfs_alloc_arg_t args; 3389 xfs_fileoff_t orig_offset; 3390 xfs_extlen_t orig_length; 3391 xfs_extlen_t blen; 3392 xfs_extlen_t nextminlen = 0; 3393 int nullfb; /* true if ap->firstblock isn't set */ 3394 int isaligned; 3395 int tryagain; 3396 int error; 3397 int stripe_align; 3398 3399 ASSERT(ap->length); 3400 orig_offset = ap->offset; 3401 orig_length = ap->length; 3402 3403 mp = ap->ip->i_mount; 3404 3405 /* stripe alignment for allocation is determined by mount parameters */ 3406 stripe_align = 0; 3407 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 3408 stripe_align = mp->m_swidth; 3409 else if (mp->m_dalign) 3410 stripe_align = mp->m_dalign; 3411 3412 if (ap->flags & XFS_BMAPI_COWFORK) 3413 align = xfs_get_cowextsz_hint(ap->ip); 3414 else if (xfs_alloc_is_userdata(ap->datatype)) 3415 align = xfs_get_extsz_hint(ap->ip); 3416 if (align) { 3417 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3418 align, 0, ap->eof, 0, ap->conv, 3419 &ap->offset, &ap->length); 3420 ASSERT(!error); 3421 ASSERT(ap->length); 3422 } 3423 3424 3425 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3426 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3427 ap->tp->t_firstblock); 3428 if (nullfb) { 3429 if (xfs_alloc_is_userdata(ap->datatype) && 3430 xfs_inode_is_filestream(ap->ip)) { 3431 ag = xfs_filestream_lookup_ag(ap->ip); 3432 ag = (ag != NULLAGNUMBER) ? 
ag : 0; 3433 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 3434 } else { 3435 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 3436 } 3437 } else 3438 ap->blkno = ap->tp->t_firstblock; 3439 3440 xfs_bmap_adjacent(ap); 3441 3442 /* 3443 * If allowed, use ap->blkno; otherwise must use firstblock since 3444 * it's in the right allocation group. 3445 */ 3446 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 3447 ; 3448 else 3449 ap->blkno = ap->tp->t_firstblock; 3450 /* 3451 * Normal allocation, done through xfs_alloc_vextent. 3452 */ 3453 tryagain = isaligned = 0; 3454 memset(&args, 0, sizeof(args)); 3455 args.tp = ap->tp; 3456 args.mp = mp; 3457 args.fsbno = ap->blkno; 3458 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; 3459 3460 /* Trim the allocation back to the maximum an AG can fit. */ 3461 args.maxlen = min(ap->length, mp->m_ag_max_usable); 3462 blen = 0; 3463 if (nullfb) { 3464 /* 3465 * Search for an allocation group with a single extent large 3466 * enough for the request. If one isn't found, then adjust 3467 * the minimum allocation size to the largest space found. 3468 */ 3469 if (xfs_alloc_is_userdata(ap->datatype) && 3470 xfs_inode_is_filestream(ap->ip)) 3471 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); 3472 else 3473 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 3474 if (error) 3475 return error; 3476 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3477 if (xfs_inode_is_filestream(ap->ip)) 3478 args.type = XFS_ALLOCTYPE_FIRST_AG; 3479 else 3480 args.type = XFS_ALLOCTYPE_START_BNO; 3481 args.total = args.minlen = ap->minlen; 3482 } else { 3483 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3484 args.total = ap->total; 3485 args.minlen = ap->minlen; 3486 } 3487 /* apply extent size hints if obtained earlier */ 3488 if (align) { 3489 args.prod = align; 3490 div_u64_rem(ap->offset, args.prod, &args.mod); 3491 if (args.mod) 3492 args.mod = args.prod - args.mod; 3493 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3494 args.prod = 1; 3495 args.mod = 0; 3496 } else { 3497 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3498 div_u64_rem(ap->offset, args.prod, &args.mod); 3499 if (args.mod) 3500 args.mod = args.prod - args.mod; 3501 } 3502 /* 3503 * If we are not low on available data blocks, and the 3504 * underlying logical volume manager is a stripe, and 3505 * the file offset is zero then try to allocate data 3506 * blocks on stripe unit boundary. 3507 * NOTE: ap->aeof is only set if the allocation length 3508 * is >= the stripe unit and the allocation offset is 3509 * at the end of file. 3510 */ 3511 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) { 3512 if (!ap->offset) { 3513 args.alignment = stripe_align; 3514 atype = args.type; 3515 isaligned = 1; 3516 /* 3517 * Adjust for alignment 3518 */ 3519 if (blen > args.alignment && blen <= args.maxlen) 3520 args.minlen = blen - args.alignment; 3521 args.minalignslop = 0; 3522 } else { 3523 /* 3524 * First try an exact bno allocation. 3525 * If it fails then do a near or start bno 3526 * allocation with alignment turned on. 3527 */ 3528 atype = args.type; 3529 tryagain = 1; 3530 args.type = XFS_ALLOCTYPE_THIS_BNO; 3531 args.alignment = 1; 3532 /* 3533 * Compute the minlen+alignment for the 3534 * next case. Set slop so that the value 3535 * of minlen+alignment+slop doesn't go up 3536 * between the calls. 
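* e.g., with args.minlen = 10 and stripe_align = 8, nextminlen defaults to 10
* and minalignslop becomes 10 + 8 - 10 - 1 = 7, so both attempts require at
* most minlen + alignment + slop = 18 blocks of contiguous free space.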
3537 */ 3538 if (blen > stripe_align && blen <= args.maxlen) 3539 nextminlen = blen - stripe_align; 3540 else 3541 nextminlen = args.minlen; 3542 if (nextminlen + stripe_align > args.minlen + 1) 3543 args.minalignslop = 3544 nextminlen + stripe_align - 3545 args.minlen - 1; 3546 else 3547 args.minalignslop = 0; 3548 } 3549 } else { 3550 args.alignment = 1; 3551 args.minalignslop = 0; 3552 } 3553 args.minleft = ap->minleft; 3554 args.wasdel = ap->wasdel; 3555 args.resv = XFS_AG_RESV_NONE; 3556 args.datatype = ap->datatype; 3557 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) 3558 args.ip = ap->ip; 3559 3560 error = xfs_alloc_vextent(&args); 3561 if (error) 3562 return error; 3563 3564 if (tryagain && args.fsbno == NULLFSBLOCK) { 3565 /* 3566 * Exact allocation failed. Now try with alignment 3567 * turned on. 3568 */ 3569 args.type = atype; 3570 args.fsbno = ap->blkno; 3571 args.alignment = stripe_align; 3572 args.minlen = nextminlen; 3573 args.minalignslop = 0; 3574 isaligned = 1; 3575 if ((error = xfs_alloc_vextent(&args))) 3576 return error; 3577 } 3578 if (isaligned && args.fsbno == NULLFSBLOCK) { 3579 /* 3580 * allocation failed, so turn off alignment and 3581 * try again. 3582 */ 3583 args.type = atype; 3584 args.fsbno = ap->blkno; 3585 args.alignment = 0; 3586 if ((error = xfs_alloc_vextent(&args))) 3587 return error; 3588 } 3589 if (args.fsbno == NULLFSBLOCK && nullfb && 3590 args.minlen > ap->minlen) { 3591 args.minlen = ap->minlen; 3592 args.type = XFS_ALLOCTYPE_START_BNO; 3593 args.fsbno = ap->blkno; 3594 if ((error = xfs_alloc_vextent(&args))) 3595 return error; 3596 } 3597 if (args.fsbno == NULLFSBLOCK && nullfb) { 3598 args.fsbno = 0; 3599 args.type = XFS_ALLOCTYPE_FIRST_AG; 3600 args.total = ap->minlen; 3601 if ((error = xfs_alloc_vextent(&args))) 3602 return error; 3603 ap->tp->t_flags |= XFS_TRANS_LOWMODE; 3604 } 3605 if (args.fsbno != NULLFSBLOCK) { 3606 /* 3607 * check the allocation happened at the same or higher AG than 3608 * the first block that was allocated. 3609 */ 3610 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK || 3611 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <= 3612 XFS_FSB_TO_AGNO(mp, args.fsbno)); 3613 3614 ap->blkno = args.fsbno; 3615 if (ap->tp->t_firstblock == NULLFSBLOCK) 3616 ap->tp->t_firstblock = args.fsbno; 3617 ASSERT(nullfb || fb_agno <= args.agno); 3618 ap->length = args.len; 3619 /* 3620 * If the extent size hint is active, we tried to round the 3621 * caller's allocation request offset down to extsz and the 3622 * length up to another extsz boundary. If we found a free 3623 * extent we mapped it in starting at this new offset. If the 3624 * newly mapped space isn't long enough to cover any of the 3625 * range of offsets that was originally requested, move the 3626 * mapping up so that we can fill as much of the caller's 3627 * original request as possible. Free space is apparently 3628 * very fragmented so we're unlikely to be able to satisfy the 3629 * hints anyway. 3630 */ 3631 if (ap->length <= orig_length) 3632 ap->offset = orig_offset; 3633 else if (ap->offset + ap->length < orig_offset + orig_length) 3634 ap->offset = orig_offset + orig_length - ap->length; 3635 xfs_bmap_btalloc_accounting(ap, &args); 3636 } else { 3637 ap->blkno = NULLFSBLOCK; 3638 ap->length = 0; 3639 } 3640 return 0; 3641 } 3642 3643 /* 3644 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 3645 * It figures out where to ask the underlying allocator to put the new extent. 
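* Realtime user data is sent to the realtime allocator; everything else goes
* through the AG-based btree allocator.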
3646 */ 3647 STATIC int 3648 xfs_bmap_alloc( 3649 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3650 { 3651 if (XFS_IS_REALTIME_INODE(ap->ip) && 3652 xfs_alloc_is_userdata(ap->datatype)) 3653 return xfs_bmap_rtalloc(ap); 3654 return xfs_bmap_btalloc(ap); 3655 } 3656 3657 /* Trim extent to fit a logical block range. */ 3658 void 3659 xfs_trim_extent( 3660 struct xfs_bmbt_irec *irec, 3661 xfs_fileoff_t bno, 3662 xfs_filblks_t len) 3663 { 3664 xfs_fileoff_t distance; 3665 xfs_fileoff_t end = bno + len; 3666 3667 if (irec->br_startoff + irec->br_blockcount <= bno || 3668 irec->br_startoff >= end) { 3669 irec->br_blockcount = 0; 3670 return; 3671 } 3672 3673 if (irec->br_startoff < bno) { 3674 distance = bno - irec->br_startoff; 3675 if (isnullstartblock(irec->br_startblock)) 3676 irec->br_startblock = DELAYSTARTBLOCK; 3677 if (irec->br_startblock != DELAYSTARTBLOCK && 3678 irec->br_startblock != HOLESTARTBLOCK) 3679 irec->br_startblock += distance; 3680 irec->br_startoff += distance; 3681 irec->br_blockcount -= distance; 3682 } 3683 3684 if (end < irec->br_startoff + irec->br_blockcount) { 3685 distance = irec->br_startoff + irec->br_blockcount - end; 3686 irec->br_blockcount -= distance; 3687 } 3688 } 3689 3690 /* 3691 * Trim the returned map to the required bounds 3692 */ 3693 STATIC void 3694 xfs_bmapi_trim_map( 3695 struct xfs_bmbt_irec *mval, 3696 struct xfs_bmbt_irec *got, 3697 xfs_fileoff_t *bno, 3698 xfs_filblks_t len, 3699 xfs_fileoff_t obno, 3700 xfs_fileoff_t end, 3701 int n, 3702 int flags) 3703 { 3704 if ((flags & XFS_BMAPI_ENTIRE) || 3705 got->br_startoff + got->br_blockcount <= obno) { 3706 *mval = *got; 3707 if (isnullstartblock(got->br_startblock)) 3708 mval->br_startblock = DELAYSTARTBLOCK; 3709 return; 3710 } 3711 3712 if (obno > *bno) 3713 *bno = obno; 3714 ASSERT((*bno >= obno) || (n == 0)); 3715 ASSERT(*bno < end); 3716 mval->br_startoff = *bno; 3717 if (isnullstartblock(got->br_startblock)) 3718 mval->br_startblock = DELAYSTARTBLOCK; 3719 else 3720 mval->br_startblock = got->br_startblock + 3721 (*bno - got->br_startoff); 3722 /* 3723 * Return the minimum of what we got and what we asked for for 3724 * the length. We can use the len variable here because it is 3725 * modified below and we could have been there before coming 3726 * here if the first part of the allocation didn't overlap what 3727 * was asked for. 
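* e.g., if got maps file blocks [10,50) and the caller asked for [20,120), the
* result is trimmed to [20,50): min(100, 40 - 10) = 30 blocks.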
3728 */ 3729 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3730 got->br_blockcount - (*bno - got->br_startoff)); 3731 mval->br_state = got->br_state; 3732 ASSERT(mval->br_blockcount <= len); 3733 return; 3734 } 3735 3736 /* 3737 * Update and validate the extent map to return 3738 */ 3739 STATIC void 3740 xfs_bmapi_update_map( 3741 struct xfs_bmbt_irec **map, 3742 xfs_fileoff_t *bno, 3743 xfs_filblks_t *len, 3744 xfs_fileoff_t obno, 3745 xfs_fileoff_t end, 3746 int *n, 3747 int flags) 3748 { 3749 xfs_bmbt_irec_t *mval = *map; 3750 3751 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3752 ((mval->br_startoff + mval->br_blockcount) <= end)); 3753 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3754 (mval->br_startoff < obno)); 3755 3756 *bno = mval->br_startoff + mval->br_blockcount; 3757 *len = end - *bno; 3758 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3759 /* update previous map with new information */ 3760 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3761 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3762 ASSERT(mval->br_state == mval[-1].br_state); 3763 mval[-1].br_blockcount = mval->br_blockcount; 3764 mval[-1].br_state = mval->br_state; 3765 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3766 mval[-1].br_startblock != DELAYSTARTBLOCK && 3767 mval[-1].br_startblock != HOLESTARTBLOCK && 3768 mval->br_startblock == mval[-1].br_startblock + 3769 mval[-1].br_blockcount && 3770 mval[-1].br_state == mval->br_state) { 3771 ASSERT(mval->br_startoff == 3772 mval[-1].br_startoff + mval[-1].br_blockcount); 3773 mval[-1].br_blockcount += mval->br_blockcount; 3774 } else if (*n > 0 && 3775 mval->br_startblock == DELAYSTARTBLOCK && 3776 mval[-1].br_startblock == DELAYSTARTBLOCK && 3777 mval->br_startoff == 3778 mval[-1].br_startoff + mval[-1].br_blockcount) { 3779 mval[-1].br_blockcount += mval->br_blockcount; 3780 mval[-1].br_state = mval->br_state; 3781 } else if (!((*n == 0) && 3782 ((mval->br_startoff + mval->br_blockcount) <= 3783 obno))) { 3784 mval++; 3785 (*n)++; 3786 } 3787 *map = mval; 3788 } 3789 3790 /* 3791 * Map file blocks to filesystem blocks without allocation. 3792 */ 3793 int 3794 xfs_bmapi_read( 3795 struct xfs_inode *ip, 3796 xfs_fileoff_t bno, 3797 xfs_filblks_t len, 3798 struct xfs_bmbt_irec *mval, 3799 int *nmap, 3800 int flags) 3801 { 3802 struct xfs_mount *mp = ip->i_mount; 3803 struct xfs_ifork *ifp; 3804 struct xfs_bmbt_irec got; 3805 xfs_fileoff_t obno; 3806 xfs_fileoff_t end; 3807 struct xfs_iext_cursor icur; 3808 int error; 3809 bool eof = false; 3810 int n = 0; 3811 int whichfork = xfs_bmapi_whichfork(flags); 3812 3813 ASSERT(*nmap >= 1); 3814 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| 3815 XFS_BMAPI_COWFORK))); 3816 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3817 3818 if (unlikely(XFS_TEST_ERROR( 3819 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 3820 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 3821 mp, XFS_ERRTAG_BMAPIFORMAT))) { 3822 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 3823 return -EFSCORRUPTED; 3824 } 3825 3826 if (XFS_FORCED_SHUTDOWN(mp)) 3827 return -EIO; 3828 3829 XFS_STATS_INC(mp, xs_blk_mapr); 3830 3831 ifp = XFS_IFORK_PTR(ip, whichfork); 3832 3833 /* No CoW fork? Return a hole. 
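 * (An inode that has never been reflinked has no CoW fork at all, so
 * nothing can be staged for copy-on-write and the whole request is
 * answered with a hole.)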
*/
3834 if (whichfork == XFS_COW_FORK && !ifp) {
3835 mval->br_startoff = bno;
3836 mval->br_startblock = HOLESTARTBLOCK;
3837 mval->br_blockcount = len;
3838 mval->br_state = XFS_EXT_NORM;
3839 *nmap = 1;
3840 return 0;
3841 }
3842
3843 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
3844 error = xfs_iread_extents(NULL, ip, whichfork);
3845 if (error)
3846 return error;
3847 }
3848
3849 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3850 eof = true;
3851 end = bno + len;
3852 obno = bno;
3853
3854 while (bno < end && n < *nmap) {
3855 /* Reading past eof, act as though there's a hole up to end. */
3856 if (eof)
3857 got.br_startoff = end;
3858 if (got.br_startoff > bno) {
3859 /* Reading in a hole. */
3860 mval->br_startoff = bno;
3861 mval->br_startblock = HOLESTARTBLOCK;
3862 mval->br_blockcount =
3863 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3864 mval->br_state = XFS_EXT_NORM;
3865 bno += mval->br_blockcount;
3866 len -= mval->br_blockcount;
3867 mval++;
3868 n++;
3869 continue;
3870 }
3871
3872 /* set up the extent map to return. */
3873 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3874 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3875
3876 /* If we're done, stop now. */
3877 if (bno >= end || n >= *nmap)
3878 break;
3879
3880 /* Else go on to the next record. */
3881 if (!xfs_iext_next_extent(ifp, &icur, &got))
3882 eof = true;
3883 }
3884 *nmap = n;
3885 return 0;
3886 }
3887
3888 /*
3889 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3890 * global pool and the extent inserted into the inode in-core extent tree.
3891 *
3892 * On entry, got refers to the first extent beyond the offset of the extent to
3893 * allocate or eof is specified if no such extent exists. On return, got refers
3894 * to the extent record that was inserted to the inode fork.
3895 *
3896 * Note that the allocated extent may have been merged with contiguous extents
3897 * during insertion into the inode fork. Thus, got does not reflect the current
3898 * state of the inode fork on return. If necessary, the caller can use icur to
3899 * look up the updated record in the inode fork.
3900 */
3901 int
3902 xfs_bmapi_reserve_delalloc(
3903 struct xfs_inode *ip,
3904 int whichfork,
3905 xfs_fileoff_t off,
3906 xfs_filblks_t len,
3907 xfs_filblks_t prealloc,
3908 struct xfs_bmbt_irec *got,
3909 struct xfs_iext_cursor *icur,
3910 int eof)
3911 {
3912 struct xfs_mount *mp = ip->i_mount;
3913 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3914 xfs_extlen_t alen;
3915 xfs_extlen_t indlen;
3916 int error;
3917 xfs_fileoff_t aoff = off;
3918
3919 /*
3920 * Cap the alloc length. Keep track of prealloc so we know whether to
3921 * tag the inode before we return.
3922 */
3923 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
3924 if (!eof)
3925 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3926 if (prealloc && alen >= len)
3927 prealloc = alen - len;
3928
3929 /* Figure out the extent size, adjust alen */
3930 if (whichfork == XFS_COW_FORK) {
3931 struct xfs_bmbt_irec prev;
3932 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
3933
3934 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
3935 prev.br_startoff = NULLFILEOFF;
3936
3937 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
3938 1, 0, &aoff, &alen);
3939 ASSERT(!error);
3940 }
3941
3942 /*
3943 * Make a transaction-less quota reservation for delayed allocation
3944 * blocks. This number gets adjusted later.
3945 * If the reservation fails we return before anything has been allocated.
3946 */
3947 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
3948 XFS_QMOPT_RES_REGBLKS);
3949 if (error)
3950 return error;
3951
3952 /*
3953 * Modify the superblock counters for alen and indlen separately, since
3954 * they could be coming from different places.
3955 */
3956 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
3957 ASSERT(indlen > 0);
3958
3959 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
3960 if (error)
3961 goto out_unreserve_quota;
3962
3963 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
3964 if (error)
3965 goto out_unreserve_blocks;
3966
3967
3968 ip->i_delayed_blks += alen;
3969
3970 got->br_startoff = aoff;
3971 got->br_startblock = nullstartblock(indlen);
3972 got->br_blockcount = alen;
3973 got->br_state = XFS_EXT_NORM;
3974
3975 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
3976
3977 /*
3978 * Tag the inode if blocks were preallocated. Note that COW fork
3979 * preallocation can occur at the start or end of the extent, even when
3980 * prealloc == 0, so we must also check the aligned offset and length.
3981 */
3982 if (whichfork == XFS_DATA_FORK && prealloc)
3983 xfs_inode_set_eofblocks_tag(ip);
3984 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
3985 xfs_inode_set_cowblocks_tag(ip);
3986
3987 return 0;
3988
3989 out_unreserve_blocks:
3990 xfs_mod_fdblocks(mp, alen, false);
3991 out_unreserve_quota:
3992 if (XFS_IS_QUOTA_ON(mp))
3993 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
3994 XFS_QMOPT_RES_REGBLKS);
3995 return error;
3996 }
3997
3998 static int
3999 xfs_bmapi_allocate(
4000 struct xfs_bmalloca *bma)
4001 {
4002 struct xfs_mount *mp = bma->ip->i_mount;
4003 int whichfork = xfs_bmapi_whichfork(bma->flags);
4004 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4005 int tmp_logflags = 0;
4006 int error;
4007
4008 ASSERT(bma->length > 0);
4009
4010 /*
4011 * For the wasdelay case, we could also just allocate the blocks asked
4012 * for in this bmap call, but that wouldn't be as good.
4013 */
4014 if (bma->wasdel) {
4015 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4016 bma->offset = bma->got.br_startoff;
4017 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
4018 } else {
4019 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4020 if (!bma->eof)
4021 bma->length = XFS_FILBLKS_MIN(bma->length,
4022 bma->got.br_startoff - bma->offset);
4023 }
4024
4025 /*
4026 * Set the data type being allocated. For the data fork, the first data
4027 * in the file is treated differently to all other allocations. For the
4028 * attribute fork, we only need to ensure the allocated range is not on
4029 * the busy list.
4030 */
4031 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4032 bma->datatype = XFS_ALLOC_NOBUSY;
4033 if (whichfork == XFS_DATA_FORK) {
4034 if (bma->offset == 0)
4035 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4036 else
4037 bma->datatype |= XFS_ALLOC_USERDATA;
4038 }
4039 if (bma->flags & XFS_BMAPI_ZERO)
4040 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4041 }
4042
4043 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4044
4045 /*
4046 * We only want to do the alignment at EOF if it is userdata and the
4047 * allocation length is larger than a stripe unit.
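 *
 * For example, with a stripe unit (m_dalign) of 16 blocks, a 64-block
 * user-data allocation gets the EOF check below, while a 4-block or
 * metadata allocation does not.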
4048 */ 4049 if (mp->m_dalign && bma->length >= mp->m_dalign && 4050 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4051 error = xfs_bmap_isaeof(bma, whichfork); 4052 if (error) 4053 return error; 4054 } 4055 4056 error = xfs_bmap_alloc(bma); 4057 if (error) 4058 return error; 4059 4060 if (bma->blkno == NULLFSBLOCK) 4061 return 0; 4062 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) 4063 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4064 /* 4065 * Bump the number of extents we've allocated 4066 * in this call. 4067 */ 4068 bma->nallocs++; 4069 4070 if (bma->cur) 4071 bma->cur->bc_private.b.flags = 4072 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4073 4074 bma->got.br_startoff = bma->offset; 4075 bma->got.br_startblock = bma->blkno; 4076 bma->got.br_blockcount = bma->length; 4077 bma->got.br_state = XFS_EXT_NORM; 4078 4079 /* 4080 * In the data fork, a wasdelay extent has been initialized, so 4081 * shouldn't be flagged as unwritten. 4082 * 4083 * For the cow fork, however, we convert delalloc reservations 4084 * (extents allocated for speculative preallocation) to 4085 * allocated unwritten extents, and only convert the unwritten 4086 * extents to real extents when we're about to write the data. 4087 */ 4088 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && 4089 (bma->flags & XFS_BMAPI_PREALLOC)) 4090 bma->got.br_state = XFS_EXT_UNWRITTEN; 4091 4092 if (bma->wasdel) 4093 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4094 else 4095 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4096 whichfork, &bma->icur, &bma->cur, &bma->got, 4097 &bma->logflags, bma->flags); 4098 4099 bma->logflags |= tmp_logflags; 4100 if (error) 4101 return error; 4102 4103 /* 4104 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4105 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4106 * the neighbouring ones. 4107 */ 4108 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4109 4110 ASSERT(bma->got.br_startoff <= bma->offset); 4111 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4112 bma->offset + bma->length); 4113 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4114 bma->got.br_state == XFS_EXT_UNWRITTEN); 4115 return 0; 4116 } 4117 4118 STATIC int 4119 xfs_bmapi_convert_unwritten( 4120 struct xfs_bmalloca *bma, 4121 struct xfs_bmbt_irec *mval, 4122 xfs_filblks_t len, 4123 int flags) 4124 { 4125 int whichfork = xfs_bmapi_whichfork(flags); 4126 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4127 int tmp_logflags = 0; 4128 int error; 4129 4130 /* check if we need to do unwritten->real conversion */ 4131 if (mval->br_state == XFS_EXT_UNWRITTEN && 4132 (flags & XFS_BMAPI_PREALLOC)) 4133 return 0; 4134 4135 /* check if we need to do real->unwritten conversion */ 4136 if (mval->br_state == XFS_EXT_NORM && 4137 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4138 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4139 return 0; 4140 4141 /* 4142 * Modify (by adding) the state flag, if writing. 4143 */ 4144 ASSERT(mval->br_blockcount <= len); 4145 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4146 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4147 bma->ip, whichfork); 4148 } 4149 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4150 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4151 4152 /* 4153 * Before insertion into the bmbt, zero the range being converted 4154 * if required. 
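 * Zeroing before the bmbt is updated ensures that a crash cannot leave
 * a written extent exposing stale disk contents.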
4155 */
4156 if (flags & XFS_BMAPI_ZERO) {
4157 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4158 mval->br_blockcount);
4159 if (error)
4160 return error;
4161 }
4162
4163 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4164 &bma->icur, &bma->cur, mval, &tmp_logflags);
4165 /*
4166 * Log the inode core unconditionally in the unwritten extent conversion
4167 * path because the conversion might not have done so (e.g., if the
4168 * extent count hasn't changed). We need to make sure the inode is dirty
4169 * in the transaction for the sake of fsync(), even if nothing has
4170 * changed, because fsync() will not force the log for this transaction
4171 * unless it sees the inode pinned.
4172 *
4173 * Note: If we're only converting cow fork extents, there aren't
4174 * any on-disk updates to make, so we don't need to log anything.
4175 */
4176 if (whichfork != XFS_COW_FORK)
4177 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4178 if (error)
4179 return error;
4180
4181 /*
4182 * Update our extent pointer, given that
4183 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4184 * of the neighbouring ones.
4185 */
4186 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4187
4188 /*
4189 * We may have combined previously unwritten space with written space,
4190 * so generate another request.
4191 */
4192 if (mval->br_blockcount < len)
4193 return -EAGAIN;
4194 return 0;
4195 }
4196
4197 static inline xfs_extlen_t
4198 xfs_bmapi_minleft(
4199 struct xfs_trans *tp,
4200 struct xfs_inode *ip,
4201 int fork)
4202 {
4203 if (tp && tp->t_firstblock != NULLFSBLOCK)
4204 return 0;
4205 if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE)
4206 return 1;
4207 return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1;
4208 }
4209
4210 /*
4211 * Log whatever the flags say, even on error. Otherwise we might miss detecting
4212 * a case where the data is changed, there's an error, and it's not logged so we
4213 * don't shut down when we should. Don't bother logging extents/btree changes if
4214 * we converted to the other format.
4215 */
4216 static void
4217 xfs_bmapi_finish(
4218 struct xfs_bmalloca *bma,
4219 int whichfork,
4220 int error)
4221 {
4222 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4223 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4224 bma->logflags &= ~xfs_ilog_fext(whichfork);
4225 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4226 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE)
4227 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4228
4229 if (bma->logflags)
4230 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4231 if (bma->cur)
4232 xfs_btree_del_cursor(bma->cur, error);
4233 }
4234
4235 /*
4236 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4237 * extent state if necessary. Detailed behaviour is controlled by the flags
4238 * parameter. Only allocates blocks from a single allocation group, to avoid
4239 * locking problems.
4240 */
4241 int
4242 xfs_bmapi_write(
4243 struct xfs_trans *tp, /* transaction pointer */
4244 struct xfs_inode *ip, /* incore inode */
4245 xfs_fileoff_t bno, /* starting file offs. mapped */
4246 xfs_filblks_t len, /* length to map in file */
4247 int flags, /* XFS_BMAPI_... */
4248 xfs_extlen_t total, /* total blocks needed */
4249 struct xfs_bmbt_irec *mval, /* output: map values */
4250 int *nmap) /* i/o: mval size/count */
4251 {
4252 struct xfs_mount *mp = ip->i_mount;
4253 struct xfs_ifork *ifp;
4254 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4255 xfs_fileoff_t end; /* end of mapped file region */
4256 bool eof = false; /* after the end of extents */
4257 int error; /* error return */
4258 int n; /* current extent index */
4259 xfs_fileoff_t obno; /* old block number (offset) */
4260 int whichfork; /* data or attr fork */
4261
4262 #ifdef DEBUG
4263 xfs_fileoff_t orig_bno; /* original block number value */
4264 int orig_flags; /* original flags arg value */
4265 xfs_filblks_t orig_len; /* original value of len arg */
4266 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4267 int orig_nmap; /* original value of *nmap */
4268
4269 orig_bno = bno;
4270 orig_len = len;
4271 orig_flags = flags;
4272 orig_mval = mval;
4273 orig_nmap = *nmap;
4274 #endif
4275 whichfork = xfs_bmapi_whichfork(flags);
4276
4277 ASSERT(*nmap >= 1);
4278 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4279 ASSERT(tp != NULL);
4280 ASSERT(len > 0);
4281 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4282 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4283 ASSERT(!(flags & XFS_BMAPI_REMAP));
4284
4285 /* zeroing is currently only for data extents, not metadata */
4286 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4287 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4288 /*
4289 * we can allocate unwritten extents or pre-zero allocated blocks,
4290 * but it makes no sense to do both at once. This would result in
4291 * zeroing the unwritten extent twice, while still leaving it an
4292 * unwritten extent....
4293 */
4294 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4295 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4296
4297 if (unlikely(XFS_TEST_ERROR(
4298 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4299 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4300 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4301 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4302 return -EFSCORRUPTED;
4303 }
4304
4305 if (XFS_FORCED_SHUTDOWN(mp))
4306 return -EIO;
4307
4308 ifp = XFS_IFORK_PTR(ip, whichfork);
4309
4310 XFS_STATS_INC(mp, xs_blk_mapw);
4311
4312 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4313 error = xfs_iread_extents(tp, ip, whichfork);
4314 if (error)
4315 goto error0;
4316 }
4317
4318 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4319 eof = true;
4320 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4321 bma.prev.br_startoff = NULLFILEOFF;
4322 bma.tp = tp;
4323 bma.ip = ip;
4324 bma.total = total;
4325 bma.datatype = 0;
4326 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4327
4328 n = 0;
4329 end = bno + len;
4330 obno = bno;
4331 while (bno < end && n < *nmap) {
4332 bool need_alloc = false, wasdelay = false;
4333
4334 /* in hole or beyond EOF? */
4335 if (eof || bma.got.br_startoff > bno) {
4336 /*
4337 * CoW fork conversions should /never/ hit EOF or
4338 * holes. There should always be something for us
4339 * to work on.
4340 */
4341 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4342 (flags & XFS_BMAPI_COWFORK)));
4343
4344 need_alloc = true;
4345 } else if (isnullstartblock(bma.got.br_startblock)) {
4346 wasdelay = true;
4347 }
4348
4349 /*
4350 * First, deal with the hole before the allocated space
4351 * that we found, if any.
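 * A hole requires a fresh allocation; a delalloc extent already carries
 * a reservation and is converted in place (the wasdelay case).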
4352 */ 4353 if (need_alloc || wasdelay) { 4354 bma.eof = eof; 4355 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4356 bma.wasdel = wasdelay; 4357 bma.offset = bno; 4358 bma.flags = flags; 4359 4360 /* 4361 * There's a 32/64 bit type mismatch between the 4362 * allocation length request (which can be 64 bits in 4363 * length) and the bma length request, which is 4364 * xfs_extlen_t and therefore 32 bits. Hence we have to 4365 * check for 32-bit overflows and handle them here. 4366 */ 4367 if (len > (xfs_filblks_t)MAXEXTLEN) 4368 bma.length = MAXEXTLEN; 4369 else 4370 bma.length = len; 4371 4372 ASSERT(len > 0); 4373 ASSERT(bma.length > 0); 4374 error = xfs_bmapi_allocate(&bma); 4375 if (error) 4376 goto error0; 4377 if (bma.blkno == NULLFSBLOCK) 4378 break; 4379 4380 /* 4381 * If this is a CoW allocation, record the data in 4382 * the refcount btree for orphan recovery. 4383 */ 4384 if (whichfork == XFS_COW_FORK) { 4385 error = xfs_refcount_alloc_cow_extent(tp, 4386 bma.blkno, bma.length); 4387 if (error) 4388 goto error0; 4389 } 4390 } 4391 4392 /* Deal with the allocated space we found. */ 4393 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4394 end, n, flags); 4395 4396 /* Execute unwritten extent conversion if necessary */ 4397 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4398 if (error == -EAGAIN) 4399 continue; 4400 if (error) 4401 goto error0; 4402 4403 /* update the extent map to return */ 4404 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4405 4406 /* 4407 * If we're done, stop now. Stop when we've allocated 4408 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4409 * the transaction may get too big. 4410 */ 4411 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4412 break; 4413 4414 /* Else go on to the next record. */ 4415 bma.prev = bma.got; 4416 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got)) 4417 eof = true; 4418 } 4419 *nmap = n; 4420 4421 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4422 whichfork); 4423 if (error) 4424 goto error0; 4425 4426 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || 4427 XFS_IFORK_NEXTENTS(ip, whichfork) > 4428 XFS_IFORK_MAXEXT(ip, whichfork)); 4429 xfs_bmapi_finish(&bma, whichfork, 0); 4430 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4431 orig_nmap, *nmap); 4432 return 0; 4433 error0: 4434 xfs_bmapi_finish(&bma, whichfork, error); 4435 return error; 4436 } 4437 4438 /* 4439 * Convert an existing delalloc extent to real blocks based on file offset. This 4440 * attempts to allocate the entire delalloc extent and may require multiple 4441 * invocations to allocate the target offset if a large enough physical extent 4442 * is not available. 4443 */ 4444 int 4445 xfs_bmapi_convert_delalloc( 4446 struct xfs_inode *ip, 4447 int whichfork, 4448 xfs_fileoff_t offset_fsb, 4449 struct xfs_bmbt_irec *imap, 4450 unsigned int *seq) 4451 { 4452 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4453 struct xfs_mount *mp = ip->i_mount; 4454 struct xfs_bmalloca bma = { NULL }; 4455 struct xfs_trans *tp; 4456 int error; 4457 4458 /* 4459 * Space for the extent and indirect blocks was reserved when the 4460 * delalloc extent was created so there's no need to do so here. 
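 * That is also why the transaction is allocated below with a zero block
 * count and XFS_TRANS_RESERVE: it may dip into the reserved block pool
 * but must not reserve anything new.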
4461 */ 4462 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 4463 XFS_TRANS_RESERVE, &tp); 4464 if (error) 4465 return error; 4466 4467 xfs_ilock(ip, XFS_ILOCK_EXCL); 4468 xfs_trans_ijoin(tp, ip, 0); 4469 4470 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) || 4471 bma.got.br_startoff > offset_fsb) { 4472 /* 4473 * No extent found in the range we are trying to convert. This 4474 * should only happen for the COW fork, where another thread 4475 * might have moved the extent to the data fork in the meantime. 4476 */ 4477 WARN_ON_ONCE(whichfork != XFS_COW_FORK); 4478 error = -EAGAIN; 4479 goto out_trans_cancel; 4480 } 4481 4482 /* 4483 * If we find a real extent here we raced with another thread converting 4484 * the extent. Just return the real extent at this offset. 4485 */ 4486 if (!isnullstartblock(bma.got.br_startblock)) { 4487 *imap = bma.got; 4488 *seq = READ_ONCE(ifp->if_seq); 4489 goto out_trans_cancel; 4490 } 4491 4492 bma.tp = tp; 4493 bma.ip = ip; 4494 bma.wasdel = true; 4495 bma.offset = bma.got.br_startoff; 4496 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN); 4497 bma.total = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK); 4498 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4499 if (whichfork == XFS_COW_FORK) 4500 bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC; 4501 4502 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4503 bma.prev.br_startoff = NULLFILEOFF; 4504 4505 error = xfs_bmapi_allocate(&bma); 4506 if (error) 4507 goto out_finish; 4508 4509 error = -ENOSPC; 4510 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK)) 4511 goto out_finish; 4512 error = -EFSCORRUPTED; 4513 if (WARN_ON_ONCE(!bma.got.br_startblock && !XFS_IS_REALTIME_INODE(ip))) 4514 goto out_finish; 4515 4516 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length)); 4517 XFS_STATS_INC(mp, xs_xstrat_quick); 4518 4519 ASSERT(!isnullstartblock(bma.got.br_startblock)); 4520 *imap = bma.got; 4521 *seq = READ_ONCE(ifp->if_seq); 4522 4523 if (whichfork == XFS_COW_FORK) { 4524 error = xfs_refcount_alloc_cow_extent(tp, bma.blkno, 4525 bma.length); 4526 if (error) 4527 goto out_finish; 4528 } 4529 4530 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4531 whichfork); 4532 if (error) 4533 goto out_finish; 4534 4535 xfs_bmapi_finish(&bma, whichfork, 0); 4536 error = xfs_trans_commit(tp); 4537 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4538 return error; 4539 4540 out_finish: 4541 xfs_bmapi_finish(&bma, whichfork, error); 4542 out_trans_cancel: 4543 xfs_trans_cancel(tp); 4544 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4545 return error; 4546 } 4547 4548 int 4549 xfs_bmapi_remap( 4550 struct xfs_trans *tp, 4551 struct xfs_inode *ip, 4552 xfs_fileoff_t bno, 4553 xfs_filblks_t len, 4554 xfs_fsblock_t startblock, 4555 int flags) 4556 { 4557 struct xfs_mount *mp = ip->i_mount; 4558 struct xfs_ifork *ifp; 4559 struct xfs_btree_cur *cur = NULL; 4560 struct xfs_bmbt_irec got; 4561 struct xfs_iext_cursor icur; 4562 int whichfork = xfs_bmapi_whichfork(flags); 4563 int logflags = 0, error; 4564 4565 ifp = XFS_IFORK_PTR(ip, whichfork); 4566 ASSERT(len > 0); 4567 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN); 4568 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4569 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC | 4570 XFS_BMAPI_NORMAP))); 4571 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != 4572 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); 4573 4574 if (unlikely(XFS_TEST_ERROR( 4575 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4576 
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4577 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4578 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp); 4579 return -EFSCORRUPTED; 4580 } 4581 4582 if (XFS_FORCED_SHUTDOWN(mp)) 4583 return -EIO; 4584 4585 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4586 error = xfs_iread_extents(tp, ip, whichfork); 4587 if (error) 4588 return error; 4589 } 4590 4591 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) { 4592 /* make sure we only reflink into a hole. */ 4593 ASSERT(got.br_startoff > bno); 4594 ASSERT(got.br_startoff - bno >= len); 4595 } 4596 4597 ip->i_d.di_nblocks += len; 4598 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4599 4600 if (ifp->if_flags & XFS_IFBROOT) { 4601 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 4602 cur->bc_private.b.flags = 0; 4603 } 4604 4605 got.br_startoff = bno; 4606 got.br_startblock = startblock; 4607 got.br_blockcount = len; 4608 if (flags & XFS_BMAPI_PREALLOC) 4609 got.br_state = XFS_EXT_UNWRITTEN; 4610 else 4611 got.br_state = XFS_EXT_NORM; 4612 4613 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 4614 &cur, &got, &logflags, flags); 4615 if (error) 4616 goto error0; 4617 4618 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork); 4619 4620 error0: 4621 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) 4622 logflags &= ~XFS_ILOG_DEXT; 4623 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) 4624 logflags &= ~XFS_ILOG_DBROOT; 4625 4626 if (logflags) 4627 xfs_trans_log_inode(tp, ip, logflags); 4628 if (cur) 4629 xfs_btree_del_cursor(cur, error); 4630 return error; 4631 } 4632 4633 /* 4634 * When a delalloc extent is split (e.g., due to a hole punch), the original 4635 * indlen reservation must be shared across the two new extents that are left 4636 * behind. 4637 * 4638 * Given the original reservation and the worst case indlen for the two new 4639 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4640 * reservation fairly across the two new extents. If necessary, steal available 4641 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4642 * ores == 1). The number of stolen blocks is returned. The availability and 4643 * subsequent accounting of stolen blocks is the responsibility of the caller. 4644 */ 4645 static xfs_filblks_t 4646 xfs_bmap_split_indlen( 4647 xfs_filblks_t ores, /* original res. */ 4648 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4649 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4650 xfs_filblks_t avail) /* stealable blocks */ 4651 { 4652 xfs_filblks_t len1 = *indlen1; 4653 xfs_filblks_t len2 = *indlen2; 4654 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4655 xfs_filblks_t stolen = 0; 4656 xfs_filblks_t resfactor; 4657 4658 /* 4659 * Steal as many blocks as we can to try and satisfy the worst case 4660 * indlen for both new extents. 4661 */ 4662 if (ores < nres && avail) 4663 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4664 ores += stolen; 4665 4666 /* nothing else to do if we've satisfied the new reservation */ 4667 if (ores >= nres) 4668 return stolen; 4669 4670 /* 4671 * We can't meet the total required reservation for the two extents. 4672 * Calculate the percent of the overall shortage between both extents 4673 * and apply this percentage to each of the requested indlen values. 4674 * This distributes the shortage fairly and reduces the chances that one 4675 * of the two extents is left with nothing when extents are repeatedly 4676 * split. 
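 *
 * A worked example with illustrative numbers: ores = 10 and worst-case
 * indlens of 8 and 6 give nres = 14 and resfactor = 71. Scaling yields
 * len1 = 5 and len2 = 4, and the loop below hands out the one leftover
 * block, leaving the two extents with reservations of 6 and 4 blocks.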
4677 */ 4678 resfactor = (ores * 100); 4679 do_div(resfactor, nres); 4680 len1 *= resfactor; 4681 do_div(len1, 100); 4682 len2 *= resfactor; 4683 do_div(len2, 100); 4684 ASSERT(len1 + len2 <= ores); 4685 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4686 4687 /* 4688 * Hand out the remainder to each extent. If one of the two reservations 4689 * is zero, we want to make sure that one gets a block first. The loop 4690 * below starts with len1, so hand len2 a block right off the bat if it 4691 * is zero. 4692 */ 4693 ores -= (len1 + len2); 4694 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4695 if (ores && !len2 && *indlen2) { 4696 len2++; 4697 ores--; 4698 } 4699 while (ores) { 4700 if (len1 < *indlen1) { 4701 len1++; 4702 ores--; 4703 } 4704 if (!ores) 4705 break; 4706 if (len2 < *indlen2) { 4707 len2++; 4708 ores--; 4709 } 4710 } 4711 4712 *indlen1 = len1; 4713 *indlen2 = len2; 4714 4715 return stolen; 4716 } 4717 4718 int 4719 xfs_bmap_del_extent_delay( 4720 struct xfs_inode *ip, 4721 int whichfork, 4722 struct xfs_iext_cursor *icur, 4723 struct xfs_bmbt_irec *got, 4724 struct xfs_bmbt_irec *del) 4725 { 4726 struct xfs_mount *mp = ip->i_mount; 4727 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4728 struct xfs_bmbt_irec new; 4729 int64_t da_old, da_new, da_diff = 0; 4730 xfs_fileoff_t del_endoff, got_endoff; 4731 xfs_filblks_t got_indlen, new_indlen, stolen; 4732 int state = xfs_bmap_fork_to_state(whichfork); 4733 int error = 0; 4734 bool isrt; 4735 4736 XFS_STATS_INC(mp, xs_del_exlist); 4737 4738 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4739 del_endoff = del->br_startoff + del->br_blockcount; 4740 got_endoff = got->br_startoff + got->br_blockcount; 4741 da_old = startblockval(got->br_startblock); 4742 da_new = 0; 4743 4744 ASSERT(del->br_blockcount > 0); 4745 ASSERT(got->br_startoff <= del->br_startoff); 4746 ASSERT(got_endoff >= del_endoff); 4747 4748 if (isrt) { 4749 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4750 4751 do_div(rtexts, mp->m_sb.sb_rextsize); 4752 xfs_mod_frextents(mp, rtexts); 4753 } 4754 4755 /* 4756 * Update the inode delalloc counter now and wait to update the 4757 * sb counters as we might have to borrow some blocks for the 4758 * indirect block accounting. 4759 */ 4760 error = xfs_trans_reserve_quota_nblks(NULL, ip, 4761 -((long)del->br_blockcount), 0, 4762 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4763 if (error) 4764 return error; 4765 ip->i_delayed_blks -= del->br_blockcount; 4766 4767 if (got->br_startoff == del->br_startoff) 4768 state |= BMAP_LEFT_FILLING; 4769 if (got_endoff == del_endoff) 4770 state |= BMAP_RIGHT_FILLING; 4771 4772 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4773 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4774 /* 4775 * Matches the whole extent. Delete the entry. 4776 */ 4777 xfs_iext_remove(ip, icur, state); 4778 xfs_iext_prev(ifp, icur); 4779 break; 4780 case BMAP_LEFT_FILLING: 4781 /* 4782 * Deleting the first part of the extent. 4783 */ 4784 got->br_startoff = del_endoff; 4785 got->br_blockcount -= del->br_blockcount; 4786 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4787 got->br_blockcount), da_old); 4788 got->br_startblock = nullstartblock((int)da_new); 4789 xfs_iext_update_extent(ip, state, icur, got); 4790 break; 4791 case BMAP_RIGHT_FILLING: 4792 /* 4793 * Deleting the last part of the extent. 
4794 */ 4795 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4796 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4797 got->br_blockcount), da_old); 4798 got->br_startblock = nullstartblock((int)da_new); 4799 xfs_iext_update_extent(ip, state, icur, got); 4800 break; 4801 case 0: 4802 /* 4803 * Deleting the middle of the extent. 4804 * 4805 * Distribute the original indlen reservation across the two new 4806 * extents. Steal blocks from the deleted extent if necessary. 4807 * Stealing blocks simply fudges the fdblocks accounting below. 4808 * Warn if either of the new indlen reservations is zero as this 4809 * can lead to delalloc problems. 4810 */ 4811 got->br_blockcount = del->br_startoff - got->br_startoff; 4812 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4813 4814 new.br_blockcount = got_endoff - del_endoff; 4815 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4816 4817 WARN_ON_ONCE(!got_indlen || !new_indlen); 4818 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4819 del->br_blockcount); 4820 4821 got->br_startblock = nullstartblock((int)got_indlen); 4822 4823 new.br_startoff = del_endoff; 4824 new.br_state = got->br_state; 4825 new.br_startblock = nullstartblock((int)new_indlen); 4826 4827 xfs_iext_update_extent(ip, state, icur, got); 4828 xfs_iext_next(ifp, icur); 4829 xfs_iext_insert(ip, icur, &new, state); 4830 4831 da_new = got_indlen + new_indlen - stolen; 4832 del->br_blockcount -= stolen; 4833 break; 4834 } 4835 4836 ASSERT(da_old >= da_new); 4837 da_diff = da_old - da_new; 4838 if (!isrt) 4839 da_diff += del->br_blockcount; 4840 if (da_diff) 4841 xfs_mod_fdblocks(mp, da_diff, false); 4842 return error; 4843 } 4844 4845 void 4846 xfs_bmap_del_extent_cow( 4847 struct xfs_inode *ip, 4848 struct xfs_iext_cursor *icur, 4849 struct xfs_bmbt_irec *got, 4850 struct xfs_bmbt_irec *del) 4851 { 4852 struct xfs_mount *mp = ip->i_mount; 4853 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4854 struct xfs_bmbt_irec new; 4855 xfs_fileoff_t del_endoff, got_endoff; 4856 int state = BMAP_COWFORK; 4857 4858 XFS_STATS_INC(mp, xs_del_exlist); 4859 4860 del_endoff = del->br_startoff + del->br_blockcount; 4861 got_endoff = got->br_startoff + got->br_blockcount; 4862 4863 ASSERT(del->br_blockcount > 0); 4864 ASSERT(got->br_startoff <= del->br_startoff); 4865 ASSERT(got_endoff >= del_endoff); 4866 ASSERT(!isnullstartblock(got->br_startblock)); 4867 4868 if (got->br_startoff == del->br_startoff) 4869 state |= BMAP_LEFT_FILLING; 4870 if (got_endoff == del_endoff) 4871 state |= BMAP_RIGHT_FILLING; 4872 4873 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4874 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4875 /* 4876 * Matches the whole extent. Delete the entry. 4877 */ 4878 xfs_iext_remove(ip, icur, state); 4879 xfs_iext_prev(ifp, icur); 4880 break; 4881 case BMAP_LEFT_FILLING: 4882 /* 4883 * Deleting the first part of the extent. 4884 */ 4885 got->br_startoff = del_endoff; 4886 got->br_blockcount -= del->br_blockcount; 4887 got->br_startblock = del->br_startblock + del->br_blockcount; 4888 xfs_iext_update_extent(ip, state, icur, got); 4889 break; 4890 case BMAP_RIGHT_FILLING: 4891 /* 4892 * Deleting the last part of the extent. 4893 */ 4894 got->br_blockcount -= del->br_blockcount; 4895 xfs_iext_update_extent(ip, state, icur, got); 4896 break; 4897 case 0: 4898 /* 4899 * Deleting the middle of the extent. 
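 *
 * Before:  |<--------------- got --------------->|
 * Delete:            |<------ del ------>|
 * After:   |<- got ->|                   |<- new ->|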
4900 */ 4901 got->br_blockcount = del->br_startoff - got->br_startoff; 4902 4903 new.br_startoff = del_endoff; 4904 new.br_blockcount = got_endoff - del_endoff; 4905 new.br_state = got->br_state; 4906 new.br_startblock = del->br_startblock + del->br_blockcount; 4907 4908 xfs_iext_update_extent(ip, state, icur, got); 4909 xfs_iext_next(ifp, icur); 4910 xfs_iext_insert(ip, icur, &new, state); 4911 break; 4912 } 4913 ip->i_delayed_blks -= del->br_blockcount; 4914 } 4915 4916 /* 4917 * Called by xfs_bmapi to update file extent records and the btree 4918 * after removing space. 4919 */ 4920 STATIC int /* error */ 4921 xfs_bmap_del_extent_real( 4922 xfs_inode_t *ip, /* incore inode pointer */ 4923 xfs_trans_t *tp, /* current transaction pointer */ 4924 struct xfs_iext_cursor *icur, 4925 xfs_btree_cur_t *cur, /* if null, not a btree */ 4926 xfs_bmbt_irec_t *del, /* data to remove from extents */ 4927 int *logflagsp, /* inode logging flags */ 4928 int whichfork, /* data or attr fork */ 4929 int bflags) /* bmapi flags */ 4930 { 4931 xfs_fsblock_t del_endblock=0; /* first block past del */ 4932 xfs_fileoff_t del_endoff; /* first offset past del */ 4933 int do_fx; /* free extent at end of routine */ 4934 int error; /* error return value */ 4935 int flags = 0;/* inode logging flags */ 4936 struct xfs_bmbt_irec got; /* current extent entry */ 4937 xfs_fileoff_t got_endoff; /* first offset past got */ 4938 int i; /* temp state */ 4939 struct xfs_ifork *ifp; /* inode fork pointer */ 4940 xfs_mount_t *mp; /* mount structure */ 4941 xfs_filblks_t nblks; /* quota/sb block count */ 4942 xfs_bmbt_irec_t new; /* new record to be inserted */ 4943 /* REFERENCED */ 4944 uint qfield; /* quota field to update */ 4945 int state = xfs_bmap_fork_to_state(whichfork); 4946 struct xfs_bmbt_irec old; 4947 4948 mp = ip->i_mount; 4949 XFS_STATS_INC(mp, xs_del_exlist); 4950 4951 ifp = XFS_IFORK_PTR(ip, whichfork); 4952 ASSERT(del->br_blockcount > 0); 4953 xfs_iext_get_extent(ifp, icur, &got); 4954 ASSERT(got.br_startoff <= del->br_startoff); 4955 del_endoff = del->br_startoff + del->br_blockcount; 4956 got_endoff = got.br_startoff + got.br_blockcount; 4957 ASSERT(got_endoff >= del_endoff); 4958 ASSERT(!isnullstartblock(got.br_startblock)); 4959 qfield = 0; 4960 error = 0; 4961 4962 /* 4963 * If it's the case where the directory code is running with no block 4964 * reservation, and the deleted block is in the middle of its extent, 4965 * and the resulting insert of an extent would cause transformation to 4966 * btree format, then reject it. The calling code will then swap blocks 4967 * around instead. We have to do this now, rather than waiting for the 4968 * conversion to btree format, since the transaction will be dirty then. 
4969 */
4970 if (tp->t_blk_res == 0 &&
4971 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
4972 XFS_IFORK_NEXTENTS(ip, whichfork) >=
4973 XFS_IFORK_MAXEXT(ip, whichfork) &&
4974 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
4975 return -ENOSPC;
4976
4977 flags = XFS_ILOG_CORE;
4978 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4979 xfs_fsblock_t bno;
4980 xfs_filblks_t len;
4981 xfs_extlen_t mod;
4982
4983 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
4984 &mod);
4985 ASSERT(mod == 0);
4986 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
4987 &mod);
4988 ASSERT(mod == 0);
4989
4990 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
4991 if (error)
4992 goto done;
4993 do_fx = 0;
4994 nblks = len * mp->m_sb.sb_rextsize;
4995 qfield = XFS_TRANS_DQ_RTBCOUNT;
4996 } else {
4997 do_fx = 1;
4998 nblks = del->br_blockcount;
4999 qfield = XFS_TRANS_DQ_BCOUNT;
5000 }
5001
5002 del_endblock = del->br_startblock + del->br_blockcount;
5003 if (cur) {
5004 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5005 if (error)
5006 goto done;
5007 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5008 }
5009
5010 if (got.br_startoff == del->br_startoff)
5011 state |= BMAP_LEFT_FILLING;
5012 if (got_endoff == del_endoff)
5013 state |= BMAP_RIGHT_FILLING;
5014
5015 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5016 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5017 /*
5018 * Matches the whole extent. Delete the entry.
5019 */
5020 xfs_iext_remove(ip, icur, state);
5021 xfs_iext_prev(ifp, icur);
5022 XFS_IFORK_NEXT_SET(ip, whichfork,
5023 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5024 flags |= XFS_ILOG_CORE;
5025 if (!cur) {
5026 flags |= xfs_ilog_fext(whichfork);
5027 break;
5028 }
5029 if ((error = xfs_btree_delete(cur, &i)))
5030 goto done;
5031 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5032 break;
5033 case BMAP_LEFT_FILLING:
5034 /*
5035 * Deleting the first part of the extent.
5036 */
5037 got.br_startoff = del_endoff;
5038 got.br_startblock = del_endblock;
5039 got.br_blockcount -= del->br_blockcount;
5040 xfs_iext_update_extent(ip, state, icur, &got);
5041 if (!cur) {
5042 flags |= xfs_ilog_fext(whichfork);
5043 break;
5044 }
5045 error = xfs_bmbt_update(cur, &got);
5046 if (error)
5047 goto done;
5048 break;
5049 case BMAP_RIGHT_FILLING:
5050 /*
5051 * Deleting the last part of the extent.
5052 */
5053 got.br_blockcount -= del->br_blockcount;
5054 xfs_iext_update_extent(ip, state, icur, &got);
5055 if (!cur) {
5056 flags |= xfs_ilog_fext(whichfork);
5057 break;
5058 }
5059 error = xfs_bmbt_update(cur, &got);
5060 if (error)
5061 goto done;
5062 break;
5063 case 0:
5064 /*
5065 * Deleting the middle of the extent.
5066 */
5067 old = got;
5068
5069 got.br_blockcount = del->br_startoff - got.br_startoff;
5070 xfs_iext_update_extent(ip, state, icur, &got);
5071
5072 new.br_startoff = del_endoff;
5073 new.br_blockcount = got_endoff - del_endoff;
5074 new.br_state = got.br_state;
5075 new.br_startblock = del_endblock;
5076
5077 flags |= XFS_ILOG_CORE;
5078 if (cur) {
5079 error = xfs_bmbt_update(cur, &got);
5080 if (error)
5081 goto done;
5082 error = xfs_btree_increment(cur, 0, &i);
5083 if (error)
5084 goto done;
5085 cur->bc_rec.b = new;
5086 error = xfs_btree_insert(cur, &i);
5087 if (error && error != -ENOSPC)
5088 goto done;
5089 /*
5090 * If we get no space back from the btree insert, it
5091 * tried a split and we have a zero block reservation.
5092 * Fix up our state and return the error.
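 * Both the btree record and the incore extent are put back to their
 * original values below, so the caller sees an unchanged fork alongside
 * the -ENOSPC return.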
5093 */
5094 if (error == -ENOSPC) {
5095 /*
5096 * Reset the cursor, don't trust it after any
5097 * insert operation.
5098 */
5099 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5100 if (error)
5101 goto done;
5102 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5103 /*
5104 * Update the btree record back
5105 * to the original value.
5106 */
5107 error = xfs_bmbt_update(cur, &old);
5108 if (error)
5109 goto done;
5110 /*
5111 * Reset the extent record back
5112 * to the original value.
5113 */
5114 xfs_iext_update_extent(ip, state, icur, &old);
5115 flags = 0;
5116 error = -ENOSPC;
5117 goto done;
5118 }
5119 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5120 } else
5121 flags |= xfs_ilog_fext(whichfork);
5122 XFS_IFORK_NEXT_SET(ip, whichfork,
5123 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5124 xfs_iext_next(ifp, icur);
5125 xfs_iext_insert(ip, icur, &new, state);
5126 break;
5127 }
5128
5129 /* remove reverse mapping */
5130 error = xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5131 if (error)
5132 goto done;
5133
5134 /*
5135 * If we need to, add to list of extents to delete.
5136 */
5137 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5138 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5139 error = xfs_refcount_decrease_extent(tp, del);
5140 if (error)
5141 goto done;
5142 } else {
5143 __xfs_bmap_add_free(tp, del->br_startblock,
5144 del->br_blockcount, NULL,
5145 (bflags & XFS_BMAPI_NODISCARD) ||
5146 del->br_state == XFS_EXT_UNWRITTEN);
5147 }
5148 }
5149
5150 /*
5151 * Adjust inode # blocks in the file.
5152 */
5153 if (nblks)
5154 ip->i_d.di_nblocks -= nblks;
5155 /*
5156 * Adjust quota data.
5157 */
5158 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5159 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5160
5161 done:
5162 *logflagsp = flags;
5163 return error;
5164 }
5165
5166 /*
5167 * Unmap (remove) blocks from a file.
5168 * If nexts is nonzero then the number of extents to remove is limited to
5169 * that value. If not all extents in the block range can be removed then
5170 * *rlen reports the length that remains unmapped.
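 *
 * A minimal caller sketch (illustrative only; names are hypothetical
 * and the transaction rolling a real caller needs between passes is
 * elided):
 *
 *	xfs_filblks_t	unmap_len = len;
 *	int		error = 0;
 *
 *	while (unmap_len > 0 && !error) {
 *		error = __xfs_bunmapi(tp, ip, start_fsb, &unmap_len,
 *				flags, 1);
 *		... roll the transaction before the next pass ...
 *	}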
5171 */ 5172 int /* error */ 5173 __xfs_bunmapi( 5174 struct xfs_trans *tp, /* transaction pointer */ 5175 struct xfs_inode *ip, /* incore inode */ 5176 xfs_fileoff_t start, /* first file offset deleted */ 5177 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5178 int flags, /* misc flags */ 5179 xfs_extnum_t nexts) /* number of extents max */ 5180 { 5181 struct xfs_btree_cur *cur; /* bmap btree cursor */ 5182 struct xfs_bmbt_irec del; /* extent being deleted */ 5183 int error; /* error return value */ 5184 xfs_extnum_t extno; /* extent number in list */ 5185 struct xfs_bmbt_irec got; /* current extent record */ 5186 struct xfs_ifork *ifp; /* inode fork pointer */ 5187 int isrt; /* freeing in rt area */ 5188 int logflags; /* transaction logging flags */ 5189 xfs_extlen_t mod; /* rt extent offset */ 5190 struct xfs_mount *mp; /* mount structure */ 5191 int tmp_logflags; /* partial logging flags */ 5192 int wasdel; /* was a delayed alloc extent */ 5193 int whichfork; /* data or attribute fork */ 5194 xfs_fsblock_t sum; 5195 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5196 xfs_fileoff_t max_len; 5197 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno; 5198 xfs_fileoff_t end; 5199 struct xfs_iext_cursor icur; 5200 bool done = false; 5201 5202 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); 5203 5204 whichfork = xfs_bmapi_whichfork(flags); 5205 ASSERT(whichfork != XFS_COW_FORK); 5206 ifp = XFS_IFORK_PTR(ip, whichfork); 5207 if (unlikely( 5208 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5209 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5210 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5211 ip->i_mount); 5212 return -EFSCORRUPTED; 5213 } 5214 mp = ip->i_mount; 5215 if (XFS_FORCED_SHUTDOWN(mp)) 5216 return -EIO; 5217 5218 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5219 ASSERT(len > 0); 5220 ASSERT(nexts >= 0); 5221 5222 /* 5223 * Guesstimate how many blocks we can unmap without running the risk of 5224 * blowing out the transaction with a mix of EFIs and reflink 5225 * adjustments. 5226 */ 5227 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) 5228 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res)); 5229 else 5230 max_len = len; 5231 5232 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5233 (error = xfs_iread_extents(tp, ip, whichfork))) 5234 return error; 5235 if (xfs_iext_count(ifp) == 0) { 5236 *rlen = 0; 5237 return 0; 5238 } 5239 XFS_STATS_INC(mp, xs_blk_unmap); 5240 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5241 end = start + len; 5242 5243 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) { 5244 *rlen = 0; 5245 return 0; 5246 } 5247 end--; 5248 5249 logflags = 0; 5250 if (ifp->if_flags & XFS_IFBROOT) { 5251 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5252 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5253 cur->bc_private.b.flags = 0; 5254 } else 5255 cur = NULL; 5256 5257 if (isrt) { 5258 /* 5259 * Synchronize by locking the bitmap inode. 5260 */ 5261 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5262 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5263 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5264 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5265 } 5266 5267 extno = 0; 5268 while (end != (xfs_fileoff_t)-1 && end >= start && 5269 (nexts == 0 || extno < nexts) && max_len > 0) { 5270 /* 5271 * Is the found extent after a hole in which end lives? 5272 * Just back up to the previous extent, if so. 
5273 */ 5274 if (got.br_startoff > end && 5275 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5276 done = true; 5277 break; 5278 } 5279 /* 5280 * Is the last block of this extent before the range 5281 * we're supposed to delete? If so, we're done. 5282 */ 5283 end = XFS_FILEOFF_MIN(end, 5284 got.br_startoff + got.br_blockcount - 1); 5285 if (end < start) 5286 break; 5287 /* 5288 * Then deal with the (possibly delayed) allocated space 5289 * we found. 5290 */ 5291 del = got; 5292 wasdel = isnullstartblock(del.br_startblock); 5293 5294 /* 5295 * Make sure we don't touch multiple AGF headers out of order 5296 * in a single transaction, as that could cause AB-BA deadlocks. 5297 */ 5298 if (!wasdel) { 5299 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock); 5300 if (prev_agno != NULLAGNUMBER && prev_agno > agno) 5301 break; 5302 prev_agno = agno; 5303 } 5304 if (got.br_startoff < start) { 5305 del.br_startoff = start; 5306 del.br_blockcount -= start - got.br_startoff; 5307 if (!wasdel) 5308 del.br_startblock += start - got.br_startoff; 5309 } 5310 if (del.br_startoff + del.br_blockcount > end + 1) 5311 del.br_blockcount = end + 1 - del.br_startoff; 5312 5313 /* How much can we safely unmap? */ 5314 if (max_len < del.br_blockcount) { 5315 del.br_startoff += del.br_blockcount - max_len; 5316 if (!wasdel) 5317 del.br_startblock += del.br_blockcount - max_len; 5318 del.br_blockcount = max_len; 5319 } 5320 5321 if (!isrt) 5322 goto delete; 5323 5324 sum = del.br_startblock + del.br_blockcount; 5325 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod); 5326 if (mod) { 5327 /* 5328 * Realtime extent not lined up at the end. 5329 * The extent could have been split into written 5330 * and unwritten pieces, or we could just be 5331 * unmapping part of it. But we can't really 5332 * get rid of part of a realtime extent. 5333 */ 5334 if (del.br_state == XFS_EXT_UNWRITTEN) { 5335 /* 5336 * This piece is unwritten, or we're not 5337 * using unwritten extents. Skip over it. 5338 */ 5339 ASSERT(end >= mod); 5340 end -= mod > del.br_blockcount ? 5341 del.br_blockcount : mod; 5342 if (end < got.br_startoff && 5343 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5344 done = true; 5345 break; 5346 } 5347 continue; 5348 } 5349 /* 5350 * It's written, turn it unwritten. 5351 * This is better than zeroing it. 5352 */ 5353 ASSERT(del.br_state == XFS_EXT_NORM); 5354 ASSERT(tp->t_blk_res > 0); 5355 /* 5356 * If this spans a realtime extent boundary, 5357 * chop it back to the start of the one we end at. 5358 */ 5359 if (del.br_blockcount > mod) { 5360 del.br_startoff += del.br_blockcount - mod; 5361 del.br_startblock += del.br_blockcount - mod; 5362 del.br_blockcount = mod; 5363 } 5364 del.br_state = XFS_EXT_UNWRITTEN; 5365 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5366 whichfork, &icur, &cur, &del, 5367 &logflags); 5368 if (error) 5369 goto error0; 5370 goto nodelete; 5371 } 5372 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); 5373 if (mod) { 5374 /* 5375 * Realtime extent is lined up at the end but not 5376 * at the front. We'll get rid of full extents if 5377 * we can. 5378 */ 5379 mod = mp->m_sb.sb_rextsize - mod; 5380 if (del.br_blockcount > mod) { 5381 del.br_blockcount -= mod; 5382 del.br_startoff += mod; 5383 del.br_startblock += mod; 5384 } else if (del.br_startoff == start && 5385 (del.br_state == XFS_EXT_UNWRITTEN || 5386 tp->t_blk_res == 0)) { 5387 /* 5388 * Can't make it unwritten. There isn't 5389 * a full extent here so just skip it. 
5390 */ 5391 ASSERT(end >= del.br_blockcount); 5392 end -= del.br_blockcount; 5393 if (got.br_startoff > end && 5394 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5395 done = true; 5396 break; 5397 } 5398 continue; 5399 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5400 struct xfs_bmbt_irec prev; 5401 5402 /* 5403 * This one is already unwritten. 5404 * It must have a written left neighbor. 5405 * Unwrite the killed part of that one and 5406 * try again. 5407 */ 5408 if (!xfs_iext_prev_extent(ifp, &icur, &prev)) 5409 ASSERT(0); 5410 ASSERT(prev.br_state == XFS_EXT_NORM); 5411 ASSERT(!isnullstartblock(prev.br_startblock)); 5412 ASSERT(del.br_startblock == 5413 prev.br_startblock + prev.br_blockcount); 5414 if (prev.br_startoff < start) { 5415 mod = start - prev.br_startoff; 5416 prev.br_blockcount -= mod; 5417 prev.br_startblock += mod; 5418 prev.br_startoff = start; 5419 } 5420 prev.br_state = XFS_EXT_UNWRITTEN; 5421 error = xfs_bmap_add_extent_unwritten_real(tp, 5422 ip, whichfork, &icur, &cur, 5423 &prev, &logflags); 5424 if (error) 5425 goto error0; 5426 goto nodelete; 5427 } else { 5428 ASSERT(del.br_state == XFS_EXT_NORM); 5429 del.br_state = XFS_EXT_UNWRITTEN; 5430 error = xfs_bmap_add_extent_unwritten_real(tp, 5431 ip, whichfork, &icur, &cur, 5432 &del, &logflags); 5433 if (error) 5434 goto error0; 5435 goto nodelete; 5436 } 5437 } 5438 5439 delete: 5440 if (wasdel) { 5441 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5442 &got, &del); 5443 } else { 5444 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur, 5445 &del, &tmp_logflags, whichfork, 5446 flags); 5447 logflags |= tmp_logflags; 5448 } 5449 5450 if (error) 5451 goto error0; 5452 5453 max_len -= del.br_blockcount; 5454 end = del.br_startoff - 1; 5455 nodelete: 5456 /* 5457 * If not done go on to the next (previous) record. 5458 */ 5459 if (end != (xfs_fileoff_t)-1 && end >= start) { 5460 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5461 (got.br_startoff > end && 5462 !xfs_iext_prev_extent(ifp, &icur, &got))) { 5463 done = true; 5464 break; 5465 } 5466 extno++; 5467 } 5468 } 5469 if (done || end == (xfs_fileoff_t)-1 || end < start) 5470 *rlen = 0; 5471 else 5472 *rlen = end - start + 1; 5473 5474 /* 5475 * Convert to a btree if necessary. 5476 */ 5477 if (xfs_bmap_needs_btree(ip, whichfork)) { 5478 ASSERT(cur == NULL); 5479 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5480 &tmp_logflags, whichfork); 5481 logflags |= tmp_logflags; 5482 } else { 5483 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, 5484 whichfork); 5485 } 5486 5487 error0: 5488 /* 5489 * Log everything. Do this after conversion, there's no point in 5490 * logging the extent records if we've converted to btree format. 5491 */ 5492 if ((logflags & xfs_ilog_fext(whichfork)) && 5493 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5494 logflags &= ~xfs_ilog_fext(whichfork); 5495 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5496 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5497 logflags &= ~xfs_ilog_fbroot(whichfork); 5498 /* 5499 * Log inode even in the error case, if the transaction 5500 * is dirty we'll need to shut down the filesystem. 5501 */ 5502 if (logflags) 5503 xfs_trans_log_inode(tp, ip, logflags); 5504 if (cur) { 5505 if (!error) 5506 cur->bc_private.b.allocated = 0; 5507 xfs_btree_del_cursor(cur, error); 5508 } 5509 return error; 5510 } 5511 5512 /* Unmap a range of a file. 
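 * This is a thin wrapper around __xfs_bunmapi() for callers that only
 * need to know whether the whole range went away: *done is set once the
 * remaining length reaches zero.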
*/ 5513 int 5514 xfs_bunmapi( 5515 xfs_trans_t *tp, 5516 struct xfs_inode *ip, 5517 xfs_fileoff_t bno, 5518 xfs_filblks_t len, 5519 int flags, 5520 xfs_extnum_t nexts, 5521 int *done) 5522 { 5523 int error; 5524 5525 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts); 5526 *done = (len == 0); 5527 return error; 5528 } 5529 5530 /* 5531 * Determine whether an extent shift can be accomplished by a merge with the 5532 * extent that precedes the target hole of the shift. 5533 */ 5534 STATIC bool 5535 xfs_bmse_can_merge( 5536 struct xfs_bmbt_irec *left, /* preceding extent */ 5537 struct xfs_bmbt_irec *got, /* current extent to shift */ 5538 xfs_fileoff_t shift) /* shift fsb */ 5539 { 5540 xfs_fileoff_t startoff; 5541 5542 startoff = got->br_startoff - shift; 5543 5544 /* 5545 * The extent, once shifted, must be adjacent in-file and on-disk with 5546 * the preceding extent. 5547 */ 5548 if ((left->br_startoff + left->br_blockcount != startoff) || 5549 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5550 (left->br_state != got->br_state) || 5551 (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) 5552 return false; 5553 5554 return true; 5555 } 5556 5557 /* 5558 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5559 * hole in the file. If an extent shift would result in the extent being fully 5560 * adjacent to the extent that currently precedes the hole, we can merge with 5561 * the preceding extent rather than do the shift. 5562 * 5563 * This function assumes the caller has verified a shift-by-merge is possible 5564 * with the provided extents via xfs_bmse_can_merge(). 5565 */ 5566 STATIC int 5567 xfs_bmse_merge( 5568 struct xfs_trans *tp, 5569 struct xfs_inode *ip, 5570 int whichfork, 5571 xfs_fileoff_t shift, /* shift fsb */ 5572 struct xfs_iext_cursor *icur, 5573 struct xfs_bmbt_irec *got, /* extent to shift */ 5574 struct xfs_bmbt_irec *left, /* preceding extent */ 5575 struct xfs_btree_cur *cur, 5576 int *logflags) /* output */ 5577 { 5578 struct xfs_bmbt_irec new; 5579 xfs_filblks_t blockcount; 5580 int error, i; 5581 struct xfs_mount *mp = ip->i_mount; 5582 5583 blockcount = left->br_blockcount + got->br_blockcount; 5584 5585 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5586 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5587 ASSERT(xfs_bmse_can_merge(left, got, shift)); 5588 5589 new = *left; 5590 new.br_blockcount = blockcount; 5591 5592 /* 5593 * Update the on-disk extent count, the btree if necessary and log the 5594 * inode. 5595 */ 5596 XFS_IFORK_NEXT_SET(ip, whichfork, 5597 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5598 *logflags |= XFS_ILOG_CORE; 5599 if (!cur) { 5600 *logflags |= XFS_ILOG_DEXT; 5601 goto done; 5602 } 5603 5604 /* lookup and remove the extent to merge */ 5605 error = xfs_bmbt_lookup_eq(cur, got, &i); 5606 if (error) 5607 return error; 5608 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5609 5610 error = xfs_btree_delete(cur, &i); 5611 if (error) 5612 return error; 5613 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5614 5615 /* lookup and update size of the previous extent */ 5616 error = xfs_bmbt_lookup_eq(cur, left, &i); 5617 if (error) 5618 return error; 5619 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5620 5621 error = xfs_bmbt_update(cur, &new); 5622 if (error) 5623 return error; 5624 5625 done: 5626 xfs_iext_remove(ip, icur, 0); 5627 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur); 5628 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5629 &new); 5630 5631 /* update reverse mapping. 

/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	struct xfs_iext_cursor		*icur,
	struct xfs_bmbt_irec		*got,		/* extent to shift */
	struct xfs_bmbt_irec		*left,		/* preceding extent */
	struct xfs_btree_cur		*cur,
	int				*logflags)	/* output */
{
	struct xfs_bmbt_irec		new;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping; the rmap functions merge the rmaps for us */
	error = xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	if (error)
		return error;
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	return xfs_rmap_map_extent(tp, ip, whichfork, &new);
}

static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	if (error)
		return error;
	return xfs_rmap_map_extent(tp, ip, whichfork, got);
}
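
/*
 * In extent-record terms, a shift only moves br_startoff; the disk blocks
 * stay put (illustrative values):
 *
 *	before:	{ br_startoff = 20, br_startblock = 200, br_blockcount = 8 }
 *	after:	{ br_startoff = 12, br_startblock = 200, br_blockcount = 8 }
 *
 * which is why the reverse mapping above is unmapped at the old offset and
 * re-mapped at the new one.
 */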

int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
			del_cursor);

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
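
/*
 * Sketch of the wraparound that the check above rejects (hypothetical
 * helper, never called): the on-disk startoff is capped by
 * BMBT_STARTOFF_MASK, so if startoff + shift overflows the mask, the
 * masked result wraps below the original startoff and the shift must
 * be refused.
 */
static inline bool
xfs_shift_would_wrap_sketch(
	xfs_fileoff_t		startoff,
	xfs_fileoff_t		shift)
{
	return ((startoff + shift) & BMBT_STARTOFF_MASK) < startoff;
}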
5875 */ 5876 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb)) 5877 WARN_ON_ONCE(1); 5878 } 5879 5880 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got, 5881 cur, &logflags, new_startoff); 5882 if (error) 5883 goto del_cursor; 5884 5885 if (!xfs_iext_prev_extent(ifp, &icur, &got) || 5886 stop_fsb >= got.br_startoff + got.br_blockcount) { 5887 *done = true; 5888 goto del_cursor; 5889 } 5890 5891 *next_fsb = got.br_startoff; 5892 del_cursor: 5893 if (cur) 5894 xfs_btree_del_cursor(cur, error); 5895 if (logflags) 5896 xfs_trans_log_inode(tp, ip, logflags); 5897 return error; 5898 } 5899 5900 /* 5901 * Splits an extent into two extents at split_fsb block such that it is the 5902 * first block of the current_ext. @ext is a target extent to be split. 5903 * @split_fsb is a block where the extents is split. If split_fsb lies in a 5904 * hole or the first block of extents, just return 0. 5905 */ 5906 STATIC int 5907 xfs_bmap_split_extent_at( 5908 struct xfs_trans *tp, 5909 struct xfs_inode *ip, 5910 xfs_fileoff_t split_fsb) 5911 { 5912 int whichfork = XFS_DATA_FORK; 5913 struct xfs_btree_cur *cur = NULL; 5914 struct xfs_bmbt_irec got; 5915 struct xfs_bmbt_irec new; /* split extent */ 5916 struct xfs_mount *mp = ip->i_mount; 5917 struct xfs_ifork *ifp; 5918 xfs_fsblock_t gotblkcnt; /* new block count for got */ 5919 struct xfs_iext_cursor icur; 5920 int error = 0; 5921 int logflags = 0; 5922 int i = 0; 5923 5924 if (unlikely(XFS_TEST_ERROR( 5925 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5926 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 5927 mp, XFS_ERRTAG_BMAPIFORMAT))) { 5928 XFS_ERROR_REPORT("xfs_bmap_split_extent_at", 5929 XFS_ERRLEVEL_LOW, mp); 5930 return -EFSCORRUPTED; 5931 } 5932 5933 if (XFS_FORCED_SHUTDOWN(mp)) 5934 return -EIO; 5935 5936 ifp = XFS_IFORK_PTR(ip, whichfork); 5937 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 5938 /* Read in all the extents */ 5939 error = xfs_iread_extents(tp, ip, whichfork); 5940 if (error) 5941 return error; 5942 } 5943 5944 /* 5945 * If there are not extents, or split_fsb lies in a hole we are done. 
5946 */ 5947 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) || 5948 got.br_startoff >= split_fsb) 5949 return 0; 5950 5951 gotblkcnt = split_fsb - got.br_startoff; 5952 new.br_startoff = split_fsb; 5953 new.br_startblock = got.br_startblock + gotblkcnt; 5954 new.br_blockcount = got.br_blockcount - gotblkcnt; 5955 new.br_state = got.br_state; 5956 5957 if (ifp->if_flags & XFS_IFBROOT) { 5958 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5959 cur->bc_private.b.flags = 0; 5960 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5961 if (error) 5962 goto del_cursor; 5963 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); 5964 } 5965 5966 got.br_blockcount = gotblkcnt; 5967 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur, 5968 &got); 5969 5970 logflags = XFS_ILOG_CORE; 5971 if (cur) { 5972 error = xfs_bmbt_update(cur, &got); 5973 if (error) 5974 goto del_cursor; 5975 } else 5976 logflags |= XFS_ILOG_DEXT; 5977 5978 /* Add new extent */ 5979 xfs_iext_next(ifp, &icur); 5980 xfs_iext_insert(ip, &icur, &new, 0); 5981 XFS_IFORK_NEXT_SET(ip, whichfork, 5982 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5983 5984 if (cur) { 5985 error = xfs_bmbt_lookup_eq(cur, &new, &i); 5986 if (error) 5987 goto del_cursor; 5988 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor); 5989 error = xfs_btree_insert(cur, &i); 5990 if (error) 5991 goto del_cursor; 5992 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); 5993 } 5994 5995 /* 5996 * Convert to a btree if necessary. 5997 */ 5998 if (xfs_bmap_needs_btree(ip, whichfork)) { 5999 int tmp_logflags; /* partial log flag return val */ 6000 6001 ASSERT(cur == NULL); 6002 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 6003 &tmp_logflags, whichfork); 6004 logflags |= tmp_logflags; 6005 } 6006 6007 del_cursor: 6008 if (cur) { 6009 cur->bc_private.b.allocated = 0; 6010 xfs_btree_del_cursor(cur, error); 6011 } 6012 6013 if (logflags) 6014 xfs_trans_log_inode(tp, ip, logflags); 6015 return error; 6016 } 6017 6018 int 6019 xfs_bmap_split_extent( 6020 struct xfs_inode *ip, 6021 xfs_fileoff_t split_fsb) 6022 { 6023 struct xfs_mount *mp = ip->i_mount; 6024 struct xfs_trans *tp; 6025 int error; 6026 6027 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 6028 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp); 6029 if (error) 6030 return error; 6031 6032 xfs_ilock(ip, XFS_ILOCK_EXCL); 6033 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 6034 6035 error = xfs_bmap_split_extent_at(tp, ip, split_fsb); 6036 if (error) 6037 goto out; 6038 6039 return xfs_trans_commit(tp); 6040 6041 out: 6042 xfs_trans_cancel(tp); 6043 return error; 6044 } 6045 6046 /* Deferred mapping is only for real extents in the data fork. */ 6047 static bool 6048 xfs_bmap_is_update_needed( 6049 struct xfs_bmbt_irec *bmap) 6050 { 6051 return bmap->br_startblock != HOLESTARTBLOCK && 6052 bmap->br_startblock != DELAYSTARTBLOCK; 6053 } 6054 6055 /* Record a bmap intent. 

/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return	bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
int
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
int
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}

/*
 * Process one of the deferred bmap operations.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error = 0;

	ASSERT(tp->t_firstblock == NULLFSBLOCK);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
			ip->i_ino, whichfork, startoff, *blockcount, state);

	if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
				startblock, 0);
		*blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, ip, startoff, blockcount,
				XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}
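
/*
 * Deferred-op usage sketch (hypothetical helper, never called; error paths
 * and the xfs_defer_finish() that actually replays the intents through
 * xfs_bmap_finish_one() are elided): queue an unmap of an old mapping and
 * a map of its replacement in the same transaction.
 */
static inline int
xfs_bmap_defer_remap_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*old_map,
	struct xfs_bmbt_irec	*new_map)
{
	int			error;

	error = xfs_bmap_unmap_extent(tp, ip, old_map);
	if (error)
		return error;
	return xfs_bmap_map_extent(tp, ip, new_map);
}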

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		endfsb;
	bool			isrt;

	isrt = XFS_IS_REALTIME_INODE(ip);
	endfsb = irec->br_startblock + irec->br_blockcount - 1;
	if (isrt) {
		if (!xfs_verify_rtbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_rtbno(mp, endfsb))
			return __this_address;
	} else {
		if (!xfs_verify_fsbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_fsbno(mp, endfsb))
			return __this_address;
		if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
		    XFS_FSB_TO_AGNO(mp, endfsb))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}
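
/*
 * Usage sketch for the validator above (hypothetical helper, never called):
 * a NULL return means the record passed all checks, while a non-NULL value
 * is the address of the failing check, suitable for corruption reports.
 */
static inline bool
xfs_bmap_irec_ok_sketch(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	return xfs_bmap_validate_extent(ip, whichfork, irec) == NULL;
}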