// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"


kmem_zone_t             *xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
        xfs_mount_t     *mp,            /* file system mount structure */
        int             whichfork)      /* data or attr fork */
{
        int             level;          /* btree level */
        uint            maxblocks;      /* max blocks at this level */
        uint            maxleafents;    /* max leaf entries possible */
        int             maxrootrecs;    /* max records in root block */
        int             minleafrecs;    /* min records in leaf block */
        int             minnoderecs;    /* min records in node block */
        int             sz;             /* root block size */

        /*
         * The maximum number of extents in a file, hence the maximum
         * number of leaf entries, is controlled by the type of di_nextents
         * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
         * (a signed 16-bit number, xfs_aextnum_t).
         *
         * Note that we can no longer assume that if we are in ATTR1 that
         * the fork offset of all the inodes will be
         * (xfs_default_attroffset(ip) >> 3) because we could have mounted
         * with ATTR2 and then mounted back with ATTR1, keeping the
         * di_forkoff's fixed but probably at various positions.  Therefore,
         * for both ATTR1 and ATTR2 we have to assume the worst case scenario
         * of a minimum size available.
         */
        if (whichfork == XFS_DATA_FORK) {
                maxleafents = MAXEXTNUM;
                sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
        } else {
                maxleafents = MAXAEXTNUM;
                sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
        }
        maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
        minleafrecs = mp->m_bmap_dmnr[0];
        minnoderecs = mp->m_bmap_dmnr[1];
        maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
        for (level = 1; maxblocks > 1; level++) {
                if (maxblocks <= maxrootrecs)
                        maxblocks = 1;
                else
                        maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
        }
        mp->m_bm_maxlevels[whichfork] = level;
}
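
/*
 * Worked example of the loop above (illustrative numbers only, not from
 * any particular filesystem geometry): with maxleafents = 2^31 - 1 and
 * minleafrecs = minnoderecs = 125, the first division gives ~17.2M leaf
 * blocks; successive divisions by 125 give ~137k, 1100, and 9 blocks.
 * Once 9 fits in maxrootrecs (assumed here to be at least 9), maxblocks
 * collapses to 1 and the loop exits with a maximum depth of 5: one leaf
 * level plus four node/root levels.
 */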

STATIC int                              /* error */
xfs_bmbt_lookup_eq(
        struct xfs_btree_cur    *cur,
        struct xfs_bmbt_irec    *irec,
        int                     *stat)  /* success/failure */
{
        cur->bc_rec.b = *irec;
        return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int                              /* error */
xfs_bmbt_lookup_first(
        struct xfs_btree_cur    *cur,
        int                     *stat)  /* success/failure */
{
        cur->bc_rec.b.br_startoff = 0;
        cur->bc_rec.b.br_startblock = 0;
        cur->bc_rec.b.br_blockcount = 0;
        return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
        return whichfork != XFS_COW_FORK &&
                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
                XFS_IFORK_NEXTENTS(ip, whichfork) >
                        XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
        return whichfork != XFS_COW_FORK &&
                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
                XFS_IFORK_NEXTENTS(ip, whichfork) <=
                        XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
        struct xfs_btree_cur    *cur,
        struct xfs_bmbt_irec    *irec)
{
        union xfs_btree_rec     rec;

        xfs_bmbt_disk_set_all(&rec.bmbt, irec);
        return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
        xfs_inode_t     *ip,            /* incore inode pointer */
        xfs_filblks_t   len)            /* delayed extent length */
{
        int             level;          /* btree level number */
        int             maxrecs;        /* maximum record count at this level */
        xfs_mount_t     *mp;            /* mount structure */
        xfs_filblks_t   rval;           /* return value */

        mp = ip->i_mount;
        maxrecs = mp->m_bmap_dmxr[0];
        for (level = 0, rval = 0;
             level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
             level++) {
                len += maxrecs - 1;
                do_div(len, maxrecs);
                rval += len;
                if (len == 1)
                        return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
                                level - 1;
                if (level == 0)
                        maxrecs = mp->m_bmap_dmxr[1];
        }
        return rval;
}
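
/*
 * Illustrative example (assumed geometry, not read from a real
 * superblock): with m_bmap_dmxr[0] = m_bmap_dmxr[1] = 250 records and a
 * 10000-block delayed extent, level 0 needs ceil(10000/250) = 40 leaf
 * blocks and level 1 needs ceil(40/250) = 1 node block.  Since len hit 1
 * at level 1, the early return adds one further block per remaining tree
 * level (3 more for an assumed max depth of 5), reserving 44 blocks so a
 * later btree split can never run out of indirect blocks.
 */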

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;
        uint                    offset;

        if (mp->m_sb.sb_inodesize == 256) {
                offset = XFS_LITINO(mp, ip->i_d.di_version) -
                                XFS_BMDR_SPACE_CALC(MINABTPTRS);
        } else {
                offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
        }

        ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
        return offset;
}
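
/*
 * Note that xfs_default_attroffset() returns a byte offset into the
 * inode literal area, while di_forkoff is stored in units of 8 bytes;
 * hence the ">> 3" wherever the two are compared or assigned below.
 */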

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
        xfs_inode_t     *ip,
        int             whichfork)
{
        if (whichfork == XFS_ATTR_FORK &&
            ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
            ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
                uint    dfl_forkoff = xfs_default_attroffset(ip) >> 3;

                if (dfl_forkoff > ip->i_d.di_forkoff)
                        ip->i_d.di_forkoff = dfl_forkoff;
        }
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
        struct xfs_btree_cur    *cur,
        xfs_fsblock_t           bno)
{
        struct xfs_log_item     *lip;
        int                     i;

        if (!cur)
                return NULL;

        for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
                if (!cur->bc_bufs[i])
                        break;
                if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
                        return cur->bc_bufs[i];
        }

        /* Chase down all the log items to see if the bp is there */
        list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
                struct xfs_buf_log_item *bip = (struct xfs_buf_log_item *)lip;

                if (bip->bli_item.li_type == XFS_LI_BUF &&
                    XFS_BUF_ADDR(bip->bli_buf) == bno)
                        return bip->bli_buf;
        }

        return NULL;
}

STATIC void
xfs_check_block(
        struct xfs_btree_block  *block,
        xfs_mount_t             *mp,
        int                     root,
        short                   sz)
{
        int                     i, j, dmxr;
        __be64                  *pp, *thispa;   /* pointer to block address */
        xfs_bmbt_key_t          *prevp, *keyp;

        ASSERT(be16_to_cpu(block->bb_level) > 0);

        prevp = NULL;
        for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
                dmxr = mp->m_bmap_dmxr[0];
                keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

                if (prevp) {
                        ASSERT(be64_to_cpu(prevp->br_startoff) <
                               be64_to_cpu(keyp->br_startoff));
                }
                prevp = keyp;

                /*
                 * Compare the block numbers to see if there are dups.
                 */
                if (root)
                        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
                else
                        pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

                for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
                        if (root)
                                thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
                        else
                                thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
                        if (*thispa == *pp) {
                                xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
                                        __func__, j, i,
                                        (unsigned long long)be64_to_cpu(*thispa));
                                xfs_err(mp, "%s: ptrs are equal in node\n",
                                        __func__);
                                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                        }
                }
        }
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
        xfs_btree_cur_t         *cur,   /* btree cursor or null */
        xfs_inode_t             *ip,            /* incore inode pointer */
        int                     whichfork)      /* data or attr fork */
{
        struct xfs_btree_block  *block; /* current btree block */
        xfs_fsblock_t           bno;    /* block # of "block" */
        xfs_buf_t               *bp;    /* buffer for "block" */
        int                     error;  /* error return value */
        xfs_extnum_t            i = 0, j;       /* index into the extents list */
        struct xfs_ifork        *ifp;   /* fork structure */
        int                     level;  /* btree level, for checking */
        xfs_mount_t             *mp;    /* file system mount structure */
        __be64                  *pp;    /* pointer to block address */
        xfs_bmbt_rec_t          *ep;    /* pointer to current extent */
        xfs_bmbt_rec_t          last = {0, 0}; /* last extent in prev block */
        xfs_bmbt_rec_t          *nextp; /* pointer to next extent */
        int                     bp_release = 0;

        if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
                return;

        /* skip large extent count inodes */
        if (ip->i_d.di_nextents > 10000)
                return;

        bno = NULLFSBLOCK;
        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        block = ifp->if_broot;
        /*
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        level = be16_to_cpu(block->bb_level);
        ASSERT(level > 0);
        xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);

        ASSERT(bno != NULLFSBLOCK);
        ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
        ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

        /*
         * Go down the tree until leaf level is reached, following the first
         * pointer (leftmost) at each level.
         */
        while (level-- > 0) {
                /* See if buf is in cur first */
                bp_release = 0;
                bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
                if (!bp) {
                        bp_release = 1;
                        error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                goto error_norelse;
                }
                block = XFS_BUF_TO_BLOCK(bp);
                if (level == 0)
                        break;

                /*
                 * Check this block for basic sanity (increasing keys and
                 * no duplicate blocks).
                 */

                xfs_check_block(block, mp, 0, 0);
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                XFS_WANT_CORRUPTED_GOTO(mp,
                                        xfs_verify_fsbno(mp, bno), error0);
                if (bp_release) {
                        bp_release = 0;
                        xfs_trans_brelse(NULL, bp);
                }
        }

        /*
         * Here with bp and block set to the leftmost leaf node in the tree.
         */
        i = 0;

        /*
         * Loop over all leaf nodes checking that all extents are in the right order.
         */
        for (;;) {
                xfs_fsblock_t   nextbno;
                xfs_extnum_t    num_recs;


                num_recs = xfs_btree_get_numrecs(block);

                /*
                 * Read-ahead the next leaf block, if any.
                 */

                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

                /*
                 * Check all the extents to make sure they are OK.
                 * If we had a previous block, the last entry should
                 * conform with the first entry in this one.
                 */

                ep = XFS_BMBT_REC_ADDR(mp, block, 1);
                if (i) {
                        ASSERT(xfs_bmbt_disk_get_startoff(&last) +
                               xfs_bmbt_disk_get_blockcount(&last) <=
                               xfs_bmbt_disk_get_startoff(ep));
                }
                for (j = 1; j < num_recs; j++) {
                        nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
                        ASSERT(xfs_bmbt_disk_get_startoff(ep) +
                               xfs_bmbt_disk_get_blockcount(ep) <=
                               xfs_bmbt_disk_get_startoff(nextp));
                        ep = nextp;
                }

                last = *ep;
                i += num_recs;
                if (bp_release) {
                        bp_release = 0;
                        xfs_trans_brelse(NULL, bp);
                }
                bno = nextbno;
                /*
                 * If we've reached the end, stop.
                 */
                if (bno == NULLFSBLOCK)
                        break;

                bp_release = 0;
                bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
                if (!bp) {
                        bp_release = 1;
                        error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                goto error_norelse;
                }
                block = XFS_BUF_TO_BLOCK(bp);
        }

        return;

error0:
        xfs_warn(mp, "%s: at error0", __func__);
        if (bp_release)
                xfs_trans_brelse(NULL, bp);
error_norelse:
        xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
                __func__, i);
        xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
        xfs_fileoff_t           bno,
        xfs_filblks_t           len,
        int                     flags,
        xfs_bmbt_irec_t         *mval,
        int                     nmap,
        int                     ret_nmap)
{
        int                     i;      /* index to map values */

        ASSERT(ret_nmap <= nmap);

        for (i = 0; i < ret_nmap; i++) {
                ASSERT(mval[i].br_blockcount > 0);
                if (!(flags & XFS_BMAPI_ENTIRE)) {
                        ASSERT(mval[i].br_startoff >= bno);
                        ASSERT(mval[i].br_blockcount <= len);
                        ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
                               bno + len);
                } else {
                        ASSERT(mval[i].br_startoff < bno + len);
                        ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
                               bno);
                }
                ASSERT(i == 0 ||
                       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
                       mval[i].br_startoff);
                ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
                       mval[i].br_startblock != HOLESTARTBLOCK);
                ASSERT(mval[i].br_state == XFS_EXT_NORM ||
                       mval[i].br_state == XFS_EXT_UNWRITTEN);
        }
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)         do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)    do { } while (0)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
        struct xfs_trans                *tp,
        xfs_fsblock_t                   bno,
        xfs_filblks_t                   len,
        const struct xfs_owner_info     *oinfo,
        bool                            skip_discard)
{
        struct xfs_extent_free_item     *new;           /* new element */
#ifdef DEBUG
        struct xfs_mount                *mp = tp->t_mountp;
        xfs_agnumber_t                  agno;
        xfs_agblock_t                   agbno;

        ASSERT(bno != NULLFSBLOCK);
        ASSERT(len > 0);
        ASSERT(len <= MAXEXTLEN);
        ASSERT(!isnullstartblock(bno));
        agno = XFS_FSB_TO_AGNO(mp, bno);
        agbno = XFS_FSB_TO_AGBNO(mp, bno);
        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(agbno < mp->m_sb.sb_agblocks);
        ASSERT(len < mp->m_sb.sb_agblocks);
        ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
        ASSERT(xfs_bmap_free_item_zone != NULL);

        new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
        new->xefi_startblock = bno;
        new->xefi_blockcount = (xfs_extlen_t)len;
        if (oinfo)
                new->xefi_oinfo = *oinfo;
        else
                new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
        new->xefi_skip_discard = skip_discard;
        trace_xfs_bmap_free_defer(tp->t_mountp,
                        XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
                        XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
        xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
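
/*
 * Callers normally reach this through the xfs_bmap_add_free() wrapper
 * (in this kernel's xfs_bmap.h), which passes skip_discard = false.
 * For example, the btree-to-extents conversion below frees its old leaf
 * block with:
 *
 *      xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *      xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
 *
 * The block is not freed immediately; the deferred-ops machinery frees
 * it when the transaction chain commits.
 */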

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree
 * format, but the extent list is small enough that it fits into the extent
 * format.
 *
 * Since the extents are already in-core, all we have to do is give up the
 * space for the btree root and pitch the leaf block.
 */
STATIC int                              /* error */
xfs_bmap_btree_to_extents(
        struct xfs_trans        *tp,    /* transaction pointer */
        struct xfs_inode        *ip,    /* incore inode pointer */
        struct xfs_btree_cur    *cur,   /* btree cursor */
        int                     *logflagsp, /* inode logging flags */
        int                     whichfork)  /* data or attr fork */
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_btree_block  *rblock = ifp->if_broot;
        struct xfs_btree_block  *cblock;/* child btree block */
        xfs_fsblock_t           cbno;   /* child block number */
        xfs_buf_t               *cbp;   /* child block's buffer */
        int                     error;  /* error return value */
        __be64                  *pp;    /* ptr to block address */
        struct xfs_owner_info   oinfo;

        /* check if we actually need the extent format first: */
        if (!xfs_bmap_wants_extents(ip, whichfork))
                return 0;

        ASSERT(cur);
        ASSERT(whichfork != XFS_COW_FORK);
        ASSERT(ifp->if_flags & XFS_IFEXTENTS);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
        ASSERT(be16_to_cpu(rblock->bb_level) == 1);
        ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
        ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
        cbno = be64_to_cpu(*pp);
#ifdef DEBUG
        XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
                        xfs_btree_check_lptr(cur, cbno, 1));
#endif
        error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
                                &xfs_bmbt_buf_ops);
        if (error)
                return error;
        cblock = XFS_BUF_TO_BLOCK(cbp);
        if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
                return error;
        xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
        xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
        ip->i_d.di_nblocks--;
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
        xfs_trans_binval(tp, cbp);
        if (cur->bc_bufs[0] == cbp)
                cur->bc_bufs[0] = NULL;
        xfs_iroot_realloc(ip, -1, whichfork);
        ASSERT(ifp->if_broot == NULL);
        ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
        *logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
        return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int                                      /* error */
xfs_bmap_extents_to_btree(
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_inode        *ip,            /* incore inode pointer */
        struct xfs_btree_cur    **curp,         /* cursor returned to caller */
        int                     wasdel,         /* converting a delayed alloc */
        int                     *logflagsp,     /* inode logging flags */
        int                     whichfork)      /* data or attr fork */
{
        struct xfs_btree_block  *ablock;        /* allocated (child) bt block */
        struct xfs_buf          *abp;           /* buffer for ablock */
        struct xfs_alloc_arg    args;           /* allocation arguments */
        struct xfs_bmbt_rec     *arp;           /* child record pointer */
        struct xfs_btree_block  *block;         /* btree root block */
        struct xfs_btree_cur    *cur;           /* bmap btree cursor */
        int                     error;          /* error return value */
        struct xfs_ifork        *ifp;           /* inode fork pointer */
        struct xfs_bmbt_key     *kp;            /* root block key pointer */
        struct xfs_mount        *mp;            /* mount structure */
        xfs_bmbt_ptr_t          *pp;            /* root block address pointer */
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    rec;
        xfs_extnum_t            cnt = 0;

        mp = ip->i_mount;
        ASSERT(whichfork != XFS_COW_FORK);
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

        /*
         * Make space in the inode incore. This needs to be undone if we fail
         * to expand the root.
         */
        xfs_iroot_realloc(ip, 1, whichfork);
        ifp->if_flags |= XFS_IFBROOT;

        /*
         * Fill in the root.
         */
        block = ifp->if_broot;
        xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
                                 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
                                 XFS_BTREE_LONG_PTRS);
        /*
         * Need a cursor.  Can't allocate until bb_level is filled in.
         */
        cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
        cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
        /*
         * Convert to a btree with two levels, one record in root.
         */
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
        memset(&args, 0, sizeof(args));
        args.tp = tp;
        args.mp = mp;
        xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
        if (tp->t_firstblock == NULLFSBLOCK) {
                args.type = XFS_ALLOCTYPE_START_BNO;
                args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
        } else if (tp->t_flags & XFS_TRANS_LOWMODE) {
                args.type = XFS_ALLOCTYPE_START_BNO;
                args.fsbno = tp->t_firstblock;
        } else {
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
                args.fsbno = tp->t_firstblock;
        }
        args.minlen = args.maxlen = args.prod = 1;
        args.wasdel = wasdel;
        *logflagsp = 0;
        error = xfs_alloc_vextent(&args);
        if (error)
                goto out_root_realloc;

        if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
                error = -ENOSPC;
                goto out_root_realloc;
        }

        /*
         * Allocation can't fail, the space was reserved.
         */
        ASSERT(tp->t_firstblock == NULLFSBLOCK ||
               args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
        tp->t_firstblock = args.fsbno;
        cur->bc_private.b.allocated++;
        ip->i_d.di_nblocks++;
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
        abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
        if (!abp) {
                error = -EFSCORRUPTED;
                goto out_unreserve_dquot;
        }

        /*
         * Fill in the child block.
         */
        abp->b_ops = &xfs_bmbt_buf_ops;
        ablock = XFS_BUF_TO_BLOCK(abp);
        xfs_btree_init_block_int(mp, ablock, abp->b_bn,
                                XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
                                XFS_BTREE_LONG_PTRS);

        for_each_xfs_iext(ifp, &icur, &rec) {
                if (isnullstartblock(rec.br_startblock))
                        continue;
                arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
                xfs_bmbt_disk_set_all(arp, &rec);
                cnt++;
        }
        ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
        xfs_btree_set_numrecs(ablock, cnt);

        /*
         * Fill in the root key and pointer.
         */
        kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
        arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
        kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
        pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
                                                be16_to_cpu(block->bb_level)));
        *pp = cpu_to_be64(args.fsbno);

        /*
         * Do all this logging at the end so that
         * the root is at the right level.
         */
        xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
        xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
        ASSERT(*curp == NULL);
        *curp = cur;
        *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
        return 0;

out_unreserve_dquot:
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
        xfs_iroot_realloc(ip, -1, whichfork);
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
        ASSERT(ifp->if_broot == NULL);
        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

        return error;
}
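
/*
 * The resulting layout after a successful conversion, sketched for
 * illustration:
 *
 *      inode fork (if_broot):  level 1, numrecs 1
 *              key[1] = startoff of the first extent
 *              ptr[1] ---> child block (ablock): level 0,
 *                          rec[1..cnt] = all non-delalloc incore extents
 *
 * Delalloc extents are skipped when filling the child block because they
 * have no on-disk presence until converted to real allocations.
 */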

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
        struct xfs_inode        *ip,
        int                     whichfork)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);

        ASSERT(whichfork != XFS_COW_FORK);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
        ASSERT(ifp->if_bytes == 0);
        ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

        xfs_bmap_forkoff_reset(ip, whichfork);
        ifp->if_flags &= ~XFS_IFINLINE;
        ifp->if_flags |= XFS_IFEXTENTS;
        ifp->if_u1.if_root = NULL;
        ifp->if_height = 0;
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}


STATIC int                              /* error */
xfs_bmap_local_to_extents(
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_inode_t     *ip,            /* incore inode pointer */
        xfs_extlen_t    total,          /* total blocks needed by transaction */
        int             *logflagsp,     /* inode logging flags */
        int             whichfork,
        void            (*init_fn)(struct xfs_trans *tp,
                                   struct xfs_buf *bp,
                                   struct xfs_inode *ip,
                                   struct xfs_ifork *ifp))
{
        int             error = 0;
        int             flags;          /* logging flags returned */
        struct xfs_ifork *ifp;          /* inode fork pointer */
        xfs_alloc_arg_t args;           /* allocation arguments */
        xfs_buf_t       *bp;            /* buffer for extent block */
        struct xfs_bmbt_irec rec;
        struct xfs_iext_cursor icur;

        /*
         * We don't want to deal with the case of keeping inode data inline yet.
         * So sending the data fork of a regular inode is invalid.
         */
        ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

        if (!ifp->if_bytes) {
                xfs_bmap_local_to_extents_empty(ip, whichfork);
                flags = XFS_ILOG_CORE;
                goto done;
        }

        flags = 0;
        error = 0;
        ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
        memset(&args, 0, sizeof(args));
        args.tp = tp;
        args.mp = ip->i_mount;
        xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
        /*
         * Allocate a block.  We know we need only one, since the
         * file currently fits in an inode.
         */
        if (tp->t_firstblock == NULLFSBLOCK) {
                args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
                args.type = XFS_ALLOCTYPE_START_BNO;
        } else {
                args.fsbno = tp->t_firstblock;
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
        }
        args.total = total;
        args.minlen = args.maxlen = args.prod = 1;
        error = xfs_alloc_vextent(&args);
        if (error)
                goto done;

        /* Can't fail, the space was reserved. */
        ASSERT(args.fsbno != NULLFSBLOCK);
        ASSERT(args.len == 1);
        tp->t_firstblock = args.fsbno;
        bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

        /*
         * Initialize the block, copy the data and log the remote buffer.
         *
         * The callout is responsible for logging because the remote format
         * might differ from the local format and thus we don't know how much to
         * log here. Note that init_fn must also set the buffer log item type
         * correctly.
         */
        init_fn(tp, bp, ip, ifp);

        /* account for the change in fork size */
        xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
        xfs_bmap_local_to_extents_empty(ip, whichfork);
        flags |= XFS_ILOG_CORE;

        ifp->if_u1.if_root = NULL;
        ifp->if_height = 0;

        rec.br_startoff = 0;
        rec.br_startblock = args.fsbno;
        rec.br_blockcount = 1;
        rec.br_state = XFS_EXT_NORM;
        xfs_iext_first(ifp, &icur);
        xfs_iext_insert(ip, &icur, &rec, 0);

        XFS_IFORK_NEXT_SET(ip, whichfork, 1);
        ip->i_d.di_nblocks = 1;
        xfs_trans_mod_dquot_byino(tp, ip,
                XFS_TRANS_DQ_BCOUNT, 1L);
        flags |= xfs_ilog_fext(whichfork);

done:
        *logflagsp = flags;
        return error;
}
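
/*
 * A concrete init_fn is xfs_symlink_local_to_remote(), used by the
 * attr-fork conversion path below.  A minimal callout of this shape
 * (hypothetical, for illustration only) would copy the inline fork data
 * into the new block and log it:
 *
 *      static void
 *      example_local_to_remote(        // hypothetical helper
 *              struct xfs_trans        *tp,
 *              struct xfs_buf          *bp,
 *              struct xfs_inode        *ip,
 *              struct xfs_ifork        *ifp)
 *      {
 *              xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
 *              memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
 *              xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
 *      }
 */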

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int                                      /* error */
xfs_bmap_add_attrfork_btree(
        xfs_trans_t             *tp,            /* transaction pointer */
        xfs_inode_t             *ip,            /* incore inode pointer */
        int                     *flags)         /* inode logging flags */
{
        xfs_btree_cur_t         *cur;           /* btree cursor */
        int                     error;          /* error return value */
        xfs_mount_t             *mp;            /* file system mount struct */
        int                     stat;           /* newroot status */

        mp = ip->i_mount;
        if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
                *flags |= XFS_ILOG_DBROOT;
        else {
                cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
                error = xfs_bmbt_lookup_first(cur, &stat);
                if (error)
                        goto error0;
                /* must be at least one entry */
                XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
                if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
                        goto error0;
                if (stat == 0) {
                        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                        return -ENOSPC;
                }
                cur->bc_private.b.allocated = 0;
                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
        }
        return 0;
error0:
        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
        return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int                                      /* error */
xfs_bmap_add_attrfork_extents(
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_inode        *ip,            /* incore inode pointer */
        int                     *flags)         /* inode logging flags */
{
        xfs_btree_cur_t         *cur;           /* bmap btree cursor */
        int                     error;          /* error return value */

        if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
                return 0;
        cur = NULL;
        error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
                                          XFS_DATA_FORK);
        if (cur) {
                cur->bc_private.b.allocated = 0;
                xfs_btree_del_cursor(cur, error);
        }
        return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int                                      /* error */
xfs_bmap_add_attrfork_local(
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_inode        *ip,            /* incore inode pointer */
        int                     *flags)         /* inode logging flags */
{
        struct xfs_da_args      dargs;          /* args for dir/attr code */

        if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
                return 0;

        if (S_ISDIR(VFS_I(ip)->i_mode)) {
                memset(&dargs, 0, sizeof(dargs));
                dargs.geo = ip->i_mount->m_dir_geo;
                dargs.dp = ip;
                dargs.total = dargs.geo->fsbcount;
                dargs.whichfork = XFS_DATA_FORK;
                dargs.trans = tp;
                return xfs_dir2_sf_to_block(&dargs);
        }

        if (S_ISLNK(VFS_I(ip)->i_mode))
                return xfs_bmap_local_to_extents(tp, ip, 1, flags,
                                                 XFS_DATA_FORK,
                                                 xfs_symlink_local_to_remote);

        /* should only be called for types that support local format data */
        ASSERT(0);
        return -EFSCORRUPTED;
}

/* Set an inode attr fork offset based on the format */
int
xfs_bmap_set_attrforkoff(
        struct xfs_inode        *ip,
        int                     size,
        int                     *version)
{
        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_DEV:
                ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
                break;
        case XFS_DINODE_FMT_LOCAL:
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
                if (!ip->i_d.di_forkoff)
                        ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
                else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
                        *version = 2;
                break;
        default:
                ASSERT(0);
                return -EINVAL;
        }

        return 0;
}
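
/*
 * Worked example for the XFS_DINODE_FMT_DEV case above: xfs_dev_t is a
 * 4-byte value, so roundup(4, 8) = 8 bytes, and 8 >> 3 = 1.  A
 * di_forkoff of one 8-byte unit places the attribute fork immediately
 * after the device number in the inode literal area.
 */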

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int                                             /* error code */
xfs_bmap_add_attrfork(
        xfs_inode_t             *ip,            /* incore inode pointer */
        int                     size,           /* space new attribute needs */
        int                     rsvd)           /* xact may use reserved blks */
{
        xfs_mount_t             *mp;            /* mount structure */
        xfs_trans_t             *tp;            /* transaction pointer */
        int                     blks;           /* space reservation */
        int                     version = 1;    /* superblock attr version */
        int                     logflags;       /* logging flags */
        int                     error;          /* error return value */

        ASSERT(XFS_IFORK_Q(ip) == 0);

        mp = ip->i_mount;
        ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

        blks = XFS_ADDAFORK_SPACE_RES(mp);

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
                        rsvd ? XFS_TRANS_RESERVE : 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
                        XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
                        XFS_QMOPT_RES_REGBLKS);
        if (error)
                goto trans_cancel;
        if (XFS_IFORK_Q(ip))
                goto trans_cancel;
        if (ip->i_d.di_anextents != 0) {
                error = -EFSCORRUPTED;
                goto trans_cancel;
        }
        if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
                /*
                 * For inodes coming from pre-6.2 filesystems.
                 */
                ASSERT(ip->i_d.di_aformat == 0);
                ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
        }

        xfs_trans_ijoin(tp, ip, 0);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        error = xfs_bmap_set_attrforkoff(ip, size, &version);
        if (error)
                goto trans_cancel;
        ASSERT(ip->i_afp == NULL);
        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
        ip->i_afp->if_flags = XFS_IFEXTENTS;
        logflags = 0;
        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_LOCAL:
                error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
                break;
        case XFS_DINODE_FMT_EXTENTS:
                error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
                break;
        case XFS_DINODE_FMT_BTREE:
                error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
                break;
        default:
                error = 0;
                break;
        }
        if (logflags)
                xfs_trans_log_inode(tp, ip, logflags);
        if (error)
                goto trans_cancel;
        if (!xfs_sb_version_hasattr(&mp->m_sb) ||
           (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
                bool log_sb = false;

                spin_lock(&mp->m_sb_lock);
                if (!xfs_sb_version_hasattr(&mp->m_sb)) {
                        xfs_sb_version_addattr(&mp->m_sb);
                        log_sb = true;
                }
                if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
                        xfs_sb_version_addattr2(&mp->m_sb);
                        log_sb = true;
                }
                spin_unlock(&mp->m_sb_lock);
                if (log_sb)
                        xfs_log_sb(tp);
        }

        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

trans_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}
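
/*
 * The function above owns the whole transaction lifecycle: it allocates
 * and reserves the transaction, takes ILOCK_EXCL, converts the data fork
 * as needed, possibly upgrades superblock feature bits, and then commits
 * (or cancels) and unlocks.  A caller therefore only does something like
 * (sketch):
 *
 *      error = xfs_bmap_add_attrfork(ip, size, rsvd);
 *      if (error)
 *              return error;
 *
 * with no transaction held and ip unlocked, as the comment above notes.
 */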

/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork)
{
        struct xfs_mount        *mp = ip->i_mount;
        int                     state = xfs_bmap_fork_to_state(whichfork);
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        xfs_extnum_t            nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
        struct xfs_btree_block  *block = ifp->if_broot;
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    new;
        xfs_fsblock_t           bno;
        struct xfs_buf          *bp;
        xfs_extnum_t            i, j;
        int                     level;
        __be64                  *pp;
        int                     error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
                return -EFSCORRUPTED;
        }

        /*
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        level = be16_to_cpu(block->bb_level);
        if (unlikely(level == 0)) {
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
                return -EFSCORRUPTED;
        }
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);

        /*
         * Go down the tree until leaf level is reached, following the first
         * pointer (leftmost) at each level.
         */
        while (level-- > 0) {
                error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
                                XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
                if (error)
                        goto out;
                block = XFS_BUF_TO_BLOCK(bp);
                if (level == 0)
                        break;
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                XFS_WANT_CORRUPTED_GOTO(mp,
                        xfs_verify_fsbno(mp, bno), out_brelse);
                xfs_trans_brelse(tp, bp);
        }

        /*
         * Here with bp and block set to the leftmost leaf node in the tree.
         */
        i = 0;
        xfs_iext_first(ifp, &icur);

        /*
         * Loop over all leaf nodes. Copy information to the extent records.
         */
        for (;;) {
                xfs_bmbt_rec_t  *frp;
                xfs_fsblock_t   nextbno;
                xfs_extnum_t    num_recs;

                num_recs = xfs_btree_get_numrecs(block);
                if (unlikely(i + num_recs > nextents)) {
                        xfs_warn(ip->i_mount,
                                "corrupt dinode %Lu, (btree extents).",
                                (unsigned long long) ip->i_ino);
                        xfs_inode_verifier_error(ip, -EFSCORRUPTED,
                                        __func__, block, sizeof(*block),
                                        __this_address);
                        error = -EFSCORRUPTED;
                        goto out_brelse;
                }
                /*
                 * Read-ahead the next leaf block, if any.
                 */
                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                if (nextbno != NULLFSBLOCK)
                        xfs_btree_reada_bufl(mp, nextbno, 1,
                                             &xfs_bmbt_buf_ops);
                /*
                 * Copy records into the extent records.
                 */
                frp = XFS_BMBT_REC_ADDR(mp, block, 1);
                for (j = 0; j < num_recs; j++, frp++, i++) {
                        xfs_failaddr_t  fa;

                        xfs_bmbt_disk_get_all(frp, &new);
                        fa = xfs_bmap_validate_extent(ip, whichfork, &new);
                        if (fa) {
                                error = -EFSCORRUPTED;
                                xfs_inode_verifier_error(ip, error,
                                                "xfs_iread_extents(2)",
                                                frp, sizeof(*frp), fa);
                                goto out_brelse;
                        }
                        xfs_iext_insert(ip, &icur, &new, state);
                        trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
                        xfs_iext_next(ifp, &icur);
                }
                xfs_trans_brelse(tp, bp);
                bno = nextbno;
                /*
                 * If we've reached the end, stop.
                 */
                if (bno == NULLFSBLOCK)
                        break;
                error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
                                XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
                if (error)
                        goto out;
                block = XFS_BUF_TO_BLOCK(bp);
        }

        if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
                error = -EFSCORRUPTED;
                goto out;
        }
        ASSERT(i == xfs_iext_count(ifp));

        ifp->if_flags |= XFS_IFEXTENTS;
        return 0;

out_brelse:
        xfs_trans_brelse(tp, bp);
out:
        xfs_iext_destroy(ifp);
        return error;
}
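
/*
 * Readers below follow a common lazy-loading pattern: check XFS_IFEXTENTS
 * and pull the extent list in from the bmap btree only when it is not
 * already incore, e.g.:
 *
 *      if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 *              error = xfs_iread_extents(tp, ip, whichfork);
 *              if (error)
 *                      return error;
 *      }
 */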

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free.  This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork.  Return 0 if the fork is currently local (in-inode).
 */
int                                             /* error */
xfs_bmap_first_unused(
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_inode        *ip,            /* incore inode */
        xfs_extlen_t            len,            /* size of hole to find */
        xfs_fileoff_t           *first_unused,  /* unused block */
        int                     whichfork)      /* data or attr fork */
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_bmbt_irec    got;
        struct xfs_iext_cursor  icur;
        xfs_fileoff_t           lastaddr = 0;
        xfs_fileoff_t           lowest, max;
        int                     error;

        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
               XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
               XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

        if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
                *first_unused = 0;
                return 0;
        }

        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(tp, ip, whichfork);
                if (error)
                        return error;
        }

        lowest = max = *first_unused;
        for_each_xfs_iext(ifp, &icur, &got) {
                /*
                 * See if the hole before this extent will work.
                 */
                if (got.br_startoff >= lowest + len &&
                    got.br_startoff - max >= len)
                        break;
                lastaddr = got.br_startoff + got.br_blockcount;
                max = XFS_FILEOFF_MAX(lastaddr, lowest);
        }

        *first_unused = max;
        return 0;
}
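
/*
 * Example of the search above (illustrative): with *first_unused = 0,
 * len = 3 and extents at [0, 5) and [10, 2), the first extent fails the
 * hole test and advances max to 5; the second extent starts at 10 and
 * 10 - 5 >= 3, so the loop breaks and *first_unused becomes 5, the start
 * of the first hole large enough.
 */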

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int                                             /* error */
xfs_bmap_last_before(
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_inode        *ip,            /* incore inode */
        xfs_fileoff_t           *last_block,    /* last block */
        int                     whichfork)      /* data or attr fork */
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_bmbt_irec    got;
        struct xfs_iext_cursor  icur;
        int                     error;

        switch (XFS_IFORK_FORMAT(ip, whichfork)) {
        case XFS_DINODE_FMT_LOCAL:
                *last_block = 0;
                return 0;
        case XFS_DINODE_FMT_BTREE:
        case XFS_DINODE_FMT_EXTENTS:
                break;
        default:
                return -EIO;
        }

        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(tp, ip, whichfork);
                if (error)
                        return error;
        }

        if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
                *last_block = 0;
        return 0;
}

int
xfs_bmap_last_extent(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork,
        struct xfs_bmbt_irec    *rec,
        int                     *is_empty)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_iext_cursor  icur;
        int                     error;

        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(tp, ip, whichfork);
                if (error)
                        return error;
        }

        xfs_iext_last(ifp, &icur);
        if (!xfs_iext_get_extent(ifp, &icur, rec))
                *is_empty = 1;
        else
                *is_empty = 0;
        return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file.  When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
        struct xfs_bmalloca     *bma,
        int                     whichfork)
{
        struct xfs_bmbt_irec    rec;
        int                     is_empty;
        int                     error;

        bma->aeof = false;
        error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
                                     &is_empty);
        if (error)
                return error;

        if (is_empty) {
                bma->aeof = true;
                return 0;
        }

        /*
         * Check if we are allocating at or past the last extent, or at least
         * into the last delayed allocated extent.
         */
        bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
                (bma->offset >= rec.br_startoff &&
                 isnullstartblock(rec.br_startblock));
        return 0;
}
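
/*
 * For example (illustrative): if the last extent is [100, 8) and
 * bma->offset is 108, the first clause above is true and the write is
 * append-at-EOF; if the last extent is instead a delalloc record
 * covering [100, 8) and bma->offset is 104, the second clause fires.
 */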

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
        struct xfs_inode        *ip,
        xfs_fileoff_t           *last_block,
        int                     whichfork)
{
        struct xfs_bmbt_irec    rec;
        int                     is_empty;
        int                     error;

        *last_block = 0;

        if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
                return 0;

        if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
            XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
                return -EIO;

        error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
        if (error || is_empty)
                return error;

        *last_block = rec.br_startoff + rec.br_blockcount;
        return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int                                     /* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
        xfs_inode_t     *ip,            /* incore inode */
        int             whichfork)      /* data or attr fork */
{
        struct xfs_ifork *ifp;          /* inode fork pointer */
        int             rval;           /* return value */
        xfs_bmbt_irec_t s;              /* internal version of extent */
        struct xfs_iext_cursor icur;

#ifndef DEBUG
        if (whichfork == XFS_DATA_FORK)
                return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif  /* !DEBUG */
        if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
                return 0;
        if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
                return 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(ifp->if_flags & XFS_IFEXTENTS);
        xfs_iext_first(ifp, &icur);
        xfs_iext_get_extent(ifp, &icur, &s);
        rval = s.br_startoff == 0 && s.br_blockcount == 1;
        if (rval && whichfork == XFS_DATA_FORK)
                ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
        return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int                              /* error */
xfs_bmap_add_extent_delay_real(
        struct xfs_bmalloca     *bma,
        int                     whichfork)
{
        struct xfs_bmbt_irec    *new = &bma->got;
        int                     error;  /* error return value */
        int                     i;      /* temp state */
        struct xfs_ifork        *ifp;   /* inode fork pointer */
        xfs_fileoff_t           new_endoff;     /* end offset of new entry */
        xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
                                        /* left is 0, right is 1, prev is 2 */
        int                     rval = 0;       /* return value (logging flags) */
        int                     state = xfs_bmap_fork_to_state(whichfork);
        xfs_filblks_t           da_new; /* new count del alloc blocks used */
        xfs_filblks_t           da_old; /* old count del alloc blocks used */
        xfs_filblks_t           temp = 0;       /* value for da_new calculations */
        int                     tmp_rval;       /* partial logging flags */
        struct xfs_mount        *mp;
        xfs_extnum_t            *nextents;
        struct xfs_bmbt_irec    old;

        mp = bma->ip->i_mount;
        ifp = XFS_IFORK_PTR(bma->ip, whichfork);
        ASSERT(whichfork != XFS_ATTR_FORK);
        nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
                                                &bma->ip->i_d.di_nextents);

        ASSERT(!isnullstartblock(new->br_startblock));
        ASSERT(!bma->cur ||
               (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

        XFS_STATS_INC(mp, xs_add_exlist);

#define LEFT            r[0]
#define RIGHT           r[1]
#define PREV            r[2]

        /*
         * Set up a bunch of variables to make the tests simpler.
         */
        xfs_iext_get_extent(ifp, &bma->icur, &PREV);
        new_endoff = new->br_startoff + new->br_blockcount;
        ASSERT(isnullstartblock(PREV.br_startblock));
        ASSERT(PREV.br_startoff <= new->br_startoff);
        ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

        da_old = startblockval(PREV.br_startblock);
        da_new = 0;

        /*
         * Set flags determining what part of the previous delayed allocation
         * extent is being replaced by a real allocation.
         */
        if (PREV.br_startoff == new->br_startoff)
                state |= BMAP_LEFT_FILLING;
        if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
                state |= BMAP_RIGHT_FILLING;

        /*
         * Check and set flags if this segment has a left neighbor.
         * Don't set contiguous if the combined extent would be too large.
         */
        if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
                state |= BMAP_LEFT_VALID;
                if (isnullstartblock(LEFT.br_startblock))
                        state |= BMAP_LEFT_DELAY;
        }

        if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
            LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
            LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
            LEFT.br_state == new->br_state &&
            LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
                state |= BMAP_LEFT_CONTIG;

        /*
         * Check and set flags if this segment has a right neighbor.
         * Don't set contiguous if the combined extent would be too large.
         * Also check for all-three-contiguous being too large.
         */
        if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
                state |= BMAP_RIGHT_VALID;
                if (isnullstartblock(RIGHT.br_startblock))
                        state |= BMAP_RIGHT_DELAY;
        }

        if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
            new_endoff == RIGHT.br_startoff &&
            new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
            new->br_state == RIGHT.br_state &&
            new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
            ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
                       BMAP_RIGHT_FILLING)) !=
                      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
                       BMAP_RIGHT_FILLING) ||
             LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
                        <= MAXEXTLEN))
                state |= BMAP_RIGHT_CONTIG;
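
        /*
         * Worked example of the state bits (illustrative): if PREV is a
         * delalloc extent [100, 10) and the new real allocation covers
         * [100, 10) exactly, both BMAP_LEFT_FILLING and BMAP_RIGHT_FILLING
         * are set; if a written left neighbor also ends at block 100 and
         * abuts the new startblock, BMAP_LEFT_CONTIG joins them, selecting
         * the "left neighbor contiguous, right not" case in the switch
         * below.
         */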

        error = 0;
        /*
         * Switch out based on the FILLING and CONTIG state bits.
         */
        switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
                         BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
        case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
             BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
                /*
                 * Filling in all of a previously delayed allocation extent.
                 * The left and right neighbors are both contiguous with new.
                 */
                LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

                xfs_iext_remove(bma->ip, &bma->icur, state);
                xfs_iext_remove(bma->ip, &bma->icur, state);
                xfs_iext_prev(ifp, &bma->icur);
                xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
                (*nextents)--;

                if (bma->cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
                else {
                        rval = XFS_ILOG_CORE;
                        error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_btree_delete(bma->cur, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_btree_decrement(bma->cur, 0, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_bmbt_update(bma->cur, &LEFT);
                        if (error)
                                goto done;
                }
                break;

        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
                /*
                 * Filling in all of a previously delayed allocation extent.
                 * The left neighbor is contiguous, the right is not.
                 */
                old = LEFT;
                LEFT.br_blockcount += PREV.br_blockcount;

                xfs_iext_remove(bma->ip, &bma->icur, state);
                xfs_iext_prev(ifp, &bma->icur);
                xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

                if (bma->cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
                        rval = 0;
                        error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_bmbt_update(bma->cur, &LEFT);
                        if (error)
                                goto done;
                }
                break;

        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
                /*
                 * Filling in all of a previously delayed allocation extent.
                 * The right neighbor is contiguous, the left is not. Take care
                 * with delay -> unwritten extent allocation here because the
                 * delalloc record we are overwriting is always written.
                 */
                PREV.br_startblock = new->br_startblock;
                PREV.br_blockcount += RIGHT.br_blockcount;
                PREV.br_state = new->br_state;

                xfs_iext_next(ifp, &bma->icur);
                xfs_iext_remove(bma->ip, &bma->icur, state);
                xfs_iext_prev(ifp, &bma->icur);
                xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

                if (bma->cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
                        rval = 0;
                        error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_bmbt_update(bma->cur, &PREV);
                        if (error)
                                goto done;
                }
                break;

        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
                /*
                 * Filling in all of a previously delayed allocation extent.
                 * Neither the left nor right neighbors are contiguous with
                 * the new one.
                 */
                PREV.br_startblock = new->br_startblock;
                PREV.br_state = new->br_state;
                xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

                (*nextents)++;
                if (bma->cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
                else {
                        rval = XFS_ILOG_CORE;
                        error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
                        error = xfs_btree_insert(bma->cur, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                }
                break;

        case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
                /*
                 * Filling in the first part of a previous delayed allocation.
                 * The left neighbor is contiguous.
                 */
                old = LEFT;
                temp = PREV.br_blockcount - new->br_blockcount;
                da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
                                startblockval(PREV.br_startblock));

                LEFT.br_blockcount += new->br_blockcount;

                PREV.br_blockcount = temp;
                PREV.br_startoff += new->br_blockcount;
                PREV.br_startblock = nullstartblock(da_new);

                xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
                xfs_iext_prev(ifp, &bma->icur);
                xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

                if (bma->cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
                        rval = 0;
                        error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_bmbt_update(bma->cur, &LEFT);
                        if (error)
                                goto done;
                }
                break;

        case BMAP_LEFT_FILLING:
                /*
                 * Filling in the first part of a previous delayed allocation.
                 * The left neighbor is not contiguous.
                 */
                xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
                (*nextents)++;
                if (bma->cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
                else {
                        rval = XFS_ILOG_CORE;
                        error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
                        error = xfs_btree_insert(bma->cur, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                }

                if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
                        error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
                                        &bma->cur, 1, &tmp_rval, whichfork);
                        rval |= tmp_rval;
                        if (error)
                                goto done;
                }

                temp = PREV.br_blockcount - new->br_blockcount;
                da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
                        startblockval(PREV.br_startblock) -
                        (bma->cur ? bma->cur->bc_private.b.allocated : 0));

                PREV.br_startoff = new_endoff;
                PREV.br_blockcount = temp;
                PREV.br_startblock = nullstartblock(da_new);
                xfs_iext_next(ifp, &bma->icur);
                xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
                xfs_iext_prev(ifp, &bma->icur);
                break;

        case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
                /*
                 * Filling in the last part of a previous delayed allocation.
                 * The right neighbor is contiguous with the new allocation.
                 */
                old = RIGHT;
                RIGHT.br_startoff = new->br_startoff;
                RIGHT.br_startblock = new->br_startblock;
                RIGHT.br_blockcount += new->br_blockcount;

                if (bma->cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
                        rval = 0;
                        error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                        error = xfs_bmbt_update(bma->cur, &RIGHT);
                        if (error)
                                goto done;
                }

                temp = PREV.br_blockcount - new->br_blockcount;
                da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
                        startblockval(PREV.br_startblock));

                PREV.br_blockcount = temp;
                PREV.br_startblock = nullstartblock(da_new);

                xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
                xfs_iext_next(ifp, &bma->icur);
                xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
                break;

        case BMAP_RIGHT_FILLING:
                /*
                 * Filling in the last part of a previous delayed allocation.
                 * The right neighbor is not contiguous.
                 */
                xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
                (*nextents)++;
                if (bma->cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
                else {
                        rval = XFS_ILOG_CORE;
                        error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
                        error = xfs_btree_insert(bma->cur, &i);
                        if (error)
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
                }

                if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
                        error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
                                &bma->cur, 1, &tmp_rval, whichfork);
                        rval |= tmp_rval;
                        if (error)
                                goto done;
                }

                temp = PREV.br_blockcount - new->br_blockcount;
                da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
                        startblockval(PREV.br_startblock) -
                        (bma->cur ? bma->cur->bc_private.b.allocated : 0));

                PREV.br_startblock = nullstartblock(da_new);
                PREV.br_blockcount = temp;
                xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
                xfs_iext_next(ifp, &bma->icur);
                break;
1911 * 1912 * We start with a delayed allocation: 1913 * 1914 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 1915 * PREV @ idx 1916 * 1917 * and we are allocating: 1918 * +rrrrrrrrrrrrrrrrr+ 1919 * new 1920 * 1921 * and we set it up for insertion as: 1922 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 1923 * new 1924 * PREV @ idx LEFT RIGHT 1925 * inserted at idx + 1 1926 */ 1927 old = PREV; 1928 1929 /* LEFT is the new middle */ 1930 LEFT = *new; 1931 1932 /* RIGHT is the new right */ 1933 RIGHT.br_state = PREV.br_state; 1934 RIGHT.br_startoff = new_endoff; 1935 RIGHT.br_blockcount = 1936 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1937 RIGHT.br_startblock = 1938 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1939 RIGHT.br_blockcount)); 1940 1941 /* truncate PREV */ 1942 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 1943 PREV.br_startblock = 1944 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1945 PREV.br_blockcount)); 1946 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1947 1948 xfs_iext_next(ifp, &bma->icur); 1949 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state); 1950 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state); 1951 (*nextents)++; 1952 1953 if (bma->cur == NULL) 1954 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1955 else { 1956 rval = XFS_ILOG_CORE; 1957 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1958 if (error) 1959 goto done; 1960 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1961 error = xfs_btree_insert(bma->cur, &i); 1962 if (error) 1963 goto done; 1964 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1965 } 1966 1967 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1968 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1969 &bma->cur, 1, &tmp_rval, whichfork); 1970 rval |= tmp_rval; 1971 if (error) 1972 goto done; 1973 } 1974 1975 da_new = startblockval(PREV.br_startblock) + 1976 startblockval(RIGHT.br_startblock); 1977 break; 1978 1979 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1980 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1981 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 1982 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1983 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1984 case BMAP_LEFT_CONTIG: 1985 case BMAP_RIGHT_CONTIG: 1986 /* 1987 * These cases are all impossible. 
1988 */ 1989 ASSERT(0); 1990 } 1991 1992 /* add reverse mapping unless caller opted out */ 1993 if (!(bma->flags & XFS_BMAPI_NORMAP)) { 1994 error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); 1995 if (error) 1996 goto done; 1997 } 1998 1999 /* convert to a btree if necessary */ 2000 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 2001 int tmp_logflags; /* partial log flag return val */ 2002 2003 ASSERT(bma->cur == NULL); 2004 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2005 &bma->cur, da_old > 0, &tmp_logflags, 2006 whichfork); 2007 bma->logflags |= tmp_logflags; 2008 if (error) 2009 goto done; 2010 } 2011 2012 if (bma->cur) { 2013 da_new += bma->cur->bc_private.b.allocated; 2014 bma->cur->bc_private.b.allocated = 0; 2015 } 2016 2017 /* adjust for changes in reserved delayed indirect blocks */ 2018 if (da_new != da_old) { 2019 ASSERT(state == 0 || da_new < da_old); 2020 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), 2021 false); 2022 } 2023 2024 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 2025 done: 2026 if (whichfork != XFS_COW_FORK) 2027 bma->logflags |= rval; 2028 return error; 2029 #undef LEFT 2030 #undef RIGHT 2031 #undef PREV 2032 } 2033 2034 /* 2035 * Convert an unwritten allocation to a real allocation or vice versa. 2036 */ 2037 int /* error */ 2038 xfs_bmap_add_extent_unwritten_real( 2039 struct xfs_trans *tp, 2040 xfs_inode_t *ip, /* incore inode pointer */ 2041 int whichfork, 2042 struct xfs_iext_cursor *icur, 2043 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 2044 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 2045 int *logflagsp) /* inode logging flags */ 2046 { 2047 xfs_btree_cur_t *cur; /* btree cursor */ 2048 int error; /* error return value */ 2049 int i; /* temp state */ 2050 struct xfs_ifork *ifp; /* inode fork pointer */ 2051 xfs_fileoff_t new_endoff; /* end offset of new entry */ 2052 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 2053 /* left is 0, right is 1, prev is 2 */ 2054 int rval=0; /* return value (logging flags) */ 2055 int state = xfs_bmap_fork_to_state(whichfork); 2056 struct xfs_mount *mp = ip->i_mount; 2057 struct xfs_bmbt_irec old; 2058 2059 *logflagsp = 0; 2060 2061 cur = *curp; 2062 ifp = XFS_IFORK_PTR(ip, whichfork); 2063 2064 ASSERT(!isnullstartblock(new->br_startblock)); 2065 2066 XFS_STATS_INC(mp, xs_add_exlist); 2067 2068 #define LEFT r[0] 2069 #define RIGHT r[1] 2070 #define PREV r[2] 2071 2072 /* 2073 * Set up a bunch of variables to make the tests simpler. 2074 */ 2075 error = 0; 2076 xfs_iext_get_extent(ifp, icur, &PREV); 2077 ASSERT(new->br_state != PREV.br_state); 2078 new_endoff = new->br_startoff + new->br_blockcount; 2079 ASSERT(PREV.br_startoff <= new->br_startoff); 2080 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2081 2082 /* 2083 * Set flags determining what part of the previous oldext allocation 2084 * extent is being replaced by a newext allocation. 2085 */ 2086 if (PREV.br_startoff == new->br_startoff) 2087 state |= BMAP_LEFT_FILLING; 2088 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2089 state |= BMAP_RIGHT_FILLING; 2090 2091 /* 2092 * Check and set flags if this segment has a left neighbor. 2093 * Don't set contiguous if the combined extent would be too large. 
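 * An illustrative example (made-up numbers): a LEFT extent mapping
 * file offsets [0,10) to blocks 100..109 is contiguous with a new
 * extent mapping [10,14) to blocks 110..113 only if both extents
 * have the same br_state and the combined length, 14 blocks here,
 * stays within MAXEXTLEN.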
2094 */ 2095 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { 2096 state |= BMAP_LEFT_VALID; 2097 if (isnullstartblock(LEFT.br_startblock)) 2098 state |= BMAP_LEFT_DELAY; 2099 } 2100 2101 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2102 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2103 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2104 LEFT.br_state == new->br_state && 2105 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2106 state |= BMAP_LEFT_CONTIG; 2107 2108 /* 2109 * Check and set flags if this segment has a right neighbor. 2110 * Don't set contiguous if the combined extent would be too large. 2111 * Also check for all-three-contiguous being too large. 2112 */ 2113 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { 2114 state |= BMAP_RIGHT_VALID; 2115 if (isnullstartblock(RIGHT.br_startblock)) 2116 state |= BMAP_RIGHT_DELAY; 2117 } 2118 2119 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2120 new_endoff == RIGHT.br_startoff && 2121 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2122 new->br_state == RIGHT.br_state && 2123 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 2124 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2125 BMAP_RIGHT_FILLING)) != 2126 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2127 BMAP_RIGHT_FILLING) || 2128 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2129 <= MAXEXTLEN)) 2130 state |= BMAP_RIGHT_CONTIG; 2131 2132 /* 2133 * Switch out based on the FILLING and CONTIG state bits. 2134 */ 2135 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2136 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2137 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2138 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2139 /* 2140 * Setting all of a previous oldext extent to newext. 2141 * The left and right neighbors are both contiguous with new. 2142 */ 2143 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 2144 2145 xfs_iext_remove(ip, icur, state); 2146 xfs_iext_remove(ip, icur, state); 2147 xfs_iext_prev(ifp, icur); 2148 xfs_iext_update_extent(ip, state, icur, &LEFT); 2149 XFS_IFORK_NEXT_SET(ip, whichfork, 2150 XFS_IFORK_NEXTENTS(ip, whichfork) - 2); 2151 if (cur == NULL) 2152 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2153 else { 2154 rval = XFS_ILOG_CORE; 2155 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2156 if (error) 2157 goto done; 2158 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2159 if ((error = xfs_btree_delete(cur, &i))) 2160 goto done; 2161 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2162 if ((error = xfs_btree_decrement(cur, 0, &i))) 2163 goto done; 2164 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2165 if ((error = xfs_btree_delete(cur, &i))) 2166 goto done; 2167 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2168 if ((error = xfs_btree_decrement(cur, 0, &i))) 2169 goto done; 2170 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2171 error = xfs_bmbt_update(cur, &LEFT); 2172 if (error) 2173 goto done; 2174 } 2175 break; 2176 2177 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2178 /* 2179 * Setting all of a previous oldext extent to newext. 2180 * The left neighbor is contiguous, the right is not. 
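 * PREV is absorbed into LEFT, so the fork loses one extent record;
 * in btree form this is a delete of PREV followed by a widening
 * update of LEFT.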
2181 */ 2182 LEFT.br_blockcount += PREV.br_blockcount; 2183 2184 xfs_iext_remove(ip, icur, state); 2185 xfs_iext_prev(ifp, icur); 2186 xfs_iext_update_extent(ip, state, icur, &LEFT); 2187 XFS_IFORK_NEXT_SET(ip, whichfork, 2188 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2189 if (cur == NULL) 2190 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2191 else { 2192 rval = XFS_ILOG_CORE; 2193 error = xfs_bmbt_lookup_eq(cur, &PREV, &i); 2194 if (error) 2195 goto done; 2196 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2197 if ((error = xfs_btree_delete(cur, &i))) 2198 goto done; 2199 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2200 if ((error = xfs_btree_decrement(cur, 0, &i))) 2201 goto done; 2202 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2203 error = xfs_bmbt_update(cur, &LEFT); 2204 if (error) 2205 goto done; 2206 } 2207 break; 2208 2209 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2210 /* 2211 * Setting all of a previous oldext extent to newext. 2212 * The right neighbor is contiguous, the left is not. 2213 */ 2214 PREV.br_blockcount += RIGHT.br_blockcount; 2215 PREV.br_state = new->br_state; 2216 2217 xfs_iext_next(ifp, icur); 2218 xfs_iext_remove(ip, icur, state); 2219 xfs_iext_prev(ifp, icur); 2220 xfs_iext_update_extent(ip, state, icur, &PREV); 2221 2222 XFS_IFORK_NEXT_SET(ip, whichfork, 2223 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2224 if (cur == NULL) 2225 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2226 else { 2227 rval = XFS_ILOG_CORE; 2228 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2229 if (error) 2230 goto done; 2231 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2232 if ((error = xfs_btree_delete(cur, &i))) 2233 goto done; 2234 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2235 if ((error = xfs_btree_decrement(cur, 0, &i))) 2236 goto done; 2237 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2238 error = xfs_bmbt_update(cur, &PREV); 2239 if (error) 2240 goto done; 2241 } 2242 break; 2243 2244 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2245 /* 2246 * Setting all of a previous oldext extent to newext. 2247 * Neither the left nor right neighbors are contiguous with 2248 * the new one. 2249 */ 2250 PREV.br_state = new->br_state; 2251 xfs_iext_update_extent(ip, state, icur, &PREV); 2252 2253 if (cur == NULL) 2254 rval = XFS_ILOG_DEXT; 2255 else { 2256 rval = 0; 2257 error = xfs_bmbt_lookup_eq(cur, new, &i); 2258 if (error) 2259 goto done; 2260 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2261 error = xfs_bmbt_update(cur, &PREV); 2262 if (error) 2263 goto done; 2264 } 2265 break; 2266 2267 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2268 /* 2269 * Setting the first part of a previous oldext extent to newext. 2270 * The left neighbor is contiguous. 
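 * The converted blocks migrate from PREV to LEFT: LEFT grows by
 * new->br_blockcount while PREV is trimmed from the front, so the
 * record count is unchanged and both records are updated in place.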
2271 */ 2272 LEFT.br_blockcount += new->br_blockcount; 2273 2274 old = PREV; 2275 PREV.br_startoff += new->br_blockcount; 2276 PREV.br_startblock += new->br_blockcount; 2277 PREV.br_blockcount -= new->br_blockcount; 2278 2279 xfs_iext_update_extent(ip, state, icur, &PREV); 2280 xfs_iext_prev(ifp, icur); 2281 xfs_iext_update_extent(ip, state, icur, &LEFT); 2282 2283 if (cur == NULL) 2284 rval = XFS_ILOG_DEXT; 2285 else { 2286 rval = 0; 2287 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2288 if (error) 2289 goto done; 2290 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2291 error = xfs_bmbt_update(cur, &PREV); 2292 if (error) 2293 goto done; 2294 error = xfs_btree_decrement(cur, 0, &i); 2295 if (error) 2296 goto done; 2297 error = xfs_bmbt_update(cur, &LEFT); 2298 if (error) 2299 goto done; 2300 } 2301 break; 2302 2303 case BMAP_LEFT_FILLING: 2304 /* 2305 * Setting the first part of a previous oldext extent to newext. 2306 * The left neighbor is not contiguous. 2307 */ 2308 old = PREV; 2309 PREV.br_startoff += new->br_blockcount; 2310 PREV.br_startblock += new->br_blockcount; 2311 PREV.br_blockcount -= new->br_blockcount; 2312 2313 xfs_iext_update_extent(ip, state, icur, &PREV); 2314 xfs_iext_insert(ip, icur, new, state); 2315 XFS_IFORK_NEXT_SET(ip, whichfork, 2316 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2317 if (cur == NULL) 2318 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2319 else { 2320 rval = XFS_ILOG_CORE; 2321 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2322 if (error) 2323 goto done; 2324 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2325 error = xfs_bmbt_update(cur, &PREV); 2326 if (error) 2327 goto done; 2328 cur->bc_rec.b = *new; 2329 if ((error = xfs_btree_insert(cur, &i))) 2330 goto done; 2331 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2332 } 2333 break; 2334 2335 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2336 /* 2337 * Setting the last part of a previous oldext extent to newext. 2338 * The right neighbor is contiguous with the new allocation. 2339 */ 2340 old = PREV; 2341 PREV.br_blockcount -= new->br_blockcount; 2342 2343 RIGHT.br_startoff = new->br_startoff; 2344 RIGHT.br_startblock = new->br_startblock; 2345 RIGHT.br_blockcount += new->br_blockcount; 2346 2347 xfs_iext_update_extent(ip, state, icur, &PREV); 2348 xfs_iext_next(ifp, icur); 2349 xfs_iext_update_extent(ip, state, icur, &RIGHT); 2350 2351 if (cur == NULL) 2352 rval = XFS_ILOG_DEXT; 2353 else { 2354 rval = 0; 2355 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2356 if (error) 2357 goto done; 2358 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2359 error = xfs_bmbt_update(cur, &PREV); 2360 if (error) 2361 goto done; 2362 error = xfs_btree_increment(cur, 0, &i); 2363 if (error) 2364 goto done; 2365 error = xfs_bmbt_update(cur, &RIGHT); 2366 if (error) 2367 goto done; 2368 } 2369 break; 2370 2371 case BMAP_RIGHT_FILLING: 2372 /* 2373 * Setting the last part of a previous oldext extent to newext. 2374 * The right neighbor is not contiguous. 
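 * PREV is shortened in place and a new record is inserted after it.
 * Note the second lookup below: once PREV has been updated, the
 * cursor must be repositioned before the new record can be
 * inserted.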
2375 */ 2376 old = PREV; 2377 PREV.br_blockcount -= new->br_blockcount; 2378 2379 xfs_iext_update_extent(ip, state, icur, &PREV); 2380 xfs_iext_next(ifp, icur); 2381 xfs_iext_insert(ip, icur, new, state); 2382 2383 XFS_IFORK_NEXT_SET(ip, whichfork, 2384 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2385 if (cur == NULL) 2386 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2387 else { 2388 rval = XFS_ILOG_CORE; 2389 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2390 if (error) 2391 goto done; 2392 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2393 error = xfs_bmbt_update(cur, &PREV); 2394 if (error) 2395 goto done; 2396 error = xfs_bmbt_lookup_eq(cur, new, &i); 2397 if (error) 2398 goto done; 2399 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2400 if ((error = xfs_btree_insert(cur, &i))) 2401 goto done; 2402 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2403 } 2404 break; 2405 2406 case 0: 2407 /* 2408 * Setting the middle part of a previous oldext extent to 2409 * newext. Contiguity is impossible here. 2410 * One extent becomes three extents. 2411 */ 2412 old = PREV; 2413 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 2414 2415 r[0] = *new; 2416 r[1].br_startoff = new_endoff; 2417 r[1].br_blockcount = 2418 old.br_startoff + old.br_blockcount - new_endoff; 2419 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2420 r[1].br_state = PREV.br_state; 2421 2422 xfs_iext_update_extent(ip, state, icur, &PREV); 2423 xfs_iext_next(ifp, icur); 2424 xfs_iext_insert(ip, icur, &r[1], state); 2425 xfs_iext_insert(ip, icur, &r[0], state); 2426 2427 XFS_IFORK_NEXT_SET(ip, whichfork, 2428 XFS_IFORK_NEXTENTS(ip, whichfork) + 2); 2429 if (cur == NULL) 2430 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2431 else { 2432 rval = XFS_ILOG_CORE; 2433 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2434 if (error) 2435 goto done; 2436 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2437 /* new right extent - oldext */ 2438 error = xfs_bmbt_update(cur, &r[1]); 2439 if (error) 2440 goto done; 2441 /* new left extent - oldext */ 2442 cur->bc_rec.b = PREV; 2443 if ((error = xfs_btree_insert(cur, &i))) 2444 goto done; 2445 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2446 /* 2447 * Reset the cursor to the position of the new extent 2448 * we are about to insert as we can't trust it after 2449 * the previous insert. 2450 */ 2451 error = xfs_bmbt_lookup_eq(cur, new, &i); 2452 if (error) 2453 goto done; 2454 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2455 /* new middle extent - newext */ 2456 if ((error = xfs_btree_insert(cur, &i))) 2457 goto done; 2458 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2459 } 2460 break; 2461 2462 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2463 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2464 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2465 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2466 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2467 case BMAP_LEFT_CONTIG: 2468 case BMAP_RIGHT_CONTIG: 2469 /* 2470 * These cases are all impossible. 
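 * A CONTIG bit can only ever be set together with the matching
 * FILLING bit: LEFT ends where PREV starts, so LEFT_CONTIG requires
 * the new range to start at PREV's first block, which is exactly
 * LEFT_FILLING (and likewise for RIGHT). Every case above has a
 * CONTIG bit without its FILLING bit.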
2471 */ 2472 ASSERT(0); 2473 } 2474 2475 /* update reverse mappings */ 2476 error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); 2477 if (error) 2478 goto done; 2479 2480 /* convert to a btree if necessary */ 2481 if (xfs_bmap_needs_btree(ip, whichfork)) { 2482 int tmp_logflags; /* partial log flag return val */ 2483 2484 ASSERT(cur == NULL); 2485 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 2486 &tmp_logflags, whichfork); 2487 *logflagsp |= tmp_logflags; 2488 if (error) 2489 goto done; 2490 } 2491 2492 /* clear out the allocated field, done with it now in any case. */ 2493 if (cur) { 2494 cur->bc_private.b.allocated = 0; 2495 *curp = cur; 2496 } 2497 2498 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2499 done: 2500 *logflagsp |= rval; 2501 return error; 2502 #undef LEFT 2503 #undef RIGHT 2504 #undef PREV 2505 } 2506 2507 /* 2508 * Convert a hole to a delayed allocation. 2509 */ 2510 STATIC void 2511 xfs_bmap_add_extent_hole_delay( 2512 xfs_inode_t *ip, /* incore inode pointer */ 2513 int whichfork, 2514 struct xfs_iext_cursor *icur, 2515 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2516 { 2517 struct xfs_ifork *ifp; /* inode fork pointer */ 2518 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2519 xfs_filblks_t newlen=0; /* new indirect size */ 2520 xfs_filblks_t oldlen=0; /* old indirect size */ 2521 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2522 int state = xfs_bmap_fork_to_state(whichfork); 2523 xfs_filblks_t temp; /* temp for indirect calculations */ 2524 2525 ifp = XFS_IFORK_PTR(ip, whichfork); 2526 ASSERT(isnullstartblock(new->br_startblock)); 2527 2528 /* 2529 * Check and set flags if this segment has a left neighbor 2530 */ 2531 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2532 state |= BMAP_LEFT_VALID; 2533 if (isnullstartblock(left.br_startblock)) 2534 state |= BMAP_LEFT_DELAY; 2535 } 2536 2537 /* 2538 * Check and set flags if the current (right) segment exists. 2539 * If it doesn't exist, we're converting the hole at end-of-file. 2540 */ 2541 if (xfs_iext_get_extent(ifp, icur, &right)) { 2542 state |= BMAP_RIGHT_VALID; 2543 if (isnullstartblock(right.br_startblock)) 2544 state |= BMAP_RIGHT_DELAY; 2545 } 2546 2547 /* 2548 * Set contiguity flags on the left and right neighbors. 2549 * Don't let extents get too large, even if the pieces are contiguous. 2550 */ 2551 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2552 left.br_startoff + left.br_blockcount == new->br_startoff && 2553 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2554 state |= BMAP_LEFT_CONTIG; 2555 2556 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2557 new->br_startoff + new->br_blockcount == right.br_startoff && 2558 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2559 (!(state & BMAP_LEFT_CONTIG) || 2560 (left.br_blockcount + new->br_blockcount + 2561 right.br_blockcount <= MAXEXTLEN))) 2562 state |= BMAP_RIGHT_CONTIG; 2563 2564 /* 2565 * Switch out based on the contiguity flags. 2566 */ 2567 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2568 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2569 /* 2570 * New allocation is contiguous with delayed allocations 2571 * on the left and on the right. 2572 * Merge all three into a single extent record. 
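 * A single indirect-block reservation survives the merge: oldlen is
 * the sum of the three existing reservations, newlen is the
 * worst-case need of the combined length capped at oldlen, and the
 * surplus is handed back to the free-block counter afterwards.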
2573 */ 2574 temp = left.br_blockcount + new->br_blockcount + 2575 right.br_blockcount; 2576 2577 oldlen = startblockval(left.br_startblock) + 2578 startblockval(new->br_startblock) + 2579 startblockval(right.br_startblock); 2580 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2581 oldlen); 2582 left.br_startblock = nullstartblock(newlen); 2583 left.br_blockcount = temp; 2584 2585 xfs_iext_remove(ip, icur, state); 2586 xfs_iext_prev(ifp, icur); 2587 xfs_iext_update_extent(ip, state, icur, &left); 2588 break; 2589 2590 case BMAP_LEFT_CONTIG: 2591 /* 2592 * New allocation is contiguous with a delayed allocation 2593 * on the left. 2594 * Merge the new allocation with the left neighbor. 2595 */ 2596 temp = left.br_blockcount + new->br_blockcount; 2597 2598 oldlen = startblockval(left.br_startblock) + 2599 startblockval(new->br_startblock); 2600 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2601 oldlen); 2602 left.br_blockcount = temp; 2603 left.br_startblock = nullstartblock(newlen); 2604 2605 xfs_iext_prev(ifp, icur); 2606 xfs_iext_update_extent(ip, state, icur, &left); 2607 break; 2608 2609 case BMAP_RIGHT_CONTIG: 2610 /* 2611 * New allocation is contiguous with a delayed allocation 2612 * on the right. 2613 * Merge the new allocation with the right neighbor. 2614 */ 2615 temp = new->br_blockcount + right.br_blockcount; 2616 oldlen = startblockval(new->br_startblock) + 2617 startblockval(right.br_startblock); 2618 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2619 oldlen); 2620 right.br_startoff = new->br_startoff; 2621 right.br_startblock = nullstartblock(newlen); 2622 right.br_blockcount = temp; 2623 xfs_iext_update_extent(ip, state, icur, &right); 2624 break; 2625 2626 case 0: 2627 /* 2628 * New allocation is not contiguous with another 2629 * delayed allocation. 2630 * Insert a new entry. 2631 */ 2632 oldlen = newlen = 0; 2633 xfs_iext_insert(ip, icur, new, state); 2634 break; 2635 } 2636 if (oldlen != newlen) { 2637 ASSERT(oldlen > newlen); 2638 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2639 false); 2640 /* 2641 * Nothing to do for disk quota accounting here. 2642 */ 2643 } 2644 } 2645 2646 /* 2647 * Convert a hole to a real allocation. 2648 */ 2649 STATIC int /* error */ 2650 xfs_bmap_add_extent_hole_real( 2651 struct xfs_trans *tp, 2652 struct xfs_inode *ip, 2653 int whichfork, 2654 struct xfs_iext_cursor *icur, 2655 struct xfs_btree_cur **curp, 2656 struct xfs_bmbt_irec *new, 2657 int *logflagsp, 2658 int flags) 2659 { 2660 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 2661 struct xfs_mount *mp = ip->i_mount; 2662 struct xfs_btree_cur *cur = *curp; 2663 int error; /* error return value */ 2664 int i; /* temp state */ 2665 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2666 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2667 int rval=0; /* return value (logging flags) */ 2668 int state = xfs_bmap_fork_to_state(whichfork); 2669 struct xfs_bmbt_irec old; 2670 2671 ASSERT(!isnullstartblock(new->br_startblock)); 2672 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 2673 2674 XFS_STATS_INC(mp, xs_add_exlist); 2675 2676 /* 2677 * Check and set flags if this segment has a left neighbor. 2678 */ 2679 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2680 state |= BMAP_LEFT_VALID; 2681 if (isnullstartblock(left.br_startblock)) 2682 state |= BMAP_LEFT_DELAY; 2683 } 2684 2685 /* 2686 * Check and set flags if this segment has a current value. 2687 * Not true if we're inserting into the "hole" at eof. 
2688 */ 2689 if (xfs_iext_get_extent(ifp, icur, &right)) { 2690 state |= BMAP_RIGHT_VALID; 2691 if (isnullstartblock(right.br_startblock)) 2692 state |= BMAP_RIGHT_DELAY; 2693 } 2694 2695 /* 2696 * We're inserting a real allocation between "left" and "right". 2697 * Set the contiguity flags. Don't let extents get too large. 2698 */ 2699 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2700 left.br_startoff + left.br_blockcount == new->br_startoff && 2701 left.br_startblock + left.br_blockcount == new->br_startblock && 2702 left.br_state == new->br_state && 2703 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2704 state |= BMAP_LEFT_CONTIG; 2705 2706 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2707 new->br_startoff + new->br_blockcount == right.br_startoff && 2708 new->br_startblock + new->br_blockcount == right.br_startblock && 2709 new->br_state == right.br_state && 2710 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2711 (!(state & BMAP_LEFT_CONTIG) || 2712 left.br_blockcount + new->br_blockcount + 2713 right.br_blockcount <= MAXEXTLEN)) 2714 state |= BMAP_RIGHT_CONTIG; 2715 2716 error = 0; 2717 /* 2718 * Select which case we're in here, and implement it. 2719 */ 2720 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2721 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2722 /* 2723 * New allocation is contiguous with real allocations on the 2724 * left and on the right. 2725 * Merge all three into a single extent record. 2726 */ 2727 left.br_blockcount += new->br_blockcount + right.br_blockcount; 2728 2729 xfs_iext_remove(ip, icur, state); 2730 xfs_iext_prev(ifp, icur); 2731 xfs_iext_update_extent(ip, state, icur, &left); 2732 2733 XFS_IFORK_NEXT_SET(ip, whichfork, 2734 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2735 if (cur == NULL) { 2736 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2737 } else { 2738 rval = XFS_ILOG_CORE; 2739 error = xfs_bmbt_lookup_eq(cur, &right, &i); 2740 if (error) 2741 goto done; 2742 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2743 error = xfs_btree_delete(cur, &i); 2744 if (error) 2745 goto done; 2746 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2747 error = xfs_btree_decrement(cur, 0, &i); 2748 if (error) 2749 goto done; 2750 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2751 error = xfs_bmbt_update(cur, &left); 2752 if (error) 2753 goto done; 2754 } 2755 break; 2756 2757 case BMAP_LEFT_CONTIG: 2758 /* 2759 * New allocation is contiguous with a real allocation 2760 * on the left. 2761 * Merge the new allocation with the left neighbor. 2762 */ 2763 old = left; 2764 left.br_blockcount += new->br_blockcount; 2765 2766 xfs_iext_prev(ifp, icur); 2767 xfs_iext_update_extent(ip, state, icur, &left); 2768 2769 if (cur == NULL) { 2770 rval = xfs_ilog_fext(whichfork); 2771 } else { 2772 rval = 0; 2773 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2774 if (error) 2775 goto done; 2776 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2777 error = xfs_bmbt_update(cur, &left); 2778 if (error) 2779 goto done; 2780 } 2781 break; 2782 2783 case BMAP_RIGHT_CONTIG: 2784 /* 2785 * New allocation is contiguous with a real allocation 2786 * on the right. 2787 * Merge the new allocation with the right neighbor. 
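 * right absorbs the new blocks from the left: it takes over new's
 * startoff and startblock while its blockcount grows, and the old
 * value is kept only to look the record up in the btree.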
2788 */ 2789 old = right; 2790 2791 right.br_startoff = new->br_startoff; 2792 right.br_startblock = new->br_startblock; 2793 right.br_blockcount += new->br_blockcount; 2794 xfs_iext_update_extent(ip, state, icur, &right); 2795 2796 if (cur == NULL) { 2797 rval = xfs_ilog_fext(whichfork); 2798 } else { 2799 rval = 0; 2800 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2801 if (error) 2802 goto done; 2803 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2804 error = xfs_bmbt_update(cur, &right); 2805 if (error) 2806 goto done; 2807 } 2808 break; 2809 2810 case 0: 2811 /* 2812 * New allocation is not contiguous with another 2813 * real allocation. 2814 * Insert a new entry. 2815 */ 2816 xfs_iext_insert(ip, icur, new, state); 2817 XFS_IFORK_NEXT_SET(ip, whichfork, 2818 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2819 if (cur == NULL) { 2820 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2821 } else { 2822 rval = XFS_ILOG_CORE; 2823 error = xfs_bmbt_lookup_eq(cur, new, &i); 2824 if (error) 2825 goto done; 2826 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2827 error = xfs_btree_insert(cur, &i); 2828 if (error) 2829 goto done; 2830 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2831 } 2832 break; 2833 } 2834 2835 /* add reverse mapping unless caller opted out */ 2836 if (!(flags & XFS_BMAPI_NORMAP)) { 2837 error = xfs_rmap_map_extent(tp, ip, whichfork, new); 2838 if (error) 2839 goto done; 2840 } 2841 2842 /* convert to a btree if necessary */ 2843 if (xfs_bmap_needs_btree(ip, whichfork)) { 2844 int tmp_logflags; /* partial log flag return val */ 2845 2846 ASSERT(cur == NULL); 2847 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2848 &tmp_logflags, whichfork); 2849 *logflagsp |= tmp_logflags; 2850 cur = *curp; 2851 if (error) 2852 goto done; 2853 } 2854 2855 /* clear out the allocated field, done with it now in any case. */ 2856 if (cur) 2857 cur->bc_private.b.allocated = 0; 2858 2859 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 2860 done: 2861 *logflagsp |= rval; 2862 return error; 2863 } 2864 2865 /* 2866 * Functions used in the extent read, allocate and remove paths 2867 */ 2868 2869 /* 2870 * Adjust the size of the new extent based on di_extsize and rt extsize. 2871 */ 2872 int 2873 xfs_bmap_extsize_align( 2874 xfs_mount_t *mp, 2875 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2876 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2877 xfs_extlen_t extsz, /* align to this extent size */ 2878 int rt, /* is this a realtime inode? */ 2879 int eof, /* is extent at end-of-file? */ 2880 int delay, /* creating delalloc extent? */ 2881 int convert, /* overwriting unwritten extent? */ 2882 xfs_fileoff_t *offp, /* in/out: aligned offset */ 2883 xfs_extlen_t *lenp) /* in/out: aligned length */ 2884 { 2885 xfs_fileoff_t orig_off; /* original offset */ 2886 xfs_extlen_t orig_alen; /* original length */ 2887 xfs_fileoff_t orig_end; /* original off+len */ 2888 xfs_fileoff_t nexto; /* next file offset */ 2889 xfs_fileoff_t prevo; /* previous file offset */ 2890 xfs_fileoff_t align_off; /* temp for offset */ 2891 xfs_extlen_t align_alen; /* temp for length */ 2892 xfs_extlen_t temp; /* temp for calculations */ 2893 2894 if (convert) 2895 return 0; 2896 2897 orig_off = align_off = *offp; 2898 orig_alen = align_alen = *lenp; 2899 orig_end = orig_off + orig_alen; 2900 2901 /* 2902 * If this request overlaps an existing extent, then don't 2903 * attempt to perform any additional alignment. 
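 * Otherwise both ends of the request are pushed out to extsz
 * boundaries. A rough example with extsz = 16: a request at offset
 * 10 for 4 blocks first becomes offset 0, length 14 (start aligned
 * down), then offset 0, length 16 (end rounded up).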
2904 */ 2905 if (!delay && !eof && 2906 (orig_off >= gotp->br_startoff) && 2907 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 2908 return 0; 2909 } 2910 2911 /* 2912 * If the file offset is unaligned vs. the extent size 2913 * we need to align it. This will be possible unless 2914 * the file was previously written with a kernel that didn't 2915 * perform this alignment, or if a truncate shot us in the 2916 * foot. 2917 */ 2918 div_u64_rem(orig_off, extsz, &temp); 2919 if (temp) { 2920 align_alen += temp; 2921 align_off -= temp; 2922 } 2923 2924 /* Same adjustment for the end of the requested area. */ 2925 temp = (align_alen % extsz); 2926 if (temp) 2927 align_alen += extsz - temp; 2928 2929 /* 2930 * For large extent hint sizes, the aligned extent might be larger than 2931 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls 2932 * the length back under MAXEXTLEN. The outer allocation loops handle 2933 * short allocation just fine, so it is safe to do this. We only want to 2934 * do it when we are forced to, though, because it means more allocation 2935 * operations are required. 2936 */ 2937 while (align_alen > MAXEXTLEN) 2938 align_alen -= extsz; 2939 ASSERT(align_alen <= MAXEXTLEN); 2940 2941 /* 2942 * If the previous block overlaps with this proposed allocation 2943 * then move the start forward without adjusting the length. 2944 */ 2945 if (prevp->br_startoff != NULLFILEOFF) { 2946 if (prevp->br_startblock == HOLESTARTBLOCK) 2947 prevo = prevp->br_startoff; 2948 else 2949 prevo = prevp->br_startoff + prevp->br_blockcount; 2950 } else 2951 prevo = 0; 2952 if (align_off != orig_off && align_off < prevo) 2953 align_off = prevo; 2954 /* 2955 * If the next block overlaps with this proposed allocation 2956 * then move the start back without adjusting the length, 2957 * but not before offset 0. 2958 * This may of course make the start overlap previous block, 2959 * and if we hit the offset 0 limit then the next block 2960 * can still overlap too. 2961 */ 2962 if (!eof && gotp->br_startoff != NULLFILEOFF) { 2963 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 2964 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 2965 nexto = gotp->br_startoff + gotp->br_blockcount; 2966 else 2967 nexto = gotp->br_startoff; 2968 } else 2969 nexto = NULLFILEOFF; 2970 if (!eof && 2971 align_off + align_alen != orig_end && 2972 align_off + align_alen > nexto) 2973 align_off = nexto > align_alen ? nexto - align_alen : 0; 2974 /* 2975 * If we're now overlapping the next or previous extent that 2976 * means we can't fit an extsz piece in this hole. Just move 2977 * the start forward to the first valid spot and set 2978 * the length so we hit the end. 2979 */ 2980 if (align_off != orig_off && align_off < prevo) 2981 align_off = prevo; 2982 if (align_off + align_alen != orig_end && 2983 align_off + align_alen > nexto && 2984 nexto != NULLFILEOFF) { 2985 ASSERT(nexto > prevo); 2986 align_alen = nexto - align_off; 2987 } 2988 2989 /* 2990 * If realtime, and the result isn't a multiple of the realtime 2991 * extent size we need to remove blocks until it is. 2992 */ 2993 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 2994 /* 2995 * We're not covering the original request, or 2996 * we won't be able to once we fix the length. 2997 */ 2998 if (orig_off < align_off || 2999 orig_end > align_off + align_alen || 3000 align_alen - temp < orig_alen) 3001 return -EINVAL; 3002 /* 3003 * Try to fix it by moving the start up. 
3004 */ 3005 if (align_off + temp <= orig_off) { 3006 align_alen -= temp; 3007 align_off += temp; 3008 } 3009 /* 3010 * Try to fix it by moving the end in. 3011 */ 3012 else if (align_off + align_alen - temp >= orig_end) 3013 align_alen -= temp; 3014 /* 3015 * Set the start to the minimum then trim the length. 3016 */ 3017 else { 3018 align_alen -= orig_off - align_off; 3019 align_off = orig_off; 3020 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3021 } 3022 /* 3023 * Result doesn't cover the request, fail it. 3024 */ 3025 if (orig_off < align_off || orig_end > align_off + align_alen) 3026 return -EINVAL; 3027 } else { 3028 ASSERT(orig_off >= align_off); 3029 /* see MAXEXTLEN handling above */ 3030 ASSERT(orig_end <= align_off + align_alen || 3031 align_alen + extsz > MAXEXTLEN); 3032 } 3033 3034 #ifdef DEBUG 3035 if (!eof && gotp->br_startoff != NULLFILEOFF) 3036 ASSERT(align_off + align_alen <= gotp->br_startoff); 3037 if (prevp->br_startoff != NULLFILEOFF) 3038 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3039 #endif 3040 3041 *lenp = align_alen; 3042 *offp = align_off; 3043 return 0; 3044 } 3045 3046 #define XFS_ALLOC_GAP_UNITS 4 3047 3048 void 3049 xfs_bmap_adjacent( 3050 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3051 { 3052 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3053 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3054 xfs_mount_t *mp; /* mount point structure */ 3055 int nullfb; /* true if ap->firstblock isn't set */ 3056 int rt; /* true if inode is realtime */ 3057 3058 #define ISVALID(x,y) \ 3059 (rt ? \ 3060 (x) < mp->m_sb.sb_rblocks : \ 3061 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3062 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3063 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3064 3065 mp = ap->ip->i_mount; 3066 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3067 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3068 xfs_alloc_is_userdata(ap->datatype); 3069 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3070 ap->tp->t_firstblock); 3071 /* 3072 * If allocating at eof, and there's a previous real block, 3073 * try to use its last block as our starting point. 3074 */ 3075 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3076 !isnullstartblock(ap->prev.br_startblock) && 3077 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3078 ap->prev.br_startblock)) { 3079 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3080 /* 3081 * Adjust for the gap between prevp and us. 3082 */ 3083 adjust = ap->offset - 3084 (ap->prev.br_startoff + ap->prev.br_blockcount); 3085 if (adjust && 3086 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3087 ap->blkno += adjust; 3088 } 3089 /* 3090 * If not at eof, then compare the two neighbor blocks. 3091 * Figure out whether either one gives us a good starting point, 3092 * and pick the better one. 3093 */ 3094 else if (!ap->eof) { 3095 xfs_fsblock_t gotbno; /* right side block number */ 3096 xfs_fsblock_t gotdiff=0; /* right side difference */ 3097 xfs_fsblock_t prevbno; /* left side block number */ 3098 xfs_fsblock_t prevdiff=0; /* left side difference */ 3099 3100 /* 3101 * If there's a previous (left) block, select a requested 3102 * start block based on it. 3103 */ 3104 if (ap->prev.br_startoff != NULLFILEOFF && 3105 !isnullstartblock(ap->prev.br_startblock) && 3106 (prevbno = ap->prev.br_startblock + 3107 ap->prev.br_blockcount) && 3108 ISVALID(prevbno, ap->prev.br_startblock)) { 3109 /* 3110 * Calculate gap to end of previous block. 
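 * Illustrative numbers: if the previous extent maps offsets [0,8)
 * to blocks 100..107 and we are allocating at offset 10, the gap is
 * 2 blocks, so block 110 is requested when it is valid; the new
 * blocks then land where they would sit if the hole were filled in.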
3111 */ 3112 adjust = prevdiff = ap->offset - 3113 (ap->prev.br_startoff + 3114 ap->prev.br_blockcount); 3115 /* 3116 * Figure the startblock based on the previous block's 3117 * end and the gap size. 3118 * Heuristic! 3119 * If the gap is large relative to the piece we're 3120 * allocating, or using it gives us an invalid block 3121 * number, then just use the end of the previous block. 3122 */ 3123 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3124 ISVALID(prevbno + prevdiff, 3125 ap->prev.br_startblock)) 3126 prevbno += adjust; 3127 else 3128 prevdiff += adjust; 3129 /* 3130 * If the firstblock forbids it, can't use it, 3131 * must use default. 3132 */ 3133 if (!rt && !nullfb && 3134 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) 3135 prevbno = NULLFSBLOCK; 3136 } 3137 /* 3138 * No previous block or can't follow it, just default. 3139 */ 3140 else 3141 prevbno = NULLFSBLOCK; 3142 /* 3143 * If there's a following (right) block, select a requested 3144 * start block based on it. 3145 */ 3146 if (!isnullstartblock(ap->got.br_startblock)) { 3147 /* 3148 * Calculate gap to start of next block. 3149 */ 3150 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3151 /* 3152 * Figure the startblock based on the next block's 3153 * start and the gap size. 3154 */ 3155 gotbno = ap->got.br_startblock; 3156 /* 3157 * Heuristic! 3158 * If the gap is large relative to the piece we're 3159 * allocating, or using it gives us an invalid block 3160 * number, then just use the start of the next block 3161 * offset by our length. 3162 */ 3163 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3164 ISVALID(gotbno - gotdiff, gotbno)) 3165 gotbno -= adjust; 3166 else if (ISVALID(gotbno - ap->length, gotbno)) { 3167 gotbno -= ap->length; 3168 gotdiff += adjust - ap->length; 3169 } else 3170 gotdiff += adjust; 3171 /* 3172 * If the firstblock forbids it, can't use it, 3173 * must use default. 3174 */ 3175 if (!rt && !nullfb && 3176 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) 3177 gotbno = NULLFSBLOCK; 3178 } 3179 /* 3180 * No next block, just default. 3181 */ 3182 else 3183 gotbno = NULLFSBLOCK; 3184 /* 3185 * If both valid, pick the better one, else the only good 3186 * one, else ap->blkno is already set (to 0 or the inode block). 3187 */ 3188 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3189 ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno; 3190 else if (prevbno != NULLFSBLOCK) 3191 ap->blkno = prevbno; 3192 else if (gotbno != NULLFSBLOCK) 3193 ap->blkno = gotbno; 3194 } 3195 #undef ISVALID 3196 } 3197 3198 static int 3199 xfs_bmap_longest_free_extent( 3200 struct xfs_trans *tp, 3201 xfs_agnumber_t ag, 3202 xfs_extlen_t *blen, 3203 int *notinit) 3204 { 3205 struct xfs_mount *mp = tp->t_mountp; 3206 struct xfs_perag *pag; 3207 xfs_extlen_t longest; 3208 int error = 0; 3209 3210 pag = xfs_perag_get(mp, ag); 3211 if (!pag->pagf_init) { 3212 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK); 3213 if (error) 3214 goto out; 3215 3216 if (!pag->pagf_init) { 3217 *notinit = 1; 3218 goto out; 3219 } 3220 } 3221 3222 longest = xfs_alloc_longest_free_extent(pag, 3223 xfs_alloc_min_freelist(mp, pag), 3224 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3225 if (*blen < longest) 3226 *blen = longest; 3227 3228 out: 3229 xfs_perag_put(pag); 3230 return error; 3231 } 3232 3233 static void 3234 xfs_bmap_select_minlen( 3235 struct xfs_bmalloca *ap, 3236 struct xfs_alloc_arg *args, 3237 xfs_extlen_t *blen, 3238 int notinit) 3239 { 3240 if (notinit || *blen < ap->minlen) { 3241 /* 3242 * Since we did a BUF_TRYLOCK above, it is possible that 3243 * there is space for this request. 3244 */ 3245 args->minlen = ap->minlen; 3246 } else if (*blen < args->maxlen) { 3247 /* 3248 * If the best seen length is less than the request length, 3249 * use the best as the minimum. 3250 */ 3251 args->minlen = *blen; 3252 } else { 3253 /* 3254 * Otherwise we've seen an extent as big as maxlen, use that 3255 * as the minimum. 3256 */ 3257 args->minlen = args->maxlen; 3258 } 3259 } 3260 3261 STATIC int 3262 xfs_bmap_btalloc_nullfb( 3263 struct xfs_bmalloca *ap, 3264 struct xfs_alloc_arg *args, 3265 xfs_extlen_t *blen) 3266 { 3267 struct xfs_mount *mp = ap->ip->i_mount; 3268 xfs_agnumber_t ag, startag; 3269 int notinit = 0; 3270 int error; 3271 3272 args->type = XFS_ALLOCTYPE_START_BNO; 3273 args->total = ap->total; 3274 3275 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3276 if (startag == NULLAGNUMBER) 3277 startag = ag = 0; 3278 3279 while (*blen < args->maxlen) { 3280 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3281 ¬init); 3282 if (error) 3283 return error; 3284 3285 if (++ag == mp->m_sb.sb_agcount) 3286 ag = 0; 3287 if (ag == startag) 3288 break; 3289 } 3290 3291 xfs_bmap_select_minlen(ap, args, blen, notinit); 3292 return 0; 3293 } 3294 3295 STATIC int 3296 xfs_bmap_btalloc_filestreams( 3297 struct xfs_bmalloca *ap, 3298 struct xfs_alloc_arg *args, 3299 xfs_extlen_t *blen) 3300 { 3301 struct xfs_mount *mp = ap->ip->i_mount; 3302 xfs_agnumber_t ag; 3303 int notinit = 0; 3304 int error; 3305 3306 args->type = XFS_ALLOCTYPE_NEAR_BNO; 3307 args->total = ap->total; 3308 3309 ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3310 if (ag == NULLAGNUMBER) 3311 ag = 0; 3312 3313 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init); 3314 if (error) 3315 return error; 3316 3317 if (*blen < args->maxlen) { 3318 error = xfs_filestream_new_ag(ap, &ag); 3319 if (error) 3320 return error; 3321 3322 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3323 ¬init); 3324 if (error) 3325 return error; 3326 3327 } 3328 3329 xfs_bmap_select_minlen(ap, args, blen, notinit); 3330 3331 /* 3332 * Set the failure fallback case to look in the selected AG as stream 3333 * may have moved. 
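 * (that is, if the allocation attempted by the caller fails, its
 * retry paths reset args.fsbno from ap->blkno and so probe the AG
 * chosen here instead of the stream's stale association).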
3334 */ 3335 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 3336 return 0; 3337 } 3338 3339 /* Update all inode and quota accounting for the allocation we just did. */ 3340 static void 3341 xfs_bmap_btalloc_accounting( 3342 struct xfs_bmalloca *ap, 3343 struct xfs_alloc_arg *args) 3344 { 3345 if (ap->flags & XFS_BMAPI_COWFORK) { 3346 /* 3347 * COW fork blocks are in-core only and thus are treated as 3348 * in-core quota reservation (like delalloc blocks) even when 3349 * converted to real blocks. The quota reservation is not 3350 * accounted to disk until blocks are remapped to the data 3351 * fork. So if these blocks were previously delalloc, we 3352 * already have quota reservation and there's nothing to do 3353 * yet. 3354 */ 3355 if (ap->wasdel) 3356 return; 3357 3358 /* 3359 * Otherwise, we've allocated blocks in a hole. The transaction 3360 * has acquired in-core quota reservation for this extent. 3361 * Rather than account these as real blocks, however, we reduce 3362 * the transaction quota reservation based on the allocation. 3363 * This essentially transfers the transaction quota reservation 3364 * to that of a delalloc extent. 3365 */ 3366 ap->ip->i_delayed_blks += args->len; 3367 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 3368 -(long)args->len); 3369 return; 3370 } 3371 3372 /* data/attr fork only */ 3373 ap->ip->i_d.di_nblocks += args->len; 3374 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3375 if (ap->wasdel) 3376 ap->ip->i_delayed_blks -= args->len; 3377 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3378 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3379 args->len); 3380 } 3381 3382 STATIC int 3383 xfs_bmap_btalloc( 3384 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3385 { 3386 xfs_mount_t *mp; /* mount point structure */ 3387 xfs_alloctype_t atype = 0; /* type for allocation routines */ 3388 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3389 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3390 xfs_agnumber_t ag; 3391 xfs_alloc_arg_t args; 3392 xfs_fileoff_t orig_offset; 3393 xfs_extlen_t orig_length; 3394 xfs_extlen_t blen; 3395 xfs_extlen_t nextminlen = 0; 3396 int nullfb; /* true if ap->firstblock isn't set */ 3397 int isaligned; 3398 int tryagain; 3399 int error; 3400 int stripe_align; 3401 3402 ASSERT(ap->length); 3403 orig_offset = ap->offset; 3404 orig_length = ap->length; 3405 3406 mp = ap->ip->i_mount; 3407 3408 /* stripe alignment for allocation is determined by mount parameters */ 3409 stripe_align = 0; 3410 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 3411 stripe_align = mp->m_swidth; 3412 else if (mp->m_dalign) 3413 stripe_align = mp->m_dalign; 3414 3415 if (ap->flags & XFS_BMAPI_COWFORK) 3416 align = xfs_get_cowextsz_hint(ap->ip); 3417 else if (xfs_alloc_is_userdata(ap->datatype)) 3418 align = xfs_get_extsz_hint(ap->ip); 3419 if (align) { 3420 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3421 align, 0, ap->eof, 0, ap->conv, 3422 &ap->offset, &ap->length); 3423 ASSERT(!error); 3424 ASSERT(ap->length); 3425 } 3426 3427 3428 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3429 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3430 ap->tp->t_firstblock); 3431 if (nullfb) { 3432 if (xfs_alloc_is_userdata(ap->datatype) && 3433 xfs_inode_is_filestream(ap->ip)) { 3434 ag = xfs_filestream_lookup_ag(ap->ip); 3435 ag = (ag != NULLAGNUMBER) ? 
ag : 0; 3436 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 3437 } else { 3438 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 3439 } 3440 } else 3441 ap->blkno = ap->tp->t_firstblock; 3442 3443 xfs_bmap_adjacent(ap); 3444 3445 /* 3446 * If allowed, use ap->blkno; otherwise must use firstblock since 3447 * it's in the right allocation group. 3448 */ 3449 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 3450 ; 3451 else 3452 ap->blkno = ap->tp->t_firstblock; 3453 /* 3454 * Normal allocation, done through xfs_alloc_vextent. 3455 */ 3456 tryagain = isaligned = 0; 3457 memset(&args, 0, sizeof(args)); 3458 args.tp = ap->tp; 3459 args.mp = mp; 3460 args.fsbno = ap->blkno; 3461 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; 3462 3463 /* Trim the allocation back to the maximum an AG can fit. */ 3464 args.maxlen = min(ap->length, mp->m_ag_max_usable); 3465 blen = 0; 3466 if (nullfb) { 3467 /* 3468 * Search for an allocation group with a single extent large 3469 * enough for the request. If one isn't found, then adjust 3470 * the minimum allocation size to the largest space found. 3471 */ 3472 if (xfs_alloc_is_userdata(ap->datatype) && 3473 xfs_inode_is_filestream(ap->ip)) 3474 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); 3475 else 3476 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 3477 if (error) 3478 return error; 3479 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3480 if (xfs_inode_is_filestream(ap->ip)) 3481 args.type = XFS_ALLOCTYPE_FIRST_AG; 3482 else 3483 args.type = XFS_ALLOCTYPE_START_BNO; 3484 args.total = args.minlen = ap->minlen; 3485 } else { 3486 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3487 args.total = ap->total; 3488 args.minlen = ap->minlen; 3489 } 3490 /* apply extent size hints if obtained earlier */ 3491 if (align) { 3492 args.prod = align; 3493 div_u64_rem(ap->offset, args.prod, &args.mod); 3494 if (args.mod) 3495 args.mod = args.prod - args.mod; 3496 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3497 args.prod = 1; 3498 args.mod = 0; 3499 } else { 3500 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3501 div_u64_rem(ap->offset, args.prod, &args.mod); 3502 if (args.mod) 3503 args.mod = args.prod - args.mod; 3504 } 3505 /* 3506 * If we are not low on available data blocks, and the 3507 * underlying logical volume manager is a stripe, and 3508 * the file offset is zero then try to allocate data 3509 * blocks on stripe unit boundary. 3510 * NOTE: ap->aeof is only set if the allocation length 3511 * is >= the stripe unit and the allocation offset is 3512 * at the end of file. 3513 */ 3514 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) { 3515 if (!ap->offset) { 3516 args.alignment = stripe_align; 3517 atype = args.type; 3518 isaligned = 1; 3519 /* 3520 * Adjust for alignment 3521 */ 3522 if (blen > args.alignment && blen <= args.maxlen) 3523 args.minlen = blen - args.alignment; 3524 args.minalignslop = 0; 3525 } else { 3526 /* 3527 * First try an exact bno allocation. 3528 * If it fails then do a near or start bno 3529 * allocation with alignment turned on. 3530 */ 3531 atype = args.type; 3532 tryagain = 1; 3533 args.type = XFS_ALLOCTYPE_THIS_BNO; 3534 args.alignment = 1; 3535 /* 3536 * Compute the minlen+alignment for the 3537 * next case. Set slop so that the value 3538 * of minlen+alignment+slop doesn't go up 3539 * between the calls. 
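 * Illustrative numbers: with stripe_align = 4, args.minlen = 6 and
 * nextminlen = 8, slop is 8 + 4 - 6 - 1 = 5, so both attempts cost
 * the same reservation: 6 + 1 + 5 = 8 + 4 + 0 = 12.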
3540 */ 3541 if (blen > stripe_align && blen <= args.maxlen) 3542 nextminlen = blen - stripe_align; 3543 else 3544 nextminlen = args.minlen; 3545 if (nextminlen + stripe_align > args.minlen + 1) 3546 args.minalignslop = 3547 nextminlen + stripe_align - 3548 args.minlen - 1; 3549 else 3550 args.minalignslop = 0; 3551 } 3552 } else { 3553 args.alignment = 1; 3554 args.minalignslop = 0; 3555 } 3556 args.minleft = ap->minleft; 3557 args.wasdel = ap->wasdel; 3558 args.resv = XFS_AG_RESV_NONE; 3559 args.datatype = ap->datatype; 3560 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) 3561 args.ip = ap->ip; 3562 3563 error = xfs_alloc_vextent(&args); 3564 if (error) 3565 return error; 3566 3567 if (tryagain && args.fsbno == NULLFSBLOCK) { 3568 /* 3569 * Exact allocation failed. Now try with alignment 3570 * turned on. 3571 */ 3572 args.type = atype; 3573 args.fsbno = ap->blkno; 3574 args.alignment = stripe_align; 3575 args.minlen = nextminlen; 3576 args.minalignslop = 0; 3577 isaligned = 1; 3578 if ((error = xfs_alloc_vextent(&args))) 3579 return error; 3580 } 3581 if (isaligned && args.fsbno == NULLFSBLOCK) { 3582 /* 3583 * allocation failed, so turn off alignment and 3584 * try again. 3585 */ 3586 args.type = atype; 3587 args.fsbno = ap->blkno; 3588 args.alignment = 0; 3589 if ((error = xfs_alloc_vextent(&args))) 3590 return error; 3591 } 3592 if (args.fsbno == NULLFSBLOCK && nullfb && 3593 args.minlen > ap->minlen) { 3594 args.minlen = ap->minlen; 3595 args.type = XFS_ALLOCTYPE_START_BNO; 3596 args.fsbno = ap->blkno; 3597 if ((error = xfs_alloc_vextent(&args))) 3598 return error; 3599 } 3600 if (args.fsbno == NULLFSBLOCK && nullfb) { 3601 args.fsbno = 0; 3602 args.type = XFS_ALLOCTYPE_FIRST_AG; 3603 args.total = ap->minlen; 3604 if ((error = xfs_alloc_vextent(&args))) 3605 return error; 3606 ap->tp->t_flags |= XFS_TRANS_LOWMODE; 3607 } 3608 if (args.fsbno != NULLFSBLOCK) { 3609 /* 3610 * check the allocation happened at the same or higher AG than 3611 * the first block that was allocated. 3612 */ 3613 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK || 3614 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <= 3615 XFS_FSB_TO_AGNO(mp, args.fsbno)); 3616 3617 ap->blkno = args.fsbno; 3618 if (ap->tp->t_firstblock == NULLFSBLOCK) 3619 ap->tp->t_firstblock = args.fsbno; 3620 ASSERT(nullfb || fb_agno <= args.agno); 3621 ap->length = args.len; 3622 /* 3623 * If the extent size hint is active, we tried to round the 3624 * caller's allocation request offset down to extsz and the 3625 * length up to another extsz boundary. If we found a free 3626 * extent we mapped it in starting at this new offset. If the 3627 * newly mapped space isn't long enough to cover any of the 3628 * range of offsets that was originally requested, move the 3629 * mapping up so that we can fill as much of the caller's 3630 * original request as possible. Free space is apparently 3631 * very fragmented so we're unlikely to be able to satisfy the 3632 * hints anyway. 3633 */ 3634 if (ap->length <= orig_length) 3635 ap->offset = orig_offset; 3636 else if (ap->offset + ap->length < orig_offset + orig_length) 3637 ap->offset = orig_offset + orig_length - ap->length; 3638 xfs_bmap_btalloc_accounting(ap, &args); 3639 } else { 3640 ap->blkno = NULLFSBLOCK; 3641 ap->length = 0; 3642 } 3643 return 0; 3644 } 3645 3646 /* 3647 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 3648 * It figures out where to ask the underlying allocator to put the new extent. 
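 * User data on realtime inodes goes to the realtime allocator;
 * everything else is serviced by the block allocator via
 * xfs_bmap_btalloc.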
3649 */ 3650 STATIC int 3651 xfs_bmap_alloc( 3652 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3653 { 3654 if (XFS_IS_REALTIME_INODE(ap->ip) && 3655 xfs_alloc_is_userdata(ap->datatype)) 3656 return xfs_bmap_rtalloc(ap); 3657 return xfs_bmap_btalloc(ap); 3658 } 3659 3660 /* Trim extent to fit a logical block range. */ 3661 void 3662 xfs_trim_extent( 3663 struct xfs_bmbt_irec *irec, 3664 xfs_fileoff_t bno, 3665 xfs_filblks_t len) 3666 { 3667 xfs_fileoff_t distance; 3668 xfs_fileoff_t end = bno + len; 3669 3670 if (irec->br_startoff + irec->br_blockcount <= bno || 3671 irec->br_startoff >= end) { 3672 irec->br_blockcount = 0; 3673 return; 3674 } 3675 3676 if (irec->br_startoff < bno) { 3677 distance = bno - irec->br_startoff; 3678 if (isnullstartblock(irec->br_startblock)) 3679 irec->br_startblock = DELAYSTARTBLOCK; 3680 if (irec->br_startblock != DELAYSTARTBLOCK && 3681 irec->br_startblock != HOLESTARTBLOCK) 3682 irec->br_startblock += distance; 3683 irec->br_startoff += distance; 3684 irec->br_blockcount -= distance; 3685 } 3686 3687 if (end < irec->br_startoff + irec->br_blockcount) { 3688 distance = irec->br_startoff + irec->br_blockcount - end; 3689 irec->br_blockcount -= distance; 3690 } 3691 } 3692 3693 /* 3694 * Trim the returned map to the required bounds 3695 */ 3696 STATIC void 3697 xfs_bmapi_trim_map( 3698 struct xfs_bmbt_irec *mval, 3699 struct xfs_bmbt_irec *got, 3700 xfs_fileoff_t *bno, 3701 xfs_filblks_t len, 3702 xfs_fileoff_t obno, 3703 xfs_fileoff_t end, 3704 int n, 3705 int flags) 3706 { 3707 if ((flags & XFS_BMAPI_ENTIRE) || 3708 got->br_startoff + got->br_blockcount <= obno) { 3709 *mval = *got; 3710 if (isnullstartblock(got->br_startblock)) 3711 mval->br_startblock = DELAYSTARTBLOCK; 3712 return; 3713 } 3714 3715 if (obno > *bno) 3716 *bno = obno; 3717 ASSERT((*bno >= obno) || (n == 0)); 3718 ASSERT(*bno < end); 3719 mval->br_startoff = *bno; 3720 if (isnullstartblock(got->br_startblock)) 3721 mval->br_startblock = DELAYSTARTBLOCK; 3722 else 3723 mval->br_startblock = got->br_startblock + 3724 (*bno - got->br_startoff); 3725 /* 3726 * Return the minimum of what we got and what we asked for for 3727 * the length. We can use the len variable here because it is 3728 * modified below and we could have been there before coming 3729 * here if the first part of the allocation didn't overlap what 3730 * was asked for. 
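 * Illustrative example: if got maps offsets [10,30) to blocks
 * 200..219 and the request is bno 15, len 10, the trimmed map has
 * startoff 15, startblock 205 and blockcount min(25 - 15, 20 - 5),
 * i.e. 10.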
3731 */ 3732 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3733 got->br_blockcount - (*bno - got->br_startoff)); 3734 mval->br_state = got->br_state; 3735 ASSERT(mval->br_blockcount <= len); 3736 return; 3737 } 3738 3739 /* 3740 * Update and validate the extent map to return 3741 */ 3742 STATIC void 3743 xfs_bmapi_update_map( 3744 struct xfs_bmbt_irec **map, 3745 xfs_fileoff_t *bno, 3746 xfs_filblks_t *len, 3747 xfs_fileoff_t obno, 3748 xfs_fileoff_t end, 3749 int *n, 3750 int flags) 3751 { 3752 xfs_bmbt_irec_t *mval = *map; 3753 3754 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3755 ((mval->br_startoff + mval->br_blockcount) <= end)); 3756 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3757 (mval->br_startoff < obno)); 3758 3759 *bno = mval->br_startoff + mval->br_blockcount; 3760 *len = end - *bno; 3761 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3762 /* update previous map with new information */ 3763 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3764 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3765 ASSERT(mval->br_state == mval[-1].br_state); 3766 mval[-1].br_blockcount = mval->br_blockcount; 3767 mval[-1].br_state = mval->br_state; 3768 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3769 mval[-1].br_startblock != DELAYSTARTBLOCK && 3770 mval[-1].br_startblock != HOLESTARTBLOCK && 3771 mval->br_startblock == mval[-1].br_startblock + 3772 mval[-1].br_blockcount && 3773 mval[-1].br_state == mval->br_state) { 3774 ASSERT(mval->br_startoff == 3775 mval[-1].br_startoff + mval[-1].br_blockcount); 3776 mval[-1].br_blockcount += mval->br_blockcount; 3777 } else if (*n > 0 && 3778 mval->br_startblock == DELAYSTARTBLOCK && 3779 mval[-1].br_startblock == DELAYSTARTBLOCK && 3780 mval->br_startoff == 3781 mval[-1].br_startoff + mval[-1].br_blockcount) { 3782 mval[-1].br_blockcount += mval->br_blockcount; 3783 mval[-1].br_state = mval->br_state; 3784 } else if (!((*n == 0) && 3785 ((mval->br_startoff + mval->br_blockcount) <= 3786 obno))) { 3787 mval++; 3788 (*n)++; 3789 } 3790 *map = mval; 3791 } 3792 3793 /* 3794 * Map file blocks to filesystem blocks without allocation. 3795 */ 3796 int 3797 xfs_bmapi_read( 3798 struct xfs_inode *ip, 3799 xfs_fileoff_t bno, 3800 xfs_filblks_t len, 3801 struct xfs_bmbt_irec *mval, 3802 int *nmap, 3803 int flags) 3804 { 3805 struct xfs_mount *mp = ip->i_mount; 3806 struct xfs_ifork *ifp; 3807 struct xfs_bmbt_irec got; 3808 xfs_fileoff_t obno; 3809 xfs_fileoff_t end; 3810 struct xfs_iext_cursor icur; 3811 int error; 3812 bool eof = false; 3813 int n = 0; 3814 int whichfork = xfs_bmapi_whichfork(flags); 3815 3816 ASSERT(*nmap >= 1); 3817 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| 3818 XFS_BMAPI_COWFORK))); 3819 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3820 3821 if (unlikely(XFS_TEST_ERROR( 3822 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 3823 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 3824 mp, XFS_ERRTAG_BMAPIFORMAT))) { 3825 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 3826 return -EFSCORRUPTED; 3827 } 3828 3829 if (XFS_FORCED_SHUTDOWN(mp)) 3830 return -EIO; 3831 3832 XFS_STATS_INC(mp, xs_blk_mapr); 3833 3834 ifp = XFS_IFORK_PTR(ip, whichfork); 3835 3836 /* No CoW fork? Return a hole. 
	 */
	if (whichfork == XFS_COW_FORK && !ifp) {
		mval->br_startoff = bno;
		mval->br_startblock = HOLESTARTBLOCK;
		mval->br_blockcount = len;
		mval->br_state = XFS_EXT_NORM;
		*nmap = 1;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole. */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* Set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}

/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use icur to
 * look up the updated record in the inode fork.
 */
int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	struct xfs_iext_cursor	*icur,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extlen_t		alen;
	xfs_extlen_t		indlen;
	int			error;
	xfs_fileoff_t		aoff = off;

	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/* Figure out the extent size, adjust alen */
	if (whichfork == XFS_COW_FORK) {
		struct xfs_bmbt_irec	prev;
		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);

		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
			prev.br_startoff = NULLFILEOFF;

		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks.  This number gets adjusted later.
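	 * (It is transaction-less because delayed allocation runs in the
	 * write path without a transaction context; the accounting is
	 * settled for real when the delalloc extent is eventually converted
	 * to allocated blocks.)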
	 * We return immediately if the reservation fails, since no blocks
	 * have been allocated yet.
	 */
	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
					      XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;

	/*
	 * Update the counters for alen and indlen as two separate steps,
	 * since the two reservations could be satisfied from different
	 * places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
	if (error)
		goto out_unreserve_quota;

	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
	if (error)
		goto out_unreserve_blocks;

	ip->i_delayed_blks += alen;

	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;

	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
	 */
	if (whichfork == XFS_DATA_FORK && prealloc)
		xfs_inode_set_eofblocks_tag(ip);
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	return 0;

out_unreserve_blocks:
	xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
						XFS_QMOPT_RES_REGBLKS);
	return error;
}

static int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	ASSERT(bma->length > 0);

	/*
	 * For the wasdelay case we could allocate only the range asked for
	 * in this bmap call, but converting the entire delayed extent at
	 * once is better.
	 */
	if (bma->wasdel) {
		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
		bma->offset = bma->got.br_startoff;
		xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
	} else {
		bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
		if (!bma->eof)
			bma->length = XFS_FILBLKS_MIN(bma->length,
					bma->got.br_startoff - bma->offset);
	}

	/*
	 * Set the data type being allocated. For the data fork, the first data
	 * in the file is treated differently to all other allocations. For the
	 * attribute fork, we only need to ensure the allocated range is not on
	 * the busy list.
	 */
	if (!(bma->flags & XFS_BMAPI_METADATA)) {
		bma->datatype = XFS_ALLOC_NOBUSY;
		if (whichfork == XFS_DATA_FORK) {
			if (bma->offset == 0)
				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
			else
				bma->datatype |= XFS_ALLOC_USERDATA;
		}
		if (bma->flags & XFS_BMAPI_ZERO)
			bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
	}

	bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;

	/*
	 * Only want to do the alignment at the eof if it is userdata and
	 * allocation length is larger than a stripe unit.
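	 * (xfs_bmap_isaeof() records in bma->aeof whether this allocation
	 * ends at EOF; the allocator later uses that to decide whether to
	 * try stripe-aligning the new extent.)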
4051 */ 4052 if (mp->m_dalign && bma->length >= mp->m_dalign && 4053 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4054 error = xfs_bmap_isaeof(bma, whichfork); 4055 if (error) 4056 return error; 4057 } 4058 4059 error = xfs_bmap_alloc(bma); 4060 if (error) 4061 return error; 4062 4063 if (bma->blkno == NULLFSBLOCK) 4064 return 0; 4065 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) 4066 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4067 /* 4068 * Bump the number of extents we've allocated 4069 * in this call. 4070 */ 4071 bma->nallocs++; 4072 4073 if (bma->cur) 4074 bma->cur->bc_private.b.flags = 4075 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4076 4077 bma->got.br_startoff = bma->offset; 4078 bma->got.br_startblock = bma->blkno; 4079 bma->got.br_blockcount = bma->length; 4080 bma->got.br_state = XFS_EXT_NORM; 4081 4082 /* 4083 * In the data fork, a wasdelay extent has been initialized, so 4084 * shouldn't be flagged as unwritten. 4085 * 4086 * For the cow fork, however, we convert delalloc reservations 4087 * (extents allocated for speculative preallocation) to 4088 * allocated unwritten extents, and only convert the unwritten 4089 * extents to real extents when we're about to write the data. 4090 */ 4091 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && 4092 (bma->flags & XFS_BMAPI_PREALLOC)) 4093 bma->got.br_state = XFS_EXT_UNWRITTEN; 4094 4095 if (bma->wasdel) 4096 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4097 else 4098 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4099 whichfork, &bma->icur, &bma->cur, &bma->got, 4100 &bma->logflags, bma->flags); 4101 4102 bma->logflags |= tmp_logflags; 4103 if (error) 4104 return error; 4105 4106 /* 4107 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4108 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4109 * the neighbouring ones. 4110 */ 4111 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4112 4113 ASSERT(bma->got.br_startoff <= bma->offset); 4114 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4115 bma->offset + bma->length); 4116 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4117 bma->got.br_state == XFS_EXT_UNWRITTEN); 4118 return 0; 4119 } 4120 4121 STATIC int 4122 xfs_bmapi_convert_unwritten( 4123 struct xfs_bmalloca *bma, 4124 struct xfs_bmbt_irec *mval, 4125 xfs_filblks_t len, 4126 int flags) 4127 { 4128 int whichfork = xfs_bmapi_whichfork(flags); 4129 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4130 int tmp_logflags = 0; 4131 int error; 4132 4133 /* check if we need to do unwritten->real conversion */ 4134 if (mval->br_state == XFS_EXT_UNWRITTEN && 4135 (flags & XFS_BMAPI_PREALLOC)) 4136 return 0; 4137 4138 /* check if we need to do real->unwritten conversion */ 4139 if (mval->br_state == XFS_EXT_NORM && 4140 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4141 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4142 return 0; 4143 4144 /* 4145 * Modify (by adding) the state flag, if writing. 4146 */ 4147 ASSERT(mval->br_blockcount <= len); 4148 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4149 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4150 bma->ip, whichfork); 4151 } 4152 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4153 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4154 4155 /* 4156 * Before insertion into the bmbt, zero the range being converted 4157 * if required. 
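	 * (Zeroing here, before the state flip is committed, should mean a
	 * crash cannot expose stale block contents through an extent that
	 * the log says is written.)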
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->icur, &bma->cur, mval, &tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
	 */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}

static inline xfs_extlen_t
xfs_bmapi_minleft(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			fork)
{
	if (tp && tp->t_firstblock != NULLFSBLOCK)
		return 0;
	if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE)
		return 1;
	return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1;
}

/*
 * Log whatever the flags say, even on error. Otherwise we might miss detecting
 * a case where the data is changed, there's an error, and it's not logged, so
 * we don't shut down when we should. Don't bother logging extents/btree
 * changes if we converted to the other format.
 */
static void
xfs_bmapi_finish(
	struct xfs_bmalloca	*bma,
	int			whichfork,
	int			error)
{
	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		bma->logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE)
		bma->logflags &= ~xfs_ilog_fbroot(whichfork);

	if (bma->logflags)
		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
	if (bma->cur)
		xfs_btree_del_cursor(bma->cur, error);
}

/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary.  Detailed behaviour is controlled by the flags
 * parameter.  Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode */
	xfs_fileoff_t		bno,	/* starting file offs. mapped */
	xfs_filblks_t		len,	/* length to map in file */
	int			flags,	/* XFS_BMAPI_... */
	xfs_extlen_t		total,	/* total blocks needed */
	struct xfs_bmbt_irec	*mval,	/* output: map values */
	int			*nmap)	/* i/o: mval size/count */
{
	struct xfs_bmalloca	bma = {
		.tp		= tp,
		.ip		= ip,
		.total		= total,
	};
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
	int			whichfork;	/* data or attr fork */

#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif
	whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(tp != NULL);
	ASSERT(len > 0);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * We can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			goto error0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
		eof = true;
	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	n = 0;
	end = bno + len;
	obno = bno;
	while (bno < end && n < *nmap) {
		bool	need_alloc = false, wasdelay = false;

		/* in hole or beyond EOF? */
		if (eof || bma.got.br_startoff > bno) {
			/*
			 * CoW fork conversions should /never/ hit EOF or
			 * holes.  There should always be something for us
			 * to work on.
			 */
			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
				 (flags & XFS_BMAPI_COWFORK)));

			need_alloc = true;
		} else if (isnullstartblock(bma.got.br_startblock)) {
			wasdelay = true;
		}

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
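		 * (need_alloc: we are writing into a hole or beyond EOF,
		 * so fresh blocks must be allocated; wasdelay: an existing
		 * delalloc reservation covers bno and is converted to real
		 * blocks instead.)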
4355 */ 4356 if (need_alloc || wasdelay) { 4357 bma.eof = eof; 4358 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4359 bma.wasdel = wasdelay; 4360 bma.offset = bno; 4361 bma.flags = flags; 4362 4363 /* 4364 * There's a 32/64 bit type mismatch between the 4365 * allocation length request (which can be 64 bits in 4366 * length) and the bma length request, which is 4367 * xfs_extlen_t and therefore 32 bits. Hence we have to 4368 * check for 32-bit overflows and handle them here. 4369 */ 4370 if (len > (xfs_filblks_t)MAXEXTLEN) 4371 bma.length = MAXEXTLEN; 4372 else 4373 bma.length = len; 4374 4375 ASSERT(len > 0); 4376 ASSERT(bma.length > 0); 4377 error = xfs_bmapi_allocate(&bma); 4378 if (error) 4379 goto error0; 4380 if (bma.blkno == NULLFSBLOCK) 4381 break; 4382 4383 /* 4384 * If this is a CoW allocation, record the data in 4385 * the refcount btree for orphan recovery. 4386 */ 4387 if (whichfork == XFS_COW_FORK) { 4388 error = xfs_refcount_alloc_cow_extent(tp, 4389 bma.blkno, bma.length); 4390 if (error) 4391 goto error0; 4392 } 4393 } 4394 4395 /* Deal with the allocated space we found. */ 4396 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4397 end, n, flags); 4398 4399 /* Execute unwritten extent conversion if necessary */ 4400 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4401 if (error == -EAGAIN) 4402 continue; 4403 if (error) 4404 goto error0; 4405 4406 /* update the extent map to return */ 4407 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4408 4409 /* 4410 * If we're done, stop now. Stop when we've allocated 4411 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4412 * the transaction may get too big. 4413 */ 4414 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4415 break; 4416 4417 /* Else go on to the next record. */ 4418 bma.prev = bma.got; 4419 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got)) 4420 eof = true; 4421 } 4422 *nmap = n; 4423 4424 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4425 whichfork); 4426 if (error) 4427 goto error0; 4428 4429 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || 4430 XFS_IFORK_NEXTENTS(ip, whichfork) > 4431 XFS_IFORK_MAXEXT(ip, whichfork)); 4432 xfs_bmapi_finish(&bma, whichfork, 0); 4433 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4434 orig_nmap, *nmap); 4435 return 0; 4436 error0: 4437 xfs_bmapi_finish(&bma, whichfork, error); 4438 return error; 4439 } 4440 4441 /* 4442 * Convert an existing delalloc extent to real blocks based on file offset. This 4443 * attempts to allocate the entire delalloc extent and may require multiple 4444 * invocations to allocate the target offset if a large enough physical extent 4445 * is not available. 4446 */ 4447 int 4448 xfs_bmapi_convert_delalloc( 4449 struct xfs_inode *ip, 4450 int whichfork, 4451 xfs_fileoff_t offset_fsb, 4452 struct xfs_bmbt_irec *imap, 4453 unsigned int *seq) 4454 { 4455 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4456 struct xfs_mount *mp = ip->i_mount; 4457 struct xfs_bmalloca bma = { NULL }; 4458 struct xfs_trans *tp; 4459 int error; 4460 4461 /* 4462 * Space for the extent and indirect blocks was reserved when the 4463 * delalloc extent was created so there's no need to do so here. 
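	 * (Hence the zero block reservation and XFS_TRANS_RESERVE in the
	 * transaction allocation below: the transaction may dip into the
	 * reserved block pool because the space was already accounted for
	 * when the delalloc extent was created.)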
4464 */ 4465 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 4466 XFS_TRANS_RESERVE, &tp); 4467 if (error) 4468 return error; 4469 4470 xfs_ilock(ip, XFS_ILOCK_EXCL); 4471 xfs_trans_ijoin(tp, ip, 0); 4472 4473 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) || 4474 bma.got.br_startoff > offset_fsb) { 4475 /* 4476 * No extent found in the range we are trying to convert. This 4477 * should only happen for the COW fork, where another thread 4478 * might have moved the extent to the data fork in the meantime. 4479 */ 4480 WARN_ON_ONCE(whichfork != XFS_COW_FORK); 4481 error = -EAGAIN; 4482 goto out_trans_cancel; 4483 } 4484 4485 /* 4486 * If we find a real extent here we raced with another thread converting 4487 * the extent. Just return the real extent at this offset. 4488 */ 4489 if (!isnullstartblock(bma.got.br_startblock)) { 4490 *imap = bma.got; 4491 *seq = READ_ONCE(ifp->if_seq); 4492 goto out_trans_cancel; 4493 } 4494 4495 bma.tp = tp; 4496 bma.ip = ip; 4497 bma.wasdel = true; 4498 bma.offset = bma.got.br_startoff; 4499 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN); 4500 bma.total = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK); 4501 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4502 if (whichfork == XFS_COW_FORK) 4503 bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC; 4504 4505 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4506 bma.prev.br_startoff = NULLFILEOFF; 4507 4508 error = xfs_bmapi_allocate(&bma); 4509 if (error) 4510 goto out_finish; 4511 4512 error = -ENOSPC; 4513 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK)) 4514 goto out_finish; 4515 error = -EFSCORRUPTED; 4516 if (WARN_ON_ONCE(!bma.got.br_startblock && !XFS_IS_REALTIME_INODE(ip))) 4517 goto out_finish; 4518 4519 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length)); 4520 XFS_STATS_INC(mp, xs_xstrat_quick); 4521 4522 ASSERT(!isnullstartblock(bma.got.br_startblock)); 4523 *imap = bma.got; 4524 *seq = READ_ONCE(ifp->if_seq); 4525 4526 if (whichfork == XFS_COW_FORK) { 4527 error = xfs_refcount_alloc_cow_extent(tp, bma.blkno, 4528 bma.length); 4529 if (error) 4530 goto out_finish; 4531 } 4532 4533 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4534 whichfork); 4535 if (error) 4536 goto out_finish; 4537 4538 xfs_bmapi_finish(&bma, whichfork, 0); 4539 error = xfs_trans_commit(tp); 4540 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4541 return error; 4542 4543 out_finish: 4544 xfs_bmapi_finish(&bma, whichfork, error); 4545 out_trans_cancel: 4546 xfs_trans_cancel(tp); 4547 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4548 return error; 4549 } 4550 4551 int 4552 xfs_bmapi_remap( 4553 struct xfs_trans *tp, 4554 struct xfs_inode *ip, 4555 xfs_fileoff_t bno, 4556 xfs_filblks_t len, 4557 xfs_fsblock_t startblock, 4558 int flags) 4559 { 4560 struct xfs_mount *mp = ip->i_mount; 4561 struct xfs_ifork *ifp; 4562 struct xfs_btree_cur *cur = NULL; 4563 struct xfs_bmbt_irec got; 4564 struct xfs_iext_cursor icur; 4565 int whichfork = xfs_bmapi_whichfork(flags); 4566 int logflags = 0, error; 4567 4568 ifp = XFS_IFORK_PTR(ip, whichfork); 4569 ASSERT(len > 0); 4570 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN); 4571 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4572 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC | 4573 XFS_BMAPI_NORMAP))); 4574 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != 4575 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); 4576 4577 if (unlikely(XFS_TEST_ERROR( 4578 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4579 
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4580 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4581 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp); 4582 return -EFSCORRUPTED; 4583 } 4584 4585 if (XFS_FORCED_SHUTDOWN(mp)) 4586 return -EIO; 4587 4588 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4589 error = xfs_iread_extents(tp, ip, whichfork); 4590 if (error) 4591 return error; 4592 } 4593 4594 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) { 4595 /* make sure we only reflink into a hole. */ 4596 ASSERT(got.br_startoff > bno); 4597 ASSERT(got.br_startoff - bno >= len); 4598 } 4599 4600 ip->i_d.di_nblocks += len; 4601 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4602 4603 if (ifp->if_flags & XFS_IFBROOT) { 4604 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 4605 cur->bc_private.b.flags = 0; 4606 } 4607 4608 got.br_startoff = bno; 4609 got.br_startblock = startblock; 4610 got.br_blockcount = len; 4611 if (flags & XFS_BMAPI_PREALLOC) 4612 got.br_state = XFS_EXT_UNWRITTEN; 4613 else 4614 got.br_state = XFS_EXT_NORM; 4615 4616 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 4617 &cur, &got, &logflags, flags); 4618 if (error) 4619 goto error0; 4620 4621 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork); 4622 4623 error0: 4624 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) 4625 logflags &= ~XFS_ILOG_DEXT; 4626 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) 4627 logflags &= ~XFS_ILOG_DBROOT; 4628 4629 if (logflags) 4630 xfs_trans_log_inode(tp, ip, logflags); 4631 if (cur) 4632 xfs_btree_del_cursor(cur, error); 4633 return error; 4634 } 4635 4636 /* 4637 * When a delalloc extent is split (e.g., due to a hole punch), the original 4638 * indlen reservation must be shared across the two new extents that are left 4639 * behind. 4640 * 4641 * Given the original reservation and the worst case indlen for the two new 4642 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4643 * reservation fairly across the two new extents. If necessary, steal available 4644 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4645 * ores == 1). The number of stolen blocks is returned. The availability and 4646 * subsequent accounting of stolen blocks is the responsibility of the caller. 4647 */ 4648 static xfs_filblks_t 4649 xfs_bmap_split_indlen( 4650 xfs_filblks_t ores, /* original res. */ 4651 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4652 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4653 xfs_filblks_t avail) /* stealable blocks */ 4654 { 4655 xfs_filblks_t len1 = *indlen1; 4656 xfs_filblks_t len2 = *indlen2; 4657 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4658 xfs_filblks_t stolen = 0; 4659 xfs_filblks_t resfactor; 4660 4661 /* 4662 * Steal as many blocks as we can to try and satisfy the worst case 4663 * indlen for both new extents. 4664 */ 4665 if (ores < nres && avail) 4666 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4667 ores += stolen; 4668 4669 /* nothing else to do if we've satisfied the new reservation */ 4670 if (ores >= nres) 4671 return stolen; 4672 4673 /* 4674 * We can't meet the total required reservation for the two extents. 4675 * Calculate the percent of the overall shortage between both extents 4676 * and apply this percentage to each of the requested indlen values. 4677 * This distributes the shortage fairly and reduces the chances that one 4678 * of the two extents is left with nothing when extents are repeatedly 4679 * split. 
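	 *
	 * For example (made-up numbers): with ores == 10 and worst-case
	 * indlens of 8 and 6, resfactor becomes 10 * 100 / 14 = 71, which
	 * scales the two reservations down to 8 * 71 / 100 = 5 and
	 * 6 * 71 / 100 = 4 blocks; the remaining block is handed out by
	 * the loop below, leaving a 6/4 split of the original 10 blocks.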
4680 */ 4681 resfactor = (ores * 100); 4682 do_div(resfactor, nres); 4683 len1 *= resfactor; 4684 do_div(len1, 100); 4685 len2 *= resfactor; 4686 do_div(len2, 100); 4687 ASSERT(len1 + len2 <= ores); 4688 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4689 4690 /* 4691 * Hand out the remainder to each extent. If one of the two reservations 4692 * is zero, we want to make sure that one gets a block first. The loop 4693 * below starts with len1, so hand len2 a block right off the bat if it 4694 * is zero. 4695 */ 4696 ores -= (len1 + len2); 4697 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4698 if (ores && !len2 && *indlen2) { 4699 len2++; 4700 ores--; 4701 } 4702 while (ores) { 4703 if (len1 < *indlen1) { 4704 len1++; 4705 ores--; 4706 } 4707 if (!ores) 4708 break; 4709 if (len2 < *indlen2) { 4710 len2++; 4711 ores--; 4712 } 4713 } 4714 4715 *indlen1 = len1; 4716 *indlen2 = len2; 4717 4718 return stolen; 4719 } 4720 4721 int 4722 xfs_bmap_del_extent_delay( 4723 struct xfs_inode *ip, 4724 int whichfork, 4725 struct xfs_iext_cursor *icur, 4726 struct xfs_bmbt_irec *got, 4727 struct xfs_bmbt_irec *del) 4728 { 4729 struct xfs_mount *mp = ip->i_mount; 4730 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4731 struct xfs_bmbt_irec new; 4732 int64_t da_old, da_new, da_diff = 0; 4733 xfs_fileoff_t del_endoff, got_endoff; 4734 xfs_filblks_t got_indlen, new_indlen, stolen; 4735 int state = xfs_bmap_fork_to_state(whichfork); 4736 int error = 0; 4737 bool isrt; 4738 4739 XFS_STATS_INC(mp, xs_del_exlist); 4740 4741 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4742 del_endoff = del->br_startoff + del->br_blockcount; 4743 got_endoff = got->br_startoff + got->br_blockcount; 4744 da_old = startblockval(got->br_startblock); 4745 da_new = 0; 4746 4747 ASSERT(del->br_blockcount > 0); 4748 ASSERT(got->br_startoff <= del->br_startoff); 4749 ASSERT(got_endoff >= del_endoff); 4750 4751 if (isrt) { 4752 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4753 4754 do_div(rtexts, mp->m_sb.sb_rextsize); 4755 xfs_mod_frextents(mp, rtexts); 4756 } 4757 4758 /* 4759 * Update the inode delalloc counter now and wait to update the 4760 * sb counters as we might have to borrow some blocks for the 4761 * indirect block accounting. 4762 */ 4763 error = xfs_trans_reserve_quota_nblks(NULL, ip, 4764 -((long)del->br_blockcount), 0, 4765 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4766 if (error) 4767 return error; 4768 ip->i_delayed_blks -= del->br_blockcount; 4769 4770 if (got->br_startoff == del->br_startoff) 4771 state |= BMAP_LEFT_FILLING; 4772 if (got_endoff == del_endoff) 4773 state |= BMAP_RIGHT_FILLING; 4774 4775 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4776 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4777 /* 4778 * Matches the whole extent. Delete the entry. 4779 */ 4780 xfs_iext_remove(ip, icur, state); 4781 xfs_iext_prev(ifp, icur); 4782 break; 4783 case BMAP_LEFT_FILLING: 4784 /* 4785 * Deleting the first part of the extent. 4786 */ 4787 got->br_startoff = del_endoff; 4788 got->br_blockcount -= del->br_blockcount; 4789 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4790 got->br_blockcount), da_old); 4791 got->br_startblock = nullstartblock((int)da_new); 4792 xfs_iext_update_extent(ip, state, icur, got); 4793 break; 4794 case BMAP_RIGHT_FILLING: 4795 /* 4796 * Deleting the last part of the extent. 
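		 * (Only the length changes; the indlen reservation is
		 * recomputed for the shorter extent and clamped to the old
		 * value, so the reservation never grows here.)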
4797 */ 4798 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4799 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4800 got->br_blockcount), da_old); 4801 got->br_startblock = nullstartblock((int)da_new); 4802 xfs_iext_update_extent(ip, state, icur, got); 4803 break; 4804 case 0: 4805 /* 4806 * Deleting the middle of the extent. 4807 * 4808 * Distribute the original indlen reservation across the two new 4809 * extents. Steal blocks from the deleted extent if necessary. 4810 * Stealing blocks simply fudges the fdblocks accounting below. 4811 * Warn if either of the new indlen reservations is zero as this 4812 * can lead to delalloc problems. 4813 */ 4814 got->br_blockcount = del->br_startoff - got->br_startoff; 4815 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4816 4817 new.br_blockcount = got_endoff - del_endoff; 4818 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4819 4820 WARN_ON_ONCE(!got_indlen || !new_indlen); 4821 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4822 del->br_blockcount); 4823 4824 got->br_startblock = nullstartblock((int)got_indlen); 4825 4826 new.br_startoff = del_endoff; 4827 new.br_state = got->br_state; 4828 new.br_startblock = nullstartblock((int)new_indlen); 4829 4830 xfs_iext_update_extent(ip, state, icur, got); 4831 xfs_iext_next(ifp, icur); 4832 xfs_iext_insert(ip, icur, &new, state); 4833 4834 da_new = got_indlen + new_indlen - stolen; 4835 del->br_blockcount -= stolen; 4836 break; 4837 } 4838 4839 ASSERT(da_old >= da_new); 4840 da_diff = da_old - da_new; 4841 if (!isrt) 4842 da_diff += del->br_blockcount; 4843 if (da_diff) 4844 xfs_mod_fdblocks(mp, da_diff, false); 4845 return error; 4846 } 4847 4848 void 4849 xfs_bmap_del_extent_cow( 4850 struct xfs_inode *ip, 4851 struct xfs_iext_cursor *icur, 4852 struct xfs_bmbt_irec *got, 4853 struct xfs_bmbt_irec *del) 4854 { 4855 struct xfs_mount *mp = ip->i_mount; 4856 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4857 struct xfs_bmbt_irec new; 4858 xfs_fileoff_t del_endoff, got_endoff; 4859 int state = BMAP_COWFORK; 4860 4861 XFS_STATS_INC(mp, xs_del_exlist); 4862 4863 del_endoff = del->br_startoff + del->br_blockcount; 4864 got_endoff = got->br_startoff + got->br_blockcount; 4865 4866 ASSERT(del->br_blockcount > 0); 4867 ASSERT(got->br_startoff <= del->br_startoff); 4868 ASSERT(got_endoff >= del_endoff); 4869 ASSERT(!isnullstartblock(got->br_startblock)); 4870 4871 if (got->br_startoff == del->br_startoff) 4872 state |= BMAP_LEFT_FILLING; 4873 if (got_endoff == del_endoff) 4874 state |= BMAP_RIGHT_FILLING; 4875 4876 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4877 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4878 /* 4879 * Matches the whole extent. Delete the entry. 4880 */ 4881 xfs_iext_remove(ip, icur, state); 4882 xfs_iext_prev(ifp, icur); 4883 break; 4884 case BMAP_LEFT_FILLING: 4885 /* 4886 * Deleting the first part of the extent. 4887 */ 4888 got->br_startoff = del_endoff; 4889 got->br_blockcount -= del->br_blockcount; 4890 got->br_startblock = del->br_startblock + del->br_blockcount; 4891 xfs_iext_update_extent(ip, state, icur, got); 4892 break; 4893 case BMAP_RIGHT_FILLING: 4894 /* 4895 * Deleting the last part of the extent. 4896 */ 4897 got->br_blockcount -= del->br_blockcount; 4898 xfs_iext_update_extent(ip, state, icur, got); 4899 break; 4900 case 0: 4901 /* 4902 * Deleting the middle of the extent. 
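		 * (CoW fork extents are always real here, so the new
		 * right-hand extent simply starts at the first block past
		 * del; none of the indlen juggling of the delalloc case is
		 * needed.)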
4903 */ 4904 got->br_blockcount = del->br_startoff - got->br_startoff; 4905 4906 new.br_startoff = del_endoff; 4907 new.br_blockcount = got_endoff - del_endoff; 4908 new.br_state = got->br_state; 4909 new.br_startblock = del->br_startblock + del->br_blockcount; 4910 4911 xfs_iext_update_extent(ip, state, icur, got); 4912 xfs_iext_next(ifp, icur); 4913 xfs_iext_insert(ip, icur, &new, state); 4914 break; 4915 } 4916 ip->i_delayed_blks -= del->br_blockcount; 4917 } 4918 4919 /* 4920 * Called by xfs_bmapi to update file extent records and the btree 4921 * after removing space. 4922 */ 4923 STATIC int /* error */ 4924 xfs_bmap_del_extent_real( 4925 xfs_inode_t *ip, /* incore inode pointer */ 4926 xfs_trans_t *tp, /* current transaction pointer */ 4927 struct xfs_iext_cursor *icur, 4928 xfs_btree_cur_t *cur, /* if null, not a btree */ 4929 xfs_bmbt_irec_t *del, /* data to remove from extents */ 4930 int *logflagsp, /* inode logging flags */ 4931 int whichfork, /* data or attr fork */ 4932 int bflags) /* bmapi flags */ 4933 { 4934 xfs_fsblock_t del_endblock=0; /* first block past del */ 4935 xfs_fileoff_t del_endoff; /* first offset past del */ 4936 int do_fx; /* free extent at end of routine */ 4937 int error; /* error return value */ 4938 int flags = 0;/* inode logging flags */ 4939 struct xfs_bmbt_irec got; /* current extent entry */ 4940 xfs_fileoff_t got_endoff; /* first offset past got */ 4941 int i; /* temp state */ 4942 struct xfs_ifork *ifp; /* inode fork pointer */ 4943 xfs_mount_t *mp; /* mount structure */ 4944 xfs_filblks_t nblks; /* quota/sb block count */ 4945 xfs_bmbt_irec_t new; /* new record to be inserted */ 4946 /* REFERENCED */ 4947 uint qfield; /* quota field to update */ 4948 int state = xfs_bmap_fork_to_state(whichfork); 4949 struct xfs_bmbt_irec old; 4950 4951 mp = ip->i_mount; 4952 XFS_STATS_INC(mp, xs_del_exlist); 4953 4954 ifp = XFS_IFORK_PTR(ip, whichfork); 4955 ASSERT(del->br_blockcount > 0); 4956 xfs_iext_get_extent(ifp, icur, &got); 4957 ASSERT(got.br_startoff <= del->br_startoff); 4958 del_endoff = del->br_startoff + del->br_blockcount; 4959 got_endoff = got.br_startoff + got.br_blockcount; 4960 ASSERT(got_endoff >= del_endoff); 4961 ASSERT(!isnullstartblock(got.br_startblock)); 4962 qfield = 0; 4963 error = 0; 4964 4965 /* 4966 * If it's the case where the directory code is running with no block 4967 * reservation, and the deleted block is in the middle of its extent, 4968 * and the resulting insert of an extent would cause transformation to 4969 * btree format, then reject it. The calling code will then swap blocks 4970 * around instead. We have to do this now, rather than waiting for the 4971 * conversion to btree format, since the transaction will be dirty then. 
4972 */ 4973 if (tp->t_blk_res == 0 && 4974 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 4975 XFS_IFORK_NEXTENTS(ip, whichfork) >= 4976 XFS_IFORK_MAXEXT(ip, whichfork) && 4977 del->br_startoff > got.br_startoff && del_endoff < got_endoff) 4978 return -ENOSPC; 4979 4980 flags = XFS_ILOG_CORE; 4981 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 4982 xfs_fsblock_t bno; 4983 xfs_filblks_t len; 4984 xfs_extlen_t mod; 4985 4986 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize, 4987 &mod); 4988 ASSERT(mod == 0); 4989 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize, 4990 &mod); 4991 ASSERT(mod == 0); 4992 4993 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 4994 if (error) 4995 goto done; 4996 do_fx = 0; 4997 nblks = len * mp->m_sb.sb_rextsize; 4998 qfield = XFS_TRANS_DQ_RTBCOUNT; 4999 } else { 5000 do_fx = 1; 5001 nblks = del->br_blockcount; 5002 qfield = XFS_TRANS_DQ_BCOUNT; 5003 } 5004 5005 del_endblock = del->br_startblock + del->br_blockcount; 5006 if (cur) { 5007 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5008 if (error) 5009 goto done; 5010 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5011 } 5012 5013 if (got.br_startoff == del->br_startoff) 5014 state |= BMAP_LEFT_FILLING; 5015 if (got_endoff == del_endoff) 5016 state |= BMAP_RIGHT_FILLING; 5017 5018 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 5019 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 5020 /* 5021 * Matches the whole extent. Delete the entry. 5022 */ 5023 xfs_iext_remove(ip, icur, state); 5024 xfs_iext_prev(ifp, icur); 5025 XFS_IFORK_NEXT_SET(ip, whichfork, 5026 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5027 flags |= XFS_ILOG_CORE; 5028 if (!cur) { 5029 flags |= xfs_ilog_fext(whichfork); 5030 break; 5031 } 5032 if ((error = xfs_btree_delete(cur, &i))) 5033 goto done; 5034 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5035 break; 5036 case BMAP_LEFT_FILLING: 5037 /* 5038 * Deleting the first part of the extent. 5039 */ 5040 got.br_startoff = del_endoff; 5041 got.br_startblock = del_endblock; 5042 got.br_blockcount -= del->br_blockcount; 5043 xfs_iext_update_extent(ip, state, icur, &got); 5044 if (!cur) { 5045 flags |= xfs_ilog_fext(whichfork); 5046 break; 5047 } 5048 error = xfs_bmbt_update(cur, &got); 5049 if (error) 5050 goto done; 5051 break; 5052 case BMAP_RIGHT_FILLING: 5053 /* 5054 * Deleting the last part of the extent. 5055 */ 5056 got.br_blockcount -= del->br_blockcount; 5057 xfs_iext_update_extent(ip, state, icur, &got); 5058 if (!cur) { 5059 flags |= xfs_ilog_fext(whichfork); 5060 break; 5061 } 5062 error = xfs_bmbt_update(cur, &got); 5063 if (error) 5064 goto done; 5065 break; 5066 case 0: 5067 /* 5068 * Deleting the middle of the extent. 5069 */ 5070 old = got; 5071 5072 got.br_blockcount = del->br_startoff - got.br_startoff; 5073 xfs_iext_update_extent(ip, state, icur, &got); 5074 5075 new.br_startoff = del_endoff; 5076 new.br_blockcount = got_endoff - del_endoff; 5077 new.br_state = got.br_state; 5078 new.br_startblock = del_endblock; 5079 5080 flags |= XFS_ILOG_CORE; 5081 if (cur) { 5082 error = xfs_bmbt_update(cur, &got); 5083 if (error) 5084 goto done; 5085 error = xfs_btree_increment(cur, 0, &i); 5086 if (error) 5087 goto done; 5088 cur->bc_rec.b = new; 5089 error = xfs_btree_insert(cur, &i); 5090 if (error && error != -ENOSPC) 5091 goto done; 5092 /* 5093 * If get no-space back from btree insert, it tried a 5094 * split, and we have a zero block reservation. Fix up 5095 * our state and return the error. 
5096 */ 5097 if (error == -ENOSPC) { 5098 /* 5099 * Reset the cursor, don't trust it after any 5100 * insert operation. 5101 */ 5102 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5103 if (error) 5104 goto done; 5105 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5106 /* 5107 * Update the btree record back 5108 * to the original value. 5109 */ 5110 error = xfs_bmbt_update(cur, &old); 5111 if (error) 5112 goto done; 5113 /* 5114 * Reset the extent record back 5115 * to the original value. 5116 */ 5117 xfs_iext_update_extent(ip, state, icur, &old); 5118 flags = 0; 5119 error = -ENOSPC; 5120 goto done; 5121 } 5122 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5123 } else 5124 flags |= xfs_ilog_fext(whichfork); 5125 XFS_IFORK_NEXT_SET(ip, whichfork, 5126 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5127 xfs_iext_next(ifp, icur); 5128 xfs_iext_insert(ip, icur, &new, state); 5129 break; 5130 } 5131 5132 /* remove reverse mapping */ 5133 error = xfs_rmap_unmap_extent(tp, ip, whichfork, del); 5134 if (error) 5135 goto done; 5136 5137 /* 5138 * If we need to, add to list of extents to delete. 5139 */ 5140 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { 5141 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5142 error = xfs_refcount_decrease_extent(tp, del); 5143 if (error) 5144 goto done; 5145 } else { 5146 __xfs_bmap_add_free(tp, del->br_startblock, 5147 del->br_blockcount, NULL, 5148 (bflags & XFS_BMAPI_NODISCARD) || 5149 del->br_state == XFS_EXT_UNWRITTEN); 5150 } 5151 } 5152 5153 /* 5154 * Adjust inode # blocks in the file. 5155 */ 5156 if (nblks) 5157 ip->i_d.di_nblocks -= nblks; 5158 /* 5159 * Adjust quota data. 5160 */ 5161 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5162 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5163 5164 done: 5165 *logflagsp = flags; 5166 return error; 5167 } 5168 5169 /* 5170 * Unmap (remove) blocks from a file. 5171 * If nexts is nonzero then the number of extents to remove is limited to 5172 * that value. If not all extents in the block range can be removed then 5173 * *done is set. 
5174 */ 5175 int /* error */ 5176 __xfs_bunmapi( 5177 struct xfs_trans *tp, /* transaction pointer */ 5178 struct xfs_inode *ip, /* incore inode */ 5179 xfs_fileoff_t start, /* first file offset deleted */ 5180 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5181 int flags, /* misc flags */ 5182 xfs_extnum_t nexts) /* number of extents max */ 5183 { 5184 struct xfs_btree_cur *cur; /* bmap btree cursor */ 5185 struct xfs_bmbt_irec del; /* extent being deleted */ 5186 int error; /* error return value */ 5187 xfs_extnum_t extno; /* extent number in list */ 5188 struct xfs_bmbt_irec got; /* current extent record */ 5189 struct xfs_ifork *ifp; /* inode fork pointer */ 5190 int isrt; /* freeing in rt area */ 5191 int logflags; /* transaction logging flags */ 5192 xfs_extlen_t mod; /* rt extent offset */ 5193 struct xfs_mount *mp; /* mount structure */ 5194 int tmp_logflags; /* partial logging flags */ 5195 int wasdel; /* was a delayed alloc extent */ 5196 int whichfork; /* data or attribute fork */ 5197 xfs_fsblock_t sum; 5198 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5199 xfs_fileoff_t max_len; 5200 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno; 5201 xfs_fileoff_t end; 5202 struct xfs_iext_cursor icur; 5203 bool done = false; 5204 5205 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); 5206 5207 whichfork = xfs_bmapi_whichfork(flags); 5208 ASSERT(whichfork != XFS_COW_FORK); 5209 ifp = XFS_IFORK_PTR(ip, whichfork); 5210 if (unlikely( 5211 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5212 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5213 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5214 ip->i_mount); 5215 return -EFSCORRUPTED; 5216 } 5217 mp = ip->i_mount; 5218 if (XFS_FORCED_SHUTDOWN(mp)) 5219 return -EIO; 5220 5221 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5222 ASSERT(len > 0); 5223 ASSERT(nexts >= 0); 5224 5225 /* 5226 * Guesstimate how many blocks we can unmap without running the risk of 5227 * blowing out the transaction with a mix of EFIs and reflink 5228 * adjustments. 5229 */ 5230 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) 5231 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res)); 5232 else 5233 max_len = len; 5234 5235 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5236 (error = xfs_iread_extents(tp, ip, whichfork))) 5237 return error; 5238 if (xfs_iext_count(ifp) == 0) { 5239 *rlen = 0; 5240 return 0; 5241 } 5242 XFS_STATS_INC(mp, xs_blk_unmap); 5243 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5244 end = start + len; 5245 5246 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) { 5247 *rlen = 0; 5248 return 0; 5249 } 5250 end--; 5251 5252 logflags = 0; 5253 if (ifp->if_flags & XFS_IFBROOT) { 5254 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5255 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5256 cur->bc_private.b.flags = 0; 5257 } else 5258 cur = NULL; 5259 5260 if (isrt) { 5261 /* 5262 * Synchronize by locking the bitmap inode. 5263 */ 5264 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5265 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5266 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5267 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5268 } 5269 5270 extno = 0; 5271 while (end != (xfs_fileoff_t)-1 && end >= start && 5272 (nexts == 0 || extno < nexts) && max_len > 0) { 5273 /* 5274 * Is the found extent after a hole in which end lives? 5275 * Just back up to the previous extent, if so. 
5276 */ 5277 if (got.br_startoff > end && 5278 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5279 done = true; 5280 break; 5281 } 5282 /* 5283 * Is the last block of this extent before the range 5284 * we're supposed to delete? If so, we're done. 5285 */ 5286 end = XFS_FILEOFF_MIN(end, 5287 got.br_startoff + got.br_blockcount - 1); 5288 if (end < start) 5289 break; 5290 /* 5291 * Then deal with the (possibly delayed) allocated space 5292 * we found. 5293 */ 5294 del = got; 5295 wasdel = isnullstartblock(del.br_startblock); 5296 5297 /* 5298 * Make sure we don't touch multiple AGF headers out of order 5299 * in a single transaction, as that could cause AB-BA deadlocks. 5300 */ 5301 if (!wasdel) { 5302 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock); 5303 if (prev_agno != NULLAGNUMBER && prev_agno > agno) 5304 break; 5305 prev_agno = agno; 5306 } 5307 if (got.br_startoff < start) { 5308 del.br_startoff = start; 5309 del.br_blockcount -= start - got.br_startoff; 5310 if (!wasdel) 5311 del.br_startblock += start - got.br_startoff; 5312 } 5313 if (del.br_startoff + del.br_blockcount > end + 1) 5314 del.br_blockcount = end + 1 - del.br_startoff; 5315 5316 /* How much can we safely unmap? */ 5317 if (max_len < del.br_blockcount) { 5318 del.br_startoff += del.br_blockcount - max_len; 5319 if (!wasdel) 5320 del.br_startblock += del.br_blockcount - max_len; 5321 del.br_blockcount = max_len; 5322 } 5323 5324 if (!isrt) 5325 goto delete; 5326 5327 sum = del.br_startblock + del.br_blockcount; 5328 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod); 5329 if (mod) { 5330 /* 5331 * Realtime extent not lined up at the end. 5332 * The extent could have been split into written 5333 * and unwritten pieces, or we could just be 5334 * unmapping part of it. But we can't really 5335 * get rid of part of a realtime extent. 5336 */ 5337 if (del.br_state == XFS_EXT_UNWRITTEN) { 5338 /* 5339 * This piece is unwritten, or we're not 5340 * using unwritten extents. Skip over it. 5341 */ 5342 ASSERT(end >= mod); 5343 end -= mod > del.br_blockcount ? 5344 del.br_blockcount : mod; 5345 if (end < got.br_startoff && 5346 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5347 done = true; 5348 break; 5349 } 5350 continue; 5351 } 5352 /* 5353 * It's written, turn it unwritten. 5354 * This is better than zeroing it. 5355 */ 5356 ASSERT(del.br_state == XFS_EXT_NORM); 5357 ASSERT(tp->t_blk_res > 0); 5358 /* 5359 * If this spans a realtime extent boundary, 5360 * chop it back to the start of the one we end at. 5361 */ 5362 if (del.br_blockcount > mod) { 5363 del.br_startoff += del.br_blockcount - mod; 5364 del.br_startblock += del.br_blockcount - mod; 5365 del.br_blockcount = mod; 5366 } 5367 del.br_state = XFS_EXT_UNWRITTEN; 5368 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5369 whichfork, &icur, &cur, &del, 5370 &logflags); 5371 if (error) 5372 goto error0; 5373 goto nodelete; 5374 } 5375 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); 5376 if (mod) { 5377 /* 5378 * Realtime extent is lined up at the end but not 5379 * at the front. We'll get rid of full extents if 5380 * we can. 5381 */ 5382 mod = mp->m_sb.sb_rextsize - mod; 5383 if (del.br_blockcount > mod) { 5384 del.br_blockcount -= mod; 5385 del.br_startoff += mod; 5386 del.br_startblock += mod; 5387 } else if (del.br_startoff == start && 5388 (del.br_state == XFS_EXT_UNWRITTEN || 5389 tp->t_blk_res == 0)) { 5390 /* 5391 * Can't make it unwritten. There isn't 5392 * a full extent here so just skip it. 
5393 */ 5394 ASSERT(end >= del.br_blockcount); 5395 end -= del.br_blockcount; 5396 if (got.br_startoff > end && 5397 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5398 done = true; 5399 break; 5400 } 5401 continue; 5402 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5403 struct xfs_bmbt_irec prev; 5404 5405 /* 5406 * This one is already unwritten. 5407 * It must have a written left neighbor. 5408 * Unwrite the killed part of that one and 5409 * try again. 5410 */ 5411 if (!xfs_iext_prev_extent(ifp, &icur, &prev)) 5412 ASSERT(0); 5413 ASSERT(prev.br_state == XFS_EXT_NORM); 5414 ASSERT(!isnullstartblock(prev.br_startblock)); 5415 ASSERT(del.br_startblock == 5416 prev.br_startblock + prev.br_blockcount); 5417 if (prev.br_startoff < start) { 5418 mod = start - prev.br_startoff; 5419 prev.br_blockcount -= mod; 5420 prev.br_startblock += mod; 5421 prev.br_startoff = start; 5422 } 5423 prev.br_state = XFS_EXT_UNWRITTEN; 5424 error = xfs_bmap_add_extent_unwritten_real(tp, 5425 ip, whichfork, &icur, &cur, 5426 &prev, &logflags); 5427 if (error) 5428 goto error0; 5429 goto nodelete; 5430 } else { 5431 ASSERT(del.br_state == XFS_EXT_NORM); 5432 del.br_state = XFS_EXT_UNWRITTEN; 5433 error = xfs_bmap_add_extent_unwritten_real(tp, 5434 ip, whichfork, &icur, &cur, 5435 &del, &logflags); 5436 if (error) 5437 goto error0; 5438 goto nodelete; 5439 } 5440 } 5441 5442 delete: 5443 if (wasdel) { 5444 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5445 &got, &del); 5446 } else { 5447 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur, 5448 &del, &tmp_logflags, whichfork, 5449 flags); 5450 logflags |= tmp_logflags; 5451 } 5452 5453 if (error) 5454 goto error0; 5455 5456 max_len -= del.br_blockcount; 5457 end = del.br_startoff - 1; 5458 nodelete: 5459 /* 5460 * If not done go on to the next (previous) record. 5461 */ 5462 if (end != (xfs_fileoff_t)-1 && end >= start) { 5463 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5464 (got.br_startoff > end && 5465 !xfs_iext_prev_extent(ifp, &icur, &got))) { 5466 done = true; 5467 break; 5468 } 5469 extno++; 5470 } 5471 } 5472 if (done || end == (xfs_fileoff_t)-1 || end < start) 5473 *rlen = 0; 5474 else 5475 *rlen = end - start + 1; 5476 5477 /* 5478 * Convert to a btree if necessary. 5479 */ 5480 if (xfs_bmap_needs_btree(ip, whichfork)) { 5481 ASSERT(cur == NULL); 5482 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5483 &tmp_logflags, whichfork); 5484 logflags |= tmp_logflags; 5485 } else { 5486 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, 5487 whichfork); 5488 } 5489 5490 error0: 5491 /* 5492 * Log everything. Do this after conversion, there's no point in 5493 * logging the extent records if we've converted to btree format. 5494 */ 5495 if ((logflags & xfs_ilog_fext(whichfork)) && 5496 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5497 logflags &= ~xfs_ilog_fext(whichfork); 5498 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5499 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5500 logflags &= ~xfs_ilog_fbroot(whichfork); 5501 /* 5502 * Log inode even in the error case, if the transaction 5503 * is dirty we'll need to shut down the filesystem. 5504 */ 5505 if (logflags) 5506 xfs_trans_log_inode(tp, ip, logflags); 5507 if (cur) { 5508 if (!error) 5509 cur->bc_private.b.allocated = 0; 5510 xfs_btree_del_cursor(cur, error); 5511 } 5512 return error; 5513 } 5514 5515 /* Unmap a range of a file. 
*/ 5516 int 5517 xfs_bunmapi( 5518 xfs_trans_t *tp, 5519 struct xfs_inode *ip, 5520 xfs_fileoff_t bno, 5521 xfs_filblks_t len, 5522 int flags, 5523 xfs_extnum_t nexts, 5524 int *done) 5525 { 5526 int error; 5527 5528 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts); 5529 *done = (len == 0); 5530 return error; 5531 } 5532 5533 /* 5534 * Determine whether an extent shift can be accomplished by a merge with the 5535 * extent that precedes the target hole of the shift. 5536 */ 5537 STATIC bool 5538 xfs_bmse_can_merge( 5539 struct xfs_bmbt_irec *left, /* preceding extent */ 5540 struct xfs_bmbt_irec *got, /* current extent to shift */ 5541 xfs_fileoff_t shift) /* shift fsb */ 5542 { 5543 xfs_fileoff_t startoff; 5544 5545 startoff = got->br_startoff - shift; 5546 5547 /* 5548 * The extent, once shifted, must be adjacent in-file and on-disk with 5549 * the preceding extent. 5550 */ 5551 if ((left->br_startoff + left->br_blockcount != startoff) || 5552 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5553 (left->br_state != got->br_state) || 5554 (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) 5555 return false; 5556 5557 return true; 5558 } 5559 5560 /* 5561 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5562 * hole in the file. If an extent shift would result in the extent being fully 5563 * adjacent to the extent that currently precedes the hole, we can merge with 5564 * the preceding extent rather than do the shift. 5565 * 5566 * This function assumes the caller has verified a shift-by-merge is possible 5567 * with the provided extents via xfs_bmse_can_merge(). 5568 */ 5569 STATIC int 5570 xfs_bmse_merge( 5571 struct xfs_trans *tp, 5572 struct xfs_inode *ip, 5573 int whichfork, 5574 xfs_fileoff_t shift, /* shift fsb */ 5575 struct xfs_iext_cursor *icur, 5576 struct xfs_bmbt_irec *got, /* extent to shift */ 5577 struct xfs_bmbt_irec *left, /* preceding extent */ 5578 struct xfs_btree_cur *cur, 5579 int *logflags) /* output */ 5580 { 5581 struct xfs_bmbt_irec new; 5582 xfs_filblks_t blockcount; 5583 int error, i; 5584 struct xfs_mount *mp = ip->i_mount; 5585 5586 blockcount = left->br_blockcount + got->br_blockcount; 5587 5588 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5589 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5590 ASSERT(xfs_bmse_can_merge(left, got, shift)); 5591 5592 new = *left; 5593 new.br_blockcount = blockcount; 5594 5595 /* 5596 * Update the on-disk extent count, the btree if necessary and log the 5597 * inode. 5598 */ 5599 XFS_IFORK_NEXT_SET(ip, whichfork, 5600 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5601 *logflags |= XFS_ILOG_CORE; 5602 if (!cur) { 5603 *logflags |= XFS_ILOG_DEXT; 5604 goto done; 5605 } 5606 5607 /* lookup and remove the extent to merge */ 5608 error = xfs_bmbt_lookup_eq(cur, got, &i); 5609 if (error) 5610 return error; 5611 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5612 5613 error = xfs_btree_delete(cur, &i); 5614 if (error) 5615 return error; 5616 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5617 5618 /* lookup and update size of the previous extent */ 5619 error = xfs_bmbt_lookup_eq(cur, left, &i); 5620 if (error) 5621 return error; 5622 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5623 5624 error = xfs_bmbt_update(cur, &new); 5625 if (error) 5626 return error; 5627 5628 done: 5629 xfs_iext_remove(ip, icur, 0); 5630 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur); 5631 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5632 &new); 5633 5634 /* update reverse mapping. 
rmap functions merge the rmaps for us */ 5635 error = xfs_rmap_unmap_extent(tp, ip, whichfork, got); 5636 if (error) 5637 return error; 5638 memcpy(&new, got, sizeof(new)); 5639 new.br_startoff = left->br_startoff + left->br_blockcount; 5640 return xfs_rmap_map_extent(tp, ip, whichfork, &new); 5641 } 5642 5643 static int 5644 xfs_bmap_shift_update_extent( 5645 struct xfs_trans *tp, 5646 struct xfs_inode *ip, 5647 int whichfork, 5648 struct xfs_iext_cursor *icur, 5649 struct xfs_bmbt_irec *got, 5650 struct xfs_btree_cur *cur, 5651 int *logflags, 5652 xfs_fileoff_t startoff) 5653 { 5654 struct xfs_mount *mp = ip->i_mount; 5655 struct xfs_bmbt_irec prev = *got; 5656 int error, i; 5657 5658 *logflags |= XFS_ILOG_CORE; 5659 5660 got->br_startoff = startoff; 5661 5662 if (cur) { 5663 error = xfs_bmbt_lookup_eq(cur, &prev, &i); 5664 if (error) 5665 return error; 5666 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5667 5668 error = xfs_bmbt_update(cur, got); 5669 if (error) 5670 return error; 5671 } else { 5672 *logflags |= XFS_ILOG_DEXT; 5673 } 5674 5675 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5676 got); 5677 5678 /* update reverse mapping */ 5679 error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev); 5680 if (error) 5681 return error; 5682 return xfs_rmap_map_extent(tp, ip, whichfork, got); 5683 } 5684 5685 int 5686 xfs_bmap_collapse_extents( 5687 struct xfs_trans *tp, 5688 struct xfs_inode *ip, 5689 xfs_fileoff_t *next_fsb, 5690 xfs_fileoff_t offset_shift_fsb, 5691 bool *done) 5692 { 5693 int whichfork = XFS_DATA_FORK; 5694 struct xfs_mount *mp = ip->i_mount; 5695 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 5696 struct xfs_btree_cur *cur = NULL; 5697 struct xfs_bmbt_irec got, prev; 5698 struct xfs_iext_cursor icur; 5699 xfs_fileoff_t new_startoff; 5700 int error = 0; 5701 int logflags = 0; 5702 5703 if (unlikely(XFS_TEST_ERROR( 5704 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5705 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 5706 mp, XFS_ERRTAG_BMAPIFORMAT))) { 5707 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 5708 return -EFSCORRUPTED; 5709 } 5710 5711 if (XFS_FORCED_SHUTDOWN(mp)) 5712 return -EIO; 5713 5714 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL)); 5715 5716 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 5717 error = xfs_iread_extents(tp, ip, whichfork); 5718 if (error) 5719 return error; 5720 } 5721 5722 if (ifp->if_flags & XFS_IFBROOT) { 5723 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5724 cur->bc_private.b.flags = 0; 5725 } 5726 5727 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) { 5728 *done = true; 5729 goto del_cursor; 5730 } 5731 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock), 5732 del_cursor); 5733 5734 new_startoff = got.br_startoff - offset_shift_fsb; 5735 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) { 5736 if (new_startoff < prev.br_startoff + prev.br_blockcount) { 5737 error = -EINVAL; 5738 goto del_cursor; 5739 } 5740 5741 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) { 5742 error = xfs_bmse_merge(tp, ip, whichfork, 5743 offset_shift_fsb, &icur, &got, &prev, 5744 cur, &logflags); 5745 if (error) 5746 goto del_cursor; 5747 goto done; 5748 } 5749 } else { 5750 if (got.br_startoff < offset_shift_fsb) { 5751 error = -EINVAL; 5752 goto del_cursor; 5753 } 5754 } 5755 5756 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got, 5757 cur, &logflags, new_startoff); 5758 if (error) 5759 goto del_cursor; 5760 5761 done: 5762 if 
/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
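/*
 * Worked example (illustrative, values hypothetical): the on-disk bmbt
 * startoff field is narrower than 64 bits, and BMBT_STARTOFF_MASK covers
 * exactly the bits it can hold. If the file's last extent starts at
 * off = BMBT_STARTOFF_MASK - 2 and the caller asks for shift = 4, then
 * (off + 4) & BMBT_STARTOFF_MASK wraps around to 1, which is smaller than
 * off, so the check above rejects the request with -EINVAL rather than
 * letting an extent shift past the maximum supported file offset.
 */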
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
				del_cursor);

	if (stop_fsb >= got.br_startoff + got.br_blockcount) {
		error = -EIO;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way. We should
		 * never find mergeable extents in this scenario. Check anyway
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
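/*
 * Illustrative sketch (not part of the original XFS code): how a caller
 * might drive xfs_bmap_insert_extents() to right-shift everything at or
 * above @stop_fsb. The function name is hypothetical. The walk starts from
 * the last extent (*next_fsb == NULLFSBLOCK) and works backwards; real
 * callers check xfs_bmap_can_insert_extents() first, and transaction and
 * locking setup are assumed to be handled elsewhere.
 */
static inline int
xfs_bmap_insert_all_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		stop_fsb,
	xfs_fileoff_t		shift_fsb)
{
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	bool			done = false;
	int			error = 0;

	/* each pass shifts one extent right by shift_fsb */
	while (!done && !error)
		error = xfs_bmap_insert_extents(tp, ip, &next_fsb,
				shift_fsb, &done, stop_fsb);
	return error;
}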
/*
 * Splits an extent into two extents at split_fsb block such that it is the
 * first block of the new extent. @split_fsb is the block where the extent
 * is split. If split_fsb lies in a hole or at the first block of an extent,
 * just return 0.
 */
STATIC int
xfs_bmap_split_extent_at(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

int
xfs_bmap_split_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
	if (error)
		goto out;

	return xfs_trans_commit(tp);

out:
	xfs_trans_cancel(tp);
	return error;
}
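/*
 * Illustrative sketch (not part of the original XFS code; the function
 * name and the offset are hypothetical): given an extent
 * { br_startoff = 0, br_startblock = 100, br_blockcount = 10 } and
 * split_fsb = 4, xfs_bmap_split_extent_at() shrinks the extent to 4 blocks
 * and inserts a second record
 * { br_startoff = 4, br_startblock = 104, br_blockcount = 6 }: the fork's
 * extent count goes up by one, but the block mapping itself is unchanged.
 */
static inline int
xfs_bmap_split_example(
	struct xfs_inode	*ip)
{
	/* allocates, runs and commits its own transaction internally */
	return xfs_bmap_split_extent(ip, 4);
}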
/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return	bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
int
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
int
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}

/* Process one of the deferred bmap operations. */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error = 0;

	ASSERT(tp->t_firstblock == NULLFSBLOCK);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
			ip->i_ino, whichfork, startoff, *blockcount, state);

	if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
				startblock, 0);
		*blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, ip, startoff, blockcount,
				XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}
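/*
 * Illustrative sketch (not part of the original XFS code; the function
 * name and extent values are hypothetical): queueing a deferred mapping.
 * Holes and delalloc extents are filtered out by
 * xfs_bmap_is_update_needed(), so the call is a no-op for them; for real
 * extents an XFS_BMAP_MAP intent is logged and later processed by
 * xfs_bmap_finish_one() when the transaction's deferred operations run.
 */
static inline int
xfs_bmap_defer_map_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	struct xfs_bmbt_irec	irec = {
		.br_startoff = 0,
		.br_startblock = 100,	/* hypothetical real block */
		.br_blockcount = 8,
		.br_state = XFS_EXT_NORM,
	};

	return xfs_bmap_map_extent(tp, ip, &irec);
}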
/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		endfsb;
	bool			isrt;

	isrt = XFS_IS_REALTIME_INODE(ip);
	endfsb = irec->br_startblock + irec->br_blockcount - 1;
	if (isrt) {
		if (!xfs_verify_rtbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_rtbno(mp, endfsb))
			return __this_address;
	} else {
		if (!xfs_verify_fsbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_fsbno(mp, endfsb))
			return __this_address;
		if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
		    XFS_FSB_TO_AGNO(mp, endfsb))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}
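/*
 * Illustrative sketch (not part of the original XFS code; the function
 * name is hypothetical): a caller might treat a NULL failure address as
 * "record is valid" and use the returned address for error reporting
 * otherwise.
 */
static inline bool
xfs_bmap_extent_ok_sketch(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	return xfs_bmap_validate_extent(ip, whichfork, irec) == NULL;
}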