1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_sb.h" 14 #include "xfs_mount.h" 15 #include "xfs_defer.h" 16 #include "xfs_dir2.h" 17 #include "xfs_inode.h" 18 #include "xfs_btree.h" 19 #include "xfs_trans.h" 20 #include "xfs_alloc.h" 21 #include "xfs_bmap.h" 22 #include "xfs_bmap_util.h" 23 #include "xfs_bmap_btree.h" 24 #include "xfs_rtalloc.h" 25 #include "xfs_errortag.h" 26 #include "xfs_error.h" 27 #include "xfs_quota.h" 28 #include "xfs_trans_space.h" 29 #include "xfs_buf_item.h" 30 #include "xfs_trace.h" 31 #include "xfs_attr_leaf.h" 32 #include "xfs_filestream.h" 33 #include "xfs_rmap.h" 34 #include "xfs_ag_resv.h" 35 #include "xfs_refcount.h" 36 #include "xfs_icache.h" 37 38 39 kmem_zone_t *xfs_bmap_free_item_zone; 40 41 /* 42 * Miscellaneous helper functions 43 */ 44 45 /* 46 * Compute and fill in the value of the maximum depth of a bmap btree 47 * in this filesystem. Done once, during mount. 48 */ 49 void 50 xfs_bmap_compute_maxlevels( 51 xfs_mount_t *mp, /* file system mount structure */ 52 int whichfork) /* data or attr fork */ 53 { 54 int level; /* btree level */ 55 uint maxblocks; /* max blocks at this level */ 56 uint maxleafents; /* max leaf entries possible */ 57 int maxrootrecs; /* max records in root block */ 58 int minleafrecs; /* min records in leaf block */ 59 int minnoderecs; /* min records in node block */ 60 int sz; /* root block size */ 61 62 /* 63 * The maximum number of extents in a file, hence the maximum 64 * number of leaf entries, is controlled by the type of di_nextents 65 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents 66 * (a signed 16-bit number, xfs_aextnum_t). 67 * 68 * Note that we can no longer assume that if we are in ATTR1 that 69 * the fork offset of all the inodes will be 70 * (xfs_default_attroffset(ip) >> 3) because we could have mounted 71 * with ATTR2 and then mounted back with ATTR1, keeping the 72 * di_forkoff's fixed but probably at various positions. Therefore, 73 * for both ATTR1 and ATTR2 we have to assume the worst case scenario 74 * of a minimum size available. 75 */ 76 if (whichfork == XFS_DATA_FORK) { 77 maxleafents = MAXEXTNUM; 78 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS); 79 } else { 80 maxleafents = MAXAEXTNUM; 81 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS); 82 } 83 maxrootrecs = xfs_bmdr_maxrecs(sz, 0); 84 minleafrecs = mp->m_bmap_dmnr[0]; 85 minnoderecs = mp->m_bmap_dmnr[1]; 86 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; 87 for (level = 1; maxblocks > 1; level++) { 88 if (maxblocks <= maxrootrecs) 89 maxblocks = 1; 90 else 91 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; 92 } 93 mp->m_bm_maxlevels[whichfork] = level; 94 } 95 96 STATIC int /* error */ 97 xfs_bmbt_lookup_eq( 98 struct xfs_btree_cur *cur, 99 struct xfs_bmbt_irec *irec, 100 int *stat) /* success/failure */ 101 { 102 cur->bc_rec.b = *irec; 103 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); 104 } 105 106 STATIC int /* error */ 107 xfs_bmbt_lookup_first( 108 struct xfs_btree_cur *cur, 109 int *stat) /* success/failure */ 110 { 111 cur->bc_rec.b.br_startoff = 0; 112 cur->bc_rec.b.br_startblock = 0; 113 cur->bc_rec.b.br_blockcount = 0; 114 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); 115 } 116 117 /* 118 * Check if the inode needs to be converted to btree format. 
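 * That is the case once the fork holds more extents than can be referenced
 * from the inode itself, i.e. XFS_IFORK_NEXTENTS() exceeds
 * XFS_IFORK_MAXEXT(), as tested by xfs_bmap_needs_btree() below.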
119 */ 120 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork) 121 { 122 return whichfork != XFS_COW_FORK && 123 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 124 XFS_IFORK_NEXTENTS(ip, whichfork) > 125 XFS_IFORK_MAXEXT(ip, whichfork); 126 } 127 128 /* 129 * Check if the inode should be converted to extent format. 130 */ 131 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork) 132 { 133 return whichfork != XFS_COW_FORK && 134 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && 135 XFS_IFORK_NEXTENTS(ip, whichfork) <= 136 XFS_IFORK_MAXEXT(ip, whichfork); 137 } 138 139 /* 140 * Update the record referred to by cur to the value given by irec 141 * This either works (return 0) or gets an EFSCORRUPTED error. 142 */ 143 STATIC int 144 xfs_bmbt_update( 145 struct xfs_btree_cur *cur, 146 struct xfs_bmbt_irec *irec) 147 { 148 union xfs_btree_rec rec; 149 150 xfs_bmbt_disk_set_all(&rec.bmbt, irec); 151 return xfs_btree_update(cur, &rec); 152 } 153 154 /* 155 * Compute the worst-case number of indirect blocks that will be used 156 * for ip's delayed extent of length "len". 157 */ 158 STATIC xfs_filblks_t 159 xfs_bmap_worst_indlen( 160 xfs_inode_t *ip, /* incore inode pointer */ 161 xfs_filblks_t len) /* delayed extent length */ 162 { 163 int level; /* btree level number */ 164 int maxrecs; /* maximum record count at this level */ 165 xfs_mount_t *mp; /* mount structure */ 166 xfs_filblks_t rval; /* return value */ 167 168 mp = ip->i_mount; 169 maxrecs = mp->m_bmap_dmxr[0]; 170 for (level = 0, rval = 0; 171 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); 172 level++) { 173 len += maxrecs - 1; 174 do_div(len, maxrecs); 175 rval += len; 176 if (len == 1) 177 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - 178 level - 1; 179 if (level == 0) 180 maxrecs = mp->m_bmap_dmxr[1]; 181 } 182 return rval; 183 } 184 185 /* 186 * Calculate the default attribute fork offset for newly created inodes. 187 */ 188 uint 189 xfs_default_attroffset( 190 struct xfs_inode *ip) 191 { 192 struct xfs_mount *mp = ip->i_mount; 193 uint offset; 194 195 if (mp->m_sb.sb_inodesize == 256) { 196 offset = XFS_LITINO(mp, ip->i_d.di_version) - 197 XFS_BMDR_SPACE_CALC(MINABTPTRS); 198 } else { 199 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS); 200 } 201 202 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version)); 203 return offset; 204 } 205 206 /* 207 * Helper routine to reset inode di_forkoff field when switching 208 * attribute fork from local to extent format - we reset it where 209 * possible to make space available for inline data fork extents. 
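 * The reset only ever moves di_forkoff up towards the default attribute
 * offset (xfs_default_attroffset()); it never shrinks the space already
 * granted to the data fork.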
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, and for such
 * large bmapbt constructs those will catch most corruptions.
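 * This is a DEBUG-only consistency check; if it trips, the filesystem is
 * shut down with SHUTDOWN_CORRUPT_INCORE rather than returning an error.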
312 */ 313 STATIC void 314 xfs_bmap_check_leaf_extents( 315 xfs_btree_cur_t *cur, /* btree cursor or null */ 316 xfs_inode_t *ip, /* incore inode pointer */ 317 int whichfork) /* data or attr fork */ 318 { 319 struct xfs_btree_block *block; /* current btree block */ 320 xfs_fsblock_t bno; /* block # of "block" */ 321 xfs_buf_t *bp; /* buffer for "block" */ 322 int error; /* error return value */ 323 xfs_extnum_t i=0, j; /* index into the extents list */ 324 struct xfs_ifork *ifp; /* fork structure */ 325 int level; /* btree level, for checking */ 326 xfs_mount_t *mp; /* file system mount structure */ 327 __be64 *pp; /* pointer to block address */ 328 xfs_bmbt_rec_t *ep; /* pointer to current extent */ 329 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */ 330 xfs_bmbt_rec_t *nextp; /* pointer to next extent */ 331 int bp_release = 0; 332 333 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) { 334 return; 335 } 336 337 /* skip large extent count inodes */ 338 if (ip->i_d.di_nextents > 10000) 339 return; 340 341 bno = NULLFSBLOCK; 342 mp = ip->i_mount; 343 ifp = XFS_IFORK_PTR(ip, whichfork); 344 block = ifp->if_broot; 345 /* 346 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. 347 */ 348 level = be16_to_cpu(block->bb_level); 349 ASSERT(level > 0); 350 xfs_check_block(block, mp, 1, ifp->if_broot_bytes); 351 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 352 bno = be64_to_cpu(*pp); 353 354 ASSERT(bno != NULLFSBLOCK); 355 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); 356 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); 357 358 /* 359 * Go down the tree until leaf level is reached, following the first 360 * pointer (leftmost) at each level. 361 */ 362 while (level-- > 0) { 363 /* See if buf is in cur first */ 364 bp_release = 0; 365 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); 366 if (!bp) { 367 bp_release = 1; 368 error = xfs_btree_read_bufl(mp, NULL, bno, &bp, 369 XFS_BMAP_BTREE_REF, 370 &xfs_bmbt_buf_ops); 371 if (error) 372 goto error_norelse; 373 } 374 block = XFS_BUF_TO_BLOCK(bp); 375 if (level == 0) 376 break; 377 378 /* 379 * Check this block for basic sanity (increasing keys and 380 * no duplicate blocks). 381 */ 382 383 xfs_check_block(block, mp, 0, 0); 384 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); 385 bno = be64_to_cpu(*pp); 386 XFS_WANT_CORRUPTED_GOTO(mp, 387 xfs_verify_fsbno(mp, bno), error0); 388 if (bp_release) { 389 bp_release = 0; 390 xfs_trans_brelse(NULL, bp); 391 } 392 } 393 394 /* 395 * Here with bp and block set to the leftmost leaf node in the tree. 396 */ 397 i = 0; 398 399 /* 400 * Loop over all leaf nodes checking that all extents are in the right order. 401 */ 402 for (;;) { 403 xfs_fsblock_t nextbno; 404 xfs_extnum_t num_recs; 405 406 407 num_recs = xfs_btree_get_numrecs(block); 408 409 /* 410 * Read-ahead the next leaf block, if any. 411 */ 412 413 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); 414 415 /* 416 * Check all the extents to make sure they are OK. 417 * If we had a previous block, the last entry should 418 * conform with the first entry in this one. 
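 * "Conform" here means the previous block's last extent must end at or
 * before the start of this block's first extent, which is what the ASSERT
 * below verifies.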
419 */ 420 421 ep = XFS_BMBT_REC_ADDR(mp, block, 1); 422 if (i) { 423 ASSERT(xfs_bmbt_disk_get_startoff(&last) + 424 xfs_bmbt_disk_get_blockcount(&last) <= 425 xfs_bmbt_disk_get_startoff(ep)); 426 } 427 for (j = 1; j < num_recs; j++) { 428 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1); 429 ASSERT(xfs_bmbt_disk_get_startoff(ep) + 430 xfs_bmbt_disk_get_blockcount(ep) <= 431 xfs_bmbt_disk_get_startoff(nextp)); 432 ep = nextp; 433 } 434 435 last = *ep; 436 i += num_recs; 437 if (bp_release) { 438 bp_release = 0; 439 xfs_trans_brelse(NULL, bp); 440 } 441 bno = nextbno; 442 /* 443 * If we've reached the end, stop. 444 */ 445 if (bno == NULLFSBLOCK) 446 break; 447 448 bp_release = 0; 449 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); 450 if (!bp) { 451 bp_release = 1; 452 error = xfs_btree_read_bufl(mp, NULL, bno, &bp, 453 XFS_BMAP_BTREE_REF, 454 &xfs_bmbt_buf_ops); 455 if (error) 456 goto error_norelse; 457 } 458 block = XFS_BUF_TO_BLOCK(bp); 459 } 460 461 return; 462 463 error0: 464 xfs_warn(mp, "%s: at error0", __func__); 465 if (bp_release) 466 xfs_trans_brelse(NULL, bp); 467 error_norelse: 468 xfs_warn(mp, "%s: BAD after btree leaves for %d extents", 469 __func__, i); 470 xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__); 471 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 472 return; 473 } 474 475 /* 476 * Validate that the bmbt_irecs being returned from bmapi are valid 477 * given the caller's original parameters. Specifically check the 478 * ranges of the returned irecs to ensure that they only extend beyond 479 * the given parameters if the XFS_BMAPI_ENTIRE flag was set. 480 */ 481 STATIC void 482 xfs_bmap_validate_ret( 483 xfs_fileoff_t bno, 484 xfs_filblks_t len, 485 int flags, 486 xfs_bmbt_irec_t *mval, 487 int nmap, 488 int ret_nmap) 489 { 490 int i; /* index to map values */ 491 492 ASSERT(ret_nmap <= nmap); 493 494 for (i = 0; i < ret_nmap; i++) { 495 ASSERT(mval[i].br_blockcount > 0); 496 if (!(flags & XFS_BMAPI_ENTIRE)) { 497 ASSERT(mval[i].br_startoff >= bno); 498 ASSERT(mval[i].br_blockcount <= len); 499 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <= 500 bno + len); 501 } else { 502 ASSERT(mval[i].br_startoff < bno + len); 503 ASSERT(mval[i].br_startoff + mval[i].br_blockcount > 504 bno); 505 } 506 ASSERT(i == 0 || 507 mval[i - 1].br_startoff + mval[i - 1].br_blockcount == 508 mval[i].br_startoff); 509 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK && 510 mval[i].br_startblock != HOLESTARTBLOCK); 511 ASSERT(mval[i].br_state == XFS_EXT_NORM || 512 mval[i].br_state == XFS_EXT_UNWRITTEN); 513 } 514 } 515 516 #else 517 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0) 518 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0) 519 #endif /* DEBUG */ 520 521 /* 522 * bmap free list manipulation functions 523 */ 524 525 /* 526 * Add the extent to the list of extents to be free at transaction end. 527 * The list is maintained sorted (by block number). 
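 * The extent is not freed immediately: it is queued as deferred
 * XFS_DEFER_OPS_TYPE_FREE work and processed when the transaction commits.
 * A typical caller looks like the (illustrative) sequence used by
 * xfs_bmap_btree_to_extents() below:
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);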
528 */ 529 void 530 __xfs_bmap_add_free( 531 struct xfs_trans *tp, 532 xfs_fsblock_t bno, 533 xfs_filblks_t len, 534 const struct xfs_owner_info *oinfo, 535 bool skip_discard) 536 { 537 struct xfs_extent_free_item *new; /* new element */ 538 #ifdef DEBUG 539 struct xfs_mount *mp = tp->t_mountp; 540 xfs_agnumber_t agno; 541 xfs_agblock_t agbno; 542 543 ASSERT(bno != NULLFSBLOCK); 544 ASSERT(len > 0); 545 ASSERT(len <= MAXEXTLEN); 546 ASSERT(!isnullstartblock(bno)); 547 agno = XFS_FSB_TO_AGNO(mp, bno); 548 agbno = XFS_FSB_TO_AGBNO(mp, bno); 549 ASSERT(agno < mp->m_sb.sb_agcount); 550 ASSERT(agbno < mp->m_sb.sb_agblocks); 551 ASSERT(len < mp->m_sb.sb_agblocks); 552 ASSERT(agbno + len <= mp->m_sb.sb_agblocks); 553 #endif 554 ASSERT(xfs_bmap_free_item_zone != NULL); 555 556 new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0); 557 new->xefi_startblock = bno; 558 new->xefi_blockcount = (xfs_extlen_t)len; 559 if (oinfo) 560 new->xefi_oinfo = *oinfo; 561 else 562 new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; 563 new->xefi_skip_discard = skip_discard; 564 trace_xfs_bmap_free_defer(tp->t_mountp, 565 XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0, 566 XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len); 567 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list); 568 } 569 570 /* 571 * Inode fork format manipulation functions 572 */ 573 574 /* 575 * Convert the inode format to extent format if it currently is in btree format, 576 * but the extent list is small enough that it fits into the extent format. 577 * 578 * Since the extents are already in-core, all we have to do is give up the space 579 * for the btree root and pitch the leaf block. 580 */ 581 STATIC int /* error */ 582 xfs_bmap_btree_to_extents( 583 struct xfs_trans *tp, /* transaction pointer */ 584 struct xfs_inode *ip, /* incore inode pointer */ 585 struct xfs_btree_cur *cur, /* btree cursor */ 586 int *logflagsp, /* inode logging flags */ 587 int whichfork) /* data or attr fork */ 588 { 589 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 590 struct xfs_mount *mp = ip->i_mount; 591 struct xfs_btree_block *rblock = ifp->if_broot; 592 struct xfs_btree_block *cblock;/* child btree block */ 593 xfs_fsblock_t cbno; /* child block number */ 594 xfs_buf_t *cbp; /* child block's buffer */ 595 int error; /* error return value */ 596 __be64 *pp; /* ptr to block address */ 597 struct xfs_owner_info oinfo; 598 599 /* check if we actually need the extent format first: */ 600 if (!xfs_bmap_wants_extents(ip, whichfork)) 601 return 0; 602 603 ASSERT(cur); 604 ASSERT(whichfork != XFS_COW_FORK); 605 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 606 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 607 ASSERT(be16_to_cpu(rblock->bb_level) == 1); 608 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); 609 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1); 610 611 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes); 612 cbno = be64_to_cpu(*pp); 613 #ifdef DEBUG 614 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, 615 xfs_btree_check_lptr(cur, cbno, 1)); 616 #endif 617 error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF, 618 &xfs_bmbt_buf_ops); 619 if (error) 620 return error; 621 cblock = XFS_BUF_TO_BLOCK(cbp); 622 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) 623 return error; 624 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); 625 xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo); 626 ip->i_d.di_nblocks--; 627 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 628 xfs_trans_binval(tp, cbp); 629 if (cur->bc_bufs[0] 
== cbp) 630 cur->bc_bufs[0] = NULL; 631 xfs_iroot_realloc(ip, -1, whichfork); 632 ASSERT(ifp->if_broot == NULL); 633 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); 634 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 635 *logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 636 return 0; 637 } 638 639 /* 640 * Convert an extents-format file into a btree-format file. 641 * The new file will have a root block (in the inode) and a single child block. 642 */ 643 STATIC int /* error */ 644 xfs_bmap_extents_to_btree( 645 struct xfs_trans *tp, /* transaction pointer */ 646 struct xfs_inode *ip, /* incore inode pointer */ 647 struct xfs_btree_cur **curp, /* cursor returned to caller */ 648 int wasdel, /* converting a delayed alloc */ 649 int *logflagsp, /* inode logging flags */ 650 int whichfork) /* data or attr fork */ 651 { 652 struct xfs_btree_block *ablock; /* allocated (child) bt block */ 653 struct xfs_buf *abp; /* buffer for ablock */ 654 struct xfs_alloc_arg args; /* allocation arguments */ 655 struct xfs_bmbt_rec *arp; /* child record pointer */ 656 struct xfs_btree_block *block; /* btree root block */ 657 struct xfs_btree_cur *cur; /* bmap btree cursor */ 658 int error; /* error return value */ 659 struct xfs_ifork *ifp; /* inode fork pointer */ 660 struct xfs_bmbt_key *kp; /* root block key pointer */ 661 struct xfs_mount *mp; /* mount structure */ 662 xfs_bmbt_ptr_t *pp; /* root block address pointer */ 663 struct xfs_iext_cursor icur; 664 struct xfs_bmbt_irec rec; 665 xfs_extnum_t cnt = 0; 666 667 mp = ip->i_mount; 668 ASSERT(whichfork != XFS_COW_FORK); 669 ifp = XFS_IFORK_PTR(ip, whichfork); 670 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS); 671 672 /* 673 * Make space in the inode incore. This needs to be undone if we fail 674 * to expand the root. 675 */ 676 xfs_iroot_realloc(ip, 1, whichfork); 677 ifp->if_flags |= XFS_IFBROOT; 678 679 /* 680 * Fill in the root. 681 */ 682 block = ifp->if_broot; 683 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL, 684 XFS_BTNUM_BMAP, 1, 1, ip->i_ino, 685 XFS_BTREE_LONG_PTRS); 686 /* 687 * Need a cursor. Can't allocate until bb_level is filled in. 688 */ 689 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 690 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 691 /* 692 * Convert to a btree with two levels, one record in root. 693 */ 694 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE); 695 memset(&args, 0, sizeof(args)); 696 args.tp = tp; 697 args.mp = mp; 698 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork); 699 if (tp->t_firstblock == NULLFSBLOCK) { 700 args.type = XFS_ALLOCTYPE_START_BNO; 701 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); 702 } else if (tp->t_flags & XFS_TRANS_LOWMODE) { 703 args.type = XFS_ALLOCTYPE_START_BNO; 704 args.fsbno = tp->t_firstblock; 705 } else { 706 args.type = XFS_ALLOCTYPE_NEAR_BNO; 707 args.fsbno = tp->t_firstblock; 708 } 709 args.minlen = args.maxlen = args.prod = 1; 710 args.wasdel = wasdel; 711 *logflagsp = 0; 712 error = xfs_alloc_vextent(&args); 713 if (error) 714 goto out_root_realloc; 715 716 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { 717 error = -ENOSPC; 718 goto out_root_realloc; 719 } 720 721 /* 722 * Allocation can't fail, the space was reserved. 
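 * The newly allocated block becomes the single child (leaf) block of the
 * new in-inode root; it is also recorded in tp->t_firstblock, which later
 * allocations in this transaction use as their starting point.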
723 */ 724 ASSERT(tp->t_firstblock == NULLFSBLOCK || 725 args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock)); 726 tp->t_firstblock = args.fsbno; 727 cur->bc_private.b.allocated++; 728 ip->i_d.di_nblocks++; 729 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); 730 abp = xfs_btree_get_bufl(mp, tp, args.fsbno); 731 if (!abp) { 732 error = -EFSCORRUPTED; 733 goto out_unreserve_dquot; 734 } 735 736 /* 737 * Fill in the child block. 738 */ 739 abp->b_ops = &xfs_bmbt_buf_ops; 740 ablock = XFS_BUF_TO_BLOCK(abp); 741 xfs_btree_init_block_int(mp, ablock, abp->b_bn, 742 XFS_BTNUM_BMAP, 0, 0, ip->i_ino, 743 XFS_BTREE_LONG_PTRS); 744 745 for_each_xfs_iext(ifp, &icur, &rec) { 746 if (isnullstartblock(rec.br_startblock)) 747 continue; 748 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt); 749 xfs_bmbt_disk_set_all(arp, &rec); 750 cnt++; 751 } 752 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork)); 753 xfs_btree_set_numrecs(ablock, cnt); 754 755 /* 756 * Fill in the root key and pointer. 757 */ 758 kp = XFS_BMBT_KEY_ADDR(mp, block, 1); 759 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1); 760 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp)); 761 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur, 762 be16_to_cpu(block->bb_level))); 763 *pp = cpu_to_be64(args.fsbno); 764 765 /* 766 * Do all this logging at the end so that 767 * the root is at the right level. 768 */ 769 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS); 770 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); 771 ASSERT(*curp == NULL); 772 *curp = cur; 773 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork); 774 return 0; 775 776 out_unreserve_dquot: 777 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 778 out_root_realloc: 779 xfs_iroot_realloc(ip, -1, whichfork); 780 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 781 ASSERT(ifp->if_broot == NULL); 782 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 783 784 return error; 785 } 786 787 /* 788 * Convert a local file to an extents file. 789 * This code is out of bounds for data forks of regular files, 790 * since the file data needs to get logged so things will stay consistent. 791 * (The bmap-level manipulations are ok, though). 
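 * xfs_bmap_local_to_extents_empty() below only flips an already-empty
 * local fork to extents format; the caller is expected to have moved any
 * inline data out of the fork first (see xfs_bmap_local_to_extents()).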
792 */ 793 void 794 xfs_bmap_local_to_extents_empty( 795 struct xfs_inode *ip, 796 int whichfork) 797 { 798 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 799 800 ASSERT(whichfork != XFS_COW_FORK); 801 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 802 ASSERT(ifp->if_bytes == 0); 803 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); 804 805 xfs_bmap_forkoff_reset(ip, whichfork); 806 ifp->if_flags &= ~XFS_IFINLINE; 807 ifp->if_flags |= XFS_IFEXTENTS; 808 ifp->if_u1.if_root = NULL; 809 ifp->if_height = 0; 810 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 811 } 812 813 814 STATIC int /* error */ 815 xfs_bmap_local_to_extents( 816 xfs_trans_t *tp, /* transaction pointer */ 817 xfs_inode_t *ip, /* incore inode pointer */ 818 xfs_extlen_t total, /* total blocks needed by transaction */ 819 int *logflagsp, /* inode logging flags */ 820 int whichfork, 821 void (*init_fn)(struct xfs_trans *tp, 822 struct xfs_buf *bp, 823 struct xfs_inode *ip, 824 struct xfs_ifork *ifp)) 825 { 826 int error = 0; 827 int flags; /* logging flags returned */ 828 struct xfs_ifork *ifp; /* inode fork pointer */ 829 xfs_alloc_arg_t args; /* allocation arguments */ 830 xfs_buf_t *bp; /* buffer for extent block */ 831 struct xfs_bmbt_irec rec; 832 struct xfs_iext_cursor icur; 833 834 /* 835 * We don't want to deal with the case of keeping inode data inline yet. 836 * So sending the data fork of a regular inode is invalid. 837 */ 838 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK)); 839 ifp = XFS_IFORK_PTR(ip, whichfork); 840 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 841 842 if (!ifp->if_bytes) { 843 xfs_bmap_local_to_extents_empty(ip, whichfork); 844 flags = XFS_ILOG_CORE; 845 goto done; 846 } 847 848 flags = 0; 849 error = 0; 850 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE); 851 memset(&args, 0, sizeof(args)); 852 args.tp = tp; 853 args.mp = ip->i_mount; 854 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0); 855 /* 856 * Allocate a block. We know we need only one, since the 857 * file currently fits in an inode. 858 */ 859 if (tp->t_firstblock == NULLFSBLOCK) { 860 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino); 861 args.type = XFS_ALLOCTYPE_START_BNO; 862 } else { 863 args.fsbno = tp->t_firstblock; 864 args.type = XFS_ALLOCTYPE_NEAR_BNO; 865 } 866 args.total = total; 867 args.minlen = args.maxlen = args.prod = 1; 868 error = xfs_alloc_vextent(&args); 869 if (error) 870 goto done; 871 872 /* Can't fail, the space was reserved. */ 873 ASSERT(args.fsbno != NULLFSBLOCK); 874 ASSERT(args.len == 1); 875 tp->t_firstblock = args.fsbno; 876 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno); 877 878 /* 879 * Initialize the block, copy the data and log the remote buffer. 880 * 881 * The callout is responsible for logging because the remote format 882 * might differ from the local format and thus we don't know how much to 883 * log here. Note that init_fn must also set the buffer log item type 884 * correctly. 
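 * xfs_symlink_local_to_remote() is one such callout; it is the init_fn
 * used when xfs_bmap_add_attrfork_local() converts a local-format symlink.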
 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
987 */ 988 STATIC int /* error */ 989 xfs_bmap_add_attrfork_local( 990 struct xfs_trans *tp, /* transaction pointer */ 991 struct xfs_inode *ip, /* incore inode pointer */ 992 int *flags) /* inode logging flags */ 993 { 994 struct xfs_da_args dargs; /* args for dir/attr code */ 995 996 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) 997 return 0; 998 999 if (S_ISDIR(VFS_I(ip)->i_mode)) { 1000 memset(&dargs, 0, sizeof(dargs)); 1001 dargs.geo = ip->i_mount->m_dir_geo; 1002 dargs.dp = ip; 1003 dargs.total = dargs.geo->fsbcount; 1004 dargs.whichfork = XFS_DATA_FORK; 1005 dargs.trans = tp; 1006 return xfs_dir2_sf_to_block(&dargs); 1007 } 1008 1009 if (S_ISLNK(VFS_I(ip)->i_mode)) 1010 return xfs_bmap_local_to_extents(tp, ip, 1, flags, 1011 XFS_DATA_FORK, 1012 xfs_symlink_local_to_remote); 1013 1014 /* should only be called for types that support local format data */ 1015 ASSERT(0); 1016 return -EFSCORRUPTED; 1017 } 1018 1019 /* Set an inode attr fork off based on the format */ 1020 int 1021 xfs_bmap_set_attrforkoff( 1022 struct xfs_inode *ip, 1023 int size, 1024 int *version) 1025 { 1026 switch (ip->i_d.di_format) { 1027 case XFS_DINODE_FMT_DEV: 1028 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; 1029 break; 1030 case XFS_DINODE_FMT_LOCAL: 1031 case XFS_DINODE_FMT_EXTENTS: 1032 case XFS_DINODE_FMT_BTREE: 1033 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size); 1034 if (!ip->i_d.di_forkoff) 1035 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3; 1036 else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version) 1037 *version = 2; 1038 break; 1039 default: 1040 ASSERT(0); 1041 return -EINVAL; 1042 } 1043 1044 return 0; 1045 } 1046 1047 /* 1048 * Convert inode from non-attributed to attributed. 1049 * Must not be in a transaction, ip must not be locked. 1050 */ 1051 int /* error code */ 1052 xfs_bmap_add_attrfork( 1053 xfs_inode_t *ip, /* incore inode pointer */ 1054 int size, /* space new attribute needs */ 1055 int rsvd) /* xact may use reserved blks */ 1056 { 1057 xfs_mount_t *mp; /* mount structure */ 1058 xfs_trans_t *tp; /* transaction pointer */ 1059 int blks; /* space reservation */ 1060 int version = 1; /* superblock attr version */ 1061 int logflags; /* logging flags */ 1062 int error; /* error return value */ 1063 1064 ASSERT(XFS_IFORK_Q(ip) == 0); 1065 1066 mp = ip->i_mount; 1067 ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); 1068 1069 blks = XFS_ADDAFORK_SPACE_RES(mp); 1070 1071 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0, 1072 rsvd ? XFS_TRANS_RESERVE : 0, &tp); 1073 if (error) 1074 return error; 1075 1076 xfs_ilock(ip, XFS_ILOCK_EXCL); 1077 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ? 1078 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : 1079 XFS_QMOPT_RES_REGBLKS); 1080 if (error) 1081 goto trans_cancel; 1082 if (XFS_IFORK_Q(ip)) 1083 goto trans_cancel; 1084 if (ip->i_d.di_anextents != 0) { 1085 error = -EFSCORRUPTED; 1086 goto trans_cancel; 1087 } 1088 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { 1089 /* 1090 * For inodes coming from pre-6.2 filesystems. 
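 * Such inodes have a zero di_aformat; treat that as extents format
 * rather than failing the conversion.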
1091 */ 1092 ASSERT(ip->i_d.di_aformat == 0); 1093 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 1094 } 1095 1096 xfs_trans_ijoin(tp, ip, 0); 1097 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1098 error = xfs_bmap_set_attrforkoff(ip, size, &version); 1099 if (error) 1100 goto trans_cancel; 1101 ASSERT(ip->i_afp == NULL); 1102 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0); 1103 ip->i_afp->if_flags = XFS_IFEXTENTS; 1104 logflags = 0; 1105 switch (ip->i_d.di_format) { 1106 case XFS_DINODE_FMT_LOCAL: 1107 error = xfs_bmap_add_attrfork_local(tp, ip, &logflags); 1108 break; 1109 case XFS_DINODE_FMT_EXTENTS: 1110 error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags); 1111 break; 1112 case XFS_DINODE_FMT_BTREE: 1113 error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags); 1114 break; 1115 default: 1116 error = 0; 1117 break; 1118 } 1119 if (logflags) 1120 xfs_trans_log_inode(tp, ip, logflags); 1121 if (error) 1122 goto trans_cancel; 1123 if (!xfs_sb_version_hasattr(&mp->m_sb) || 1124 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) { 1125 bool log_sb = false; 1126 1127 spin_lock(&mp->m_sb_lock); 1128 if (!xfs_sb_version_hasattr(&mp->m_sb)) { 1129 xfs_sb_version_addattr(&mp->m_sb); 1130 log_sb = true; 1131 } 1132 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) { 1133 xfs_sb_version_addattr2(&mp->m_sb); 1134 log_sb = true; 1135 } 1136 spin_unlock(&mp->m_sb_lock); 1137 if (log_sb) 1138 xfs_log_sb(tp); 1139 } 1140 1141 error = xfs_trans_commit(tp); 1142 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1143 return error; 1144 1145 trans_cancel: 1146 xfs_trans_cancel(tp); 1147 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1148 return error; 1149 } 1150 1151 /* 1152 * Internal and external extent tree search functions. 1153 */ 1154 1155 /* 1156 * Read in extents from a btree-format inode. 1157 */ 1158 int 1159 xfs_iread_extents( 1160 struct xfs_trans *tp, 1161 struct xfs_inode *ip, 1162 int whichfork) 1163 { 1164 struct xfs_mount *mp = ip->i_mount; 1165 int state = xfs_bmap_fork_to_state(whichfork); 1166 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 1167 xfs_extnum_t nextents = XFS_IFORK_NEXTENTS(ip, whichfork); 1168 struct xfs_btree_block *block = ifp->if_broot; 1169 struct xfs_iext_cursor icur; 1170 struct xfs_bmbt_irec new; 1171 xfs_fsblock_t bno; 1172 struct xfs_buf *bp; 1173 xfs_extnum_t i, j; 1174 int level; 1175 __be64 *pp; 1176 int error; 1177 1178 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 1179 1180 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 1181 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 1182 return -EFSCORRUPTED; 1183 } 1184 1185 /* 1186 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. 1187 */ 1188 level = be16_to_cpu(block->bb_level); 1189 if (unlikely(level == 0)) { 1190 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 1191 return -EFSCORRUPTED; 1192 } 1193 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 1194 bno = be64_to_cpu(*pp); 1195 1196 /* 1197 * Go down the tree until leaf level is reached, following the first 1198 * pointer (leftmost) at each level. 
1199 */ 1200 while (level-- > 0) { 1201 error = xfs_btree_read_bufl(mp, tp, bno, &bp, 1202 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops); 1203 if (error) 1204 goto out; 1205 block = XFS_BUF_TO_BLOCK(bp); 1206 if (level == 0) 1207 break; 1208 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); 1209 bno = be64_to_cpu(*pp); 1210 XFS_WANT_CORRUPTED_GOTO(mp, 1211 xfs_verify_fsbno(mp, bno), out_brelse); 1212 xfs_trans_brelse(tp, bp); 1213 } 1214 1215 /* 1216 * Here with bp and block set to the leftmost leaf node in the tree. 1217 */ 1218 i = 0; 1219 xfs_iext_first(ifp, &icur); 1220 1221 /* 1222 * Loop over all leaf nodes. Copy information to the extent records. 1223 */ 1224 for (;;) { 1225 xfs_bmbt_rec_t *frp; 1226 xfs_fsblock_t nextbno; 1227 xfs_extnum_t num_recs; 1228 1229 num_recs = xfs_btree_get_numrecs(block); 1230 if (unlikely(i + num_recs > nextents)) { 1231 xfs_warn(ip->i_mount, 1232 "corrupt dinode %Lu, (btree extents).", 1233 (unsigned long long) ip->i_ino); 1234 xfs_inode_verifier_error(ip, -EFSCORRUPTED, 1235 __func__, block, sizeof(*block), 1236 __this_address); 1237 error = -EFSCORRUPTED; 1238 goto out_brelse; 1239 } 1240 /* 1241 * Read-ahead the next leaf block, if any. 1242 */ 1243 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); 1244 if (nextbno != NULLFSBLOCK) 1245 xfs_btree_reada_bufl(mp, nextbno, 1, 1246 &xfs_bmbt_buf_ops); 1247 /* 1248 * Copy records into the extent records. 1249 */ 1250 frp = XFS_BMBT_REC_ADDR(mp, block, 1); 1251 for (j = 0; j < num_recs; j++, frp++, i++) { 1252 xfs_failaddr_t fa; 1253 1254 xfs_bmbt_disk_get_all(frp, &new); 1255 fa = xfs_bmap_validate_extent(ip, whichfork, &new); 1256 if (fa) { 1257 error = -EFSCORRUPTED; 1258 xfs_inode_verifier_error(ip, error, 1259 "xfs_iread_extents(2)", 1260 frp, sizeof(*frp), fa); 1261 goto out_brelse; 1262 } 1263 xfs_iext_insert(ip, &icur, &new, state); 1264 trace_xfs_read_extent(ip, &icur, state, _THIS_IP_); 1265 xfs_iext_next(ifp, &icur); 1266 } 1267 xfs_trans_brelse(tp, bp); 1268 bno = nextbno; 1269 /* 1270 * If we've reached the end, stop. 1271 */ 1272 if (bno == NULLFSBLOCK) 1273 break; 1274 error = xfs_btree_read_bufl(mp, tp, bno, &bp, 1275 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops); 1276 if (error) 1277 goto out; 1278 block = XFS_BUF_TO_BLOCK(bp); 1279 } 1280 1281 if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) { 1282 error = -EFSCORRUPTED; 1283 goto out; 1284 } 1285 ASSERT(i == xfs_iext_count(ifp)); 1286 1287 ifp->if_flags |= XFS_IFEXTENTS; 1288 return 0; 1289 1290 out_brelse: 1291 xfs_trans_brelse(tp, bp); 1292 out: 1293 xfs_iext_destroy(ifp); 1294 return error; 1295 } 1296 1297 /* 1298 * Returns the relative block number of the first unused block(s) in the given 1299 * fork with at least "len" logically contiguous blocks free. This is the 1300 * lowest-address hole if the fork has holes, else the first block past the end 1301 * of fork. Return 0 if the fork is currently local (in-inode). 
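 * The search starts at the offset passed in via *first_unused, so callers
 * can supply a lower bound for the returned offset.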
1302 */ 1303 int /* error */ 1304 xfs_bmap_first_unused( 1305 struct xfs_trans *tp, /* transaction pointer */ 1306 struct xfs_inode *ip, /* incore inode */ 1307 xfs_extlen_t len, /* size of hole to find */ 1308 xfs_fileoff_t *first_unused, /* unused block */ 1309 int whichfork) /* data or attr fork */ 1310 { 1311 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 1312 struct xfs_bmbt_irec got; 1313 struct xfs_iext_cursor icur; 1314 xfs_fileoff_t lastaddr = 0; 1315 xfs_fileoff_t lowest, max; 1316 int error; 1317 1318 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE || 1319 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS || 1320 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 1321 1322 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 1323 *first_unused = 0; 1324 return 0; 1325 } 1326 1327 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 1328 error = xfs_iread_extents(tp, ip, whichfork); 1329 if (error) 1330 return error; 1331 } 1332 1333 lowest = max = *first_unused; 1334 for_each_xfs_iext(ifp, &icur, &got) { 1335 /* 1336 * See if the hole before this extent will work. 1337 */ 1338 if (got.br_startoff >= lowest + len && 1339 got.br_startoff - max >= len) 1340 break; 1341 lastaddr = got.br_startoff + got.br_blockcount; 1342 max = XFS_FILEOFF_MAX(lastaddr, lowest); 1343 } 1344 1345 *first_unused = max; 1346 return 0; 1347 } 1348 1349 /* 1350 * Returns the file-relative block number of the last block - 1 before 1351 * last_block (input value) in the file. 1352 * This is not based on i_size, it is based on the extent records. 1353 * Returns 0 for local files, as they do not have extent records. 1354 */ 1355 int /* error */ 1356 xfs_bmap_last_before( 1357 struct xfs_trans *tp, /* transaction pointer */ 1358 struct xfs_inode *ip, /* incore inode */ 1359 xfs_fileoff_t *last_block, /* last block */ 1360 int whichfork) /* data or attr fork */ 1361 { 1362 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 1363 struct xfs_bmbt_irec got; 1364 struct xfs_iext_cursor icur; 1365 int error; 1366 1367 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 1368 case XFS_DINODE_FMT_LOCAL: 1369 *last_block = 0; 1370 return 0; 1371 case XFS_DINODE_FMT_BTREE: 1372 case XFS_DINODE_FMT_EXTENTS: 1373 break; 1374 default: 1375 return -EIO; 1376 } 1377 1378 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 1379 error = xfs_iread_extents(tp, ip, whichfork); 1380 if (error) 1381 return error; 1382 } 1383 1384 if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got)) 1385 *last_block = 0; 1386 return 0; 1387 } 1388 1389 int 1390 xfs_bmap_last_extent( 1391 struct xfs_trans *tp, 1392 struct xfs_inode *ip, 1393 int whichfork, 1394 struct xfs_bmbt_irec *rec, 1395 int *is_empty) 1396 { 1397 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 1398 struct xfs_iext_cursor icur; 1399 int error; 1400 1401 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 1402 error = xfs_iread_extents(tp, ip, whichfork); 1403 if (error) 1404 return error; 1405 } 1406 1407 xfs_iext_last(ifp, &icur); 1408 if (!xfs_iext_get_extent(ifp, &icur, rec)) 1409 *is_empty = 1; 1410 else 1411 *is_empty = 0; 1412 return 0; 1413 } 1414 1415 /* 1416 * Check the last inode extent to determine whether this allocation will result 1417 * in blocks being allocated at the end of the file. When we allocate new data 1418 * blocks at the end of the file which do not start at the previous data block, 1419 * we will try to align the new blocks at stripe unit boundaries. 
 *
 * Returns 1 in bma->aeof if the file (fork) is empty, as any new write will
 * be at or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	struct xfs_ifork *ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
1525 */ 1526 STATIC int /* error */ 1527 xfs_bmap_add_extent_delay_real( 1528 struct xfs_bmalloca *bma, 1529 int whichfork) 1530 { 1531 struct xfs_bmbt_irec *new = &bma->got; 1532 int error; /* error return value */ 1533 int i; /* temp state */ 1534 struct xfs_ifork *ifp; /* inode fork pointer */ 1535 xfs_fileoff_t new_endoff; /* end offset of new entry */ 1536 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 1537 /* left is 0, right is 1, prev is 2 */ 1538 int rval=0; /* return value (logging flags) */ 1539 int state = xfs_bmap_fork_to_state(whichfork); 1540 xfs_filblks_t da_new; /* new count del alloc blocks used */ 1541 xfs_filblks_t da_old; /* old count del alloc blocks used */ 1542 xfs_filblks_t temp=0; /* value for da_new calculations */ 1543 int tmp_rval; /* partial logging flags */ 1544 struct xfs_mount *mp; 1545 xfs_extnum_t *nextents; 1546 struct xfs_bmbt_irec old; 1547 1548 mp = bma->ip->i_mount; 1549 ifp = XFS_IFORK_PTR(bma->ip, whichfork); 1550 ASSERT(whichfork != XFS_ATTR_FORK); 1551 nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents : 1552 &bma->ip->i_d.di_nextents); 1553 1554 ASSERT(!isnullstartblock(new->br_startblock)); 1555 ASSERT(!bma->cur || 1556 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 1557 1558 XFS_STATS_INC(mp, xs_add_exlist); 1559 1560 #define LEFT r[0] 1561 #define RIGHT r[1] 1562 #define PREV r[2] 1563 1564 /* 1565 * Set up a bunch of variables to make the tests simpler. 1566 */ 1567 xfs_iext_get_extent(ifp, &bma->icur, &PREV); 1568 new_endoff = new->br_startoff + new->br_blockcount; 1569 ASSERT(isnullstartblock(PREV.br_startblock)); 1570 ASSERT(PREV.br_startoff <= new->br_startoff); 1571 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 1572 1573 da_old = startblockval(PREV.br_startblock); 1574 da_new = 0; 1575 1576 /* 1577 * Set flags determining what part of the previous delayed allocation 1578 * extent is being replaced by a real allocation. 1579 */ 1580 if (PREV.br_startoff == new->br_startoff) 1581 state |= BMAP_LEFT_FILLING; 1582 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 1583 state |= BMAP_RIGHT_FILLING; 1584 1585 /* 1586 * Check and set flags if this segment has a left neighbor. 1587 * Don't set contiguous if the combined extent would be too large. 1588 */ 1589 if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) { 1590 state |= BMAP_LEFT_VALID; 1591 if (isnullstartblock(LEFT.br_startblock)) 1592 state |= BMAP_LEFT_DELAY; 1593 } 1594 1595 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 1596 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 1597 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 1598 LEFT.br_state == new->br_state && 1599 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 1600 state |= BMAP_LEFT_CONTIG; 1601 1602 /* 1603 * Check and set flags if this segment has a right neighbor. 1604 * Don't set contiguous if the combined extent would be too large. 1605 * Also check for all-three-contiguous being too large. 
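 * (If both neighbours are contiguous, left + new + right combined must
 * still fit within MAXEXTLEN.)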
1606 */ 1607 if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) { 1608 state |= BMAP_RIGHT_VALID; 1609 if (isnullstartblock(RIGHT.br_startblock)) 1610 state |= BMAP_RIGHT_DELAY; 1611 } 1612 1613 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 1614 new_endoff == RIGHT.br_startoff && 1615 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 1616 new->br_state == RIGHT.br_state && 1617 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 1618 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 1619 BMAP_RIGHT_FILLING)) != 1620 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 1621 BMAP_RIGHT_FILLING) || 1622 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 1623 <= MAXEXTLEN)) 1624 state |= BMAP_RIGHT_CONTIG; 1625 1626 error = 0; 1627 /* 1628 * Switch out based on the FILLING and CONTIG state bits. 1629 */ 1630 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 1631 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 1632 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 1633 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1634 /* 1635 * Filling in all of a previously delayed allocation extent. 1636 * The left and right neighbors are both contiguous with new. 1637 */ 1638 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 1639 1640 xfs_iext_remove(bma->ip, &bma->icur, state); 1641 xfs_iext_remove(bma->ip, &bma->icur, state); 1642 xfs_iext_prev(ifp, &bma->icur); 1643 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT); 1644 (*nextents)--; 1645 1646 if (bma->cur == NULL) 1647 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1648 else { 1649 rval = XFS_ILOG_CORE; 1650 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i); 1651 if (error) 1652 goto done; 1653 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1654 error = xfs_btree_delete(bma->cur, &i); 1655 if (error) 1656 goto done; 1657 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1658 error = xfs_btree_decrement(bma->cur, 0, &i); 1659 if (error) 1660 goto done; 1661 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1662 error = xfs_bmbt_update(bma->cur, &LEFT); 1663 if (error) 1664 goto done; 1665 } 1666 break; 1667 1668 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1669 /* 1670 * Filling in all of a previously delayed allocation extent. 1671 * The left neighbor is contiguous, the right is not. 1672 */ 1673 old = LEFT; 1674 LEFT.br_blockcount += PREV.br_blockcount; 1675 1676 xfs_iext_remove(bma->ip, &bma->icur, state); 1677 xfs_iext_prev(ifp, &bma->icur); 1678 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT); 1679 1680 if (bma->cur == NULL) 1681 rval = XFS_ILOG_DEXT; 1682 else { 1683 rval = 0; 1684 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i); 1685 if (error) 1686 goto done; 1687 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1688 error = xfs_bmbt_update(bma->cur, &LEFT); 1689 if (error) 1690 goto done; 1691 } 1692 break; 1693 1694 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1695 /* 1696 * Filling in all of a previously delayed allocation extent. 1697 * The right neighbor is contiguous, the left is not. Take care 1698 * with delay -> unwritten extent allocation here because the 1699 * delalloc record we are overwriting is always written. 
1700 */ 1701 PREV.br_startblock = new->br_startblock; 1702 PREV.br_blockcount += RIGHT.br_blockcount; 1703 PREV.br_state = new->br_state; 1704 1705 xfs_iext_next(ifp, &bma->icur); 1706 xfs_iext_remove(bma->ip, &bma->icur, state); 1707 xfs_iext_prev(ifp, &bma->icur); 1708 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1709 1710 if (bma->cur == NULL) 1711 rval = XFS_ILOG_DEXT; 1712 else { 1713 rval = 0; 1714 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i); 1715 if (error) 1716 goto done; 1717 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1718 error = xfs_bmbt_update(bma->cur, &PREV); 1719 if (error) 1720 goto done; 1721 } 1722 break; 1723 1724 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 1725 /* 1726 * Filling in all of a previously delayed allocation extent. 1727 * Neither the left nor right neighbors are contiguous with 1728 * the new one. 1729 */ 1730 PREV.br_startblock = new->br_startblock; 1731 PREV.br_state = new->br_state; 1732 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1733 1734 (*nextents)++; 1735 if (bma->cur == NULL) 1736 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1737 else { 1738 rval = XFS_ILOG_CORE; 1739 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1740 if (error) 1741 goto done; 1742 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1743 error = xfs_btree_insert(bma->cur, &i); 1744 if (error) 1745 goto done; 1746 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1747 } 1748 break; 1749 1750 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 1751 /* 1752 * Filling in the first part of a previous delayed allocation. 1753 * The left neighbor is contiguous. 1754 */ 1755 old = LEFT; 1756 temp = PREV.br_blockcount - new->br_blockcount; 1757 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1758 startblockval(PREV.br_startblock)); 1759 1760 LEFT.br_blockcount += new->br_blockcount; 1761 1762 PREV.br_blockcount = temp; 1763 PREV.br_startoff += new->br_blockcount; 1764 PREV.br_startblock = nullstartblock(da_new); 1765 1766 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1767 xfs_iext_prev(ifp, &bma->icur); 1768 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT); 1769 1770 if (bma->cur == NULL) 1771 rval = XFS_ILOG_DEXT; 1772 else { 1773 rval = 0; 1774 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i); 1775 if (error) 1776 goto done; 1777 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1778 error = xfs_bmbt_update(bma->cur, &LEFT); 1779 if (error) 1780 goto done; 1781 } 1782 break; 1783 1784 case BMAP_LEFT_FILLING: 1785 /* 1786 * Filling in the first part of a previous delayed allocation. 1787 * The left neighbor is not contiguous. 1788 */ 1789 xfs_iext_update_extent(bma->ip, state, &bma->icur, new); 1790 (*nextents)++; 1791 if (bma->cur == NULL) 1792 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1793 else { 1794 rval = XFS_ILOG_CORE; 1795 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1796 if (error) 1797 goto done; 1798 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1799 error = xfs_btree_insert(bma->cur, &i); 1800 if (error) 1801 goto done; 1802 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1803 } 1804 1805 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1806 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1807 &bma->cur, 1, &tmp_rval, whichfork); 1808 rval |= tmp_rval; 1809 if (error) 1810 goto done; 1811 } 1812 1813 temp = PREV.br_blockcount - new->br_blockcount; 1814 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1815 startblockval(PREV.br_startblock) - 1816 (bma->cur ? 
bma->cur->bc_private.b.allocated : 0)); 1817 1818 PREV.br_startoff = new_endoff; 1819 PREV.br_blockcount = temp; 1820 PREV.br_startblock = nullstartblock(da_new); 1821 xfs_iext_next(ifp, &bma->icur); 1822 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state); 1823 xfs_iext_prev(ifp, &bma->icur); 1824 break; 1825 1826 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1827 /* 1828 * Filling in the last part of a previous delayed allocation. 1829 * The right neighbor is contiguous with the new allocation. 1830 */ 1831 old = RIGHT; 1832 RIGHT.br_startoff = new->br_startoff; 1833 RIGHT.br_startblock = new->br_startblock; 1834 RIGHT.br_blockcount += new->br_blockcount; 1835 1836 if (bma->cur == NULL) 1837 rval = XFS_ILOG_DEXT; 1838 else { 1839 rval = 0; 1840 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i); 1841 if (error) 1842 goto done; 1843 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1844 error = xfs_bmbt_update(bma->cur, &RIGHT); 1845 if (error) 1846 goto done; 1847 } 1848 1849 temp = PREV.br_blockcount - new->br_blockcount; 1850 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1851 startblockval(PREV.br_startblock)); 1852 1853 PREV.br_blockcount = temp; 1854 PREV.br_startblock = nullstartblock(da_new); 1855 1856 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1857 xfs_iext_next(ifp, &bma->icur); 1858 xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT); 1859 break; 1860 1861 case BMAP_RIGHT_FILLING: 1862 /* 1863 * Filling in the last part of a previous delayed allocation. 1864 * The right neighbor is not contiguous. 1865 */ 1866 xfs_iext_update_extent(bma->ip, state, &bma->icur, new); 1867 (*nextents)++; 1868 if (bma->cur == NULL) 1869 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1870 else { 1871 rval = XFS_ILOG_CORE; 1872 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1873 if (error) 1874 goto done; 1875 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1876 error = xfs_btree_insert(bma->cur, &i); 1877 if (error) 1878 goto done; 1879 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1880 } 1881 1882 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1883 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1884 &bma->cur, 1, &tmp_rval, whichfork); 1885 rval |= tmp_rval; 1886 if (error) 1887 goto done; 1888 } 1889 1890 temp = PREV.br_blockcount - new->br_blockcount; 1891 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1892 startblockval(PREV.br_startblock) - 1893 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 1894 1895 PREV.br_startblock = nullstartblock(da_new); 1896 PREV.br_blockcount = temp; 1897 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state); 1898 xfs_iext_next(ifp, &bma->icur); 1899 break; 1900 1901 case 0: 1902 /* 1903 * Filling in the middle part of a previous delayed allocation. 1904 * Contiguity is impossible here. 1905 * This case is avoided almost all the time. 
1906 * 1907 * We start with a delayed allocation: 1908 * 1909 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 1910 * PREV @ idx 1911 * 1912 * and we are allocating: 1913 * +rrrrrrrrrrrrrrrrr+ 1914 * new 1915 * 1916 * and we set it up for insertion as: 1917 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 1918 * new 1919 * PREV @ idx LEFT RIGHT 1920 * inserted at idx + 1 1921 */ 1922 old = PREV; 1923 1924 /* LEFT is the new middle */ 1925 LEFT = *new; 1926 1927 /* RIGHT is the new right */ 1928 RIGHT.br_state = PREV.br_state; 1929 RIGHT.br_startoff = new_endoff; 1930 RIGHT.br_blockcount = 1931 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1932 RIGHT.br_startblock = 1933 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1934 RIGHT.br_blockcount)); 1935 1936 /* truncate PREV */ 1937 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 1938 PREV.br_startblock = 1939 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1940 PREV.br_blockcount)); 1941 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1942 1943 xfs_iext_next(ifp, &bma->icur); 1944 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state); 1945 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state); 1946 (*nextents)++; 1947 1948 if (bma->cur == NULL) 1949 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1950 else { 1951 rval = XFS_ILOG_CORE; 1952 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1953 if (error) 1954 goto done; 1955 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1956 error = xfs_btree_insert(bma->cur, &i); 1957 if (error) 1958 goto done; 1959 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1960 } 1961 1962 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1963 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1964 &bma->cur, 1, &tmp_rval, whichfork); 1965 rval |= tmp_rval; 1966 if (error) 1967 goto done; 1968 } 1969 1970 da_new = startblockval(PREV.br_startblock) + 1971 startblockval(RIGHT.br_startblock); 1972 break; 1973 1974 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1975 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1976 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 1977 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1978 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1979 case BMAP_LEFT_CONTIG: 1980 case BMAP_RIGHT_CONTIG: 1981 /* 1982 * These cases are all impossible. 
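 * (A CONTIG bit can only appear together with the FILLING bit for the
 * same side here: if the new extent does not reach that end of the
 * delalloc extent, the block next to it on that side is still delalloc
 * and so cannot be contiguous with a real LEFT or RIGHT neighbour.)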
1983 */ 1984 ASSERT(0); 1985 } 1986 1987 /* add reverse mapping unless caller opted out */ 1988 if (!(bma->flags & XFS_BMAPI_NORMAP)) 1989 xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); 1990 1991 /* convert to a btree if necessary */ 1992 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1993 int tmp_logflags; /* partial log flag return val */ 1994 1995 ASSERT(bma->cur == NULL); 1996 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1997 &bma->cur, da_old > 0, &tmp_logflags, 1998 whichfork); 1999 bma->logflags |= tmp_logflags; 2000 if (error) 2001 goto done; 2002 } 2003 2004 if (da_new != da_old) 2005 xfs_mod_delalloc(mp, (int64_t)da_new - da_old); 2006 2007 if (bma->cur) { 2008 da_new += bma->cur->bc_private.b.allocated; 2009 bma->cur->bc_private.b.allocated = 0; 2010 } 2011 2012 /* adjust for changes in reserved delayed indirect blocks */ 2013 if (da_new != da_old) { 2014 ASSERT(state == 0 || da_new < da_old); 2015 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), 2016 false); 2017 } 2018 2019 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 2020 done: 2021 if (whichfork != XFS_COW_FORK) 2022 bma->logflags |= rval; 2023 return error; 2024 #undef LEFT 2025 #undef RIGHT 2026 #undef PREV 2027 } 2028 2029 /* 2030 * Convert an unwritten allocation to a real allocation or vice versa. 2031 */ 2032 int /* error */ 2033 xfs_bmap_add_extent_unwritten_real( 2034 struct xfs_trans *tp, 2035 xfs_inode_t *ip, /* incore inode pointer */ 2036 int whichfork, 2037 struct xfs_iext_cursor *icur, 2038 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 2039 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 2040 int *logflagsp) /* inode logging flags */ 2041 { 2042 xfs_btree_cur_t *cur; /* btree cursor */ 2043 int error; /* error return value */ 2044 int i; /* temp state */ 2045 struct xfs_ifork *ifp; /* inode fork pointer */ 2046 xfs_fileoff_t new_endoff; /* end offset of new entry */ 2047 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 2048 /* left is 0, right is 1, prev is 2 */ 2049 int rval=0; /* return value (logging flags) */ 2050 int state = xfs_bmap_fork_to_state(whichfork); 2051 struct xfs_mount *mp = ip->i_mount; 2052 struct xfs_bmbt_irec old; 2053 2054 *logflagsp = 0; 2055 2056 cur = *curp; 2057 ifp = XFS_IFORK_PTR(ip, whichfork); 2058 2059 ASSERT(!isnullstartblock(new->br_startblock)); 2060 2061 XFS_STATS_INC(mp, xs_add_exlist); 2062 2063 #define LEFT r[0] 2064 #define RIGHT r[1] 2065 #define PREV r[2] 2066 2067 /* 2068 * Set up a bunch of variables to make the tests simpler. 2069 */ 2070 error = 0; 2071 xfs_iext_get_extent(ifp, icur, &PREV); 2072 ASSERT(new->br_state != PREV.br_state); 2073 new_endoff = new->br_startoff + new->br_blockcount; 2074 ASSERT(PREV.br_startoff <= new->br_startoff); 2075 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2076 2077 /* 2078 * Set flags determining what part of the previous oldext allocation 2079 * extent is being replaced by a newext allocation. 2080 */ 2081 if (PREV.br_startoff == new->br_startoff) 2082 state |= BMAP_LEFT_FILLING; 2083 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2084 state |= BMAP_RIGHT_FILLING; 2085 2086 /* 2087 * Check and set flags if this segment has a left neighbor. 2088 * Don't set contiguous if the combined extent would be too large. 
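 * Illustration (hypothetical numbers): a real LEFT extent at offset 0,
 * startblock 500, length 100 is contiguous with a new extent at offset
 * 100, startblock 600 of the same state, provided the merged length
 * stays within MAXEXTLEN; matching file offsets alone are not enough.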
2089 */ 2090 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { 2091 state |= BMAP_LEFT_VALID; 2092 if (isnullstartblock(LEFT.br_startblock)) 2093 state |= BMAP_LEFT_DELAY; 2094 } 2095 2096 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2097 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2098 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2099 LEFT.br_state == new->br_state && 2100 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2101 state |= BMAP_LEFT_CONTIG; 2102 2103 /* 2104 * Check and set flags if this segment has a right neighbor. 2105 * Don't set contiguous if the combined extent would be too large. 2106 * Also check for all-three-contiguous being too large. 2107 */ 2108 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { 2109 state |= BMAP_RIGHT_VALID; 2110 if (isnullstartblock(RIGHT.br_startblock)) 2111 state |= BMAP_RIGHT_DELAY; 2112 } 2113 2114 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2115 new_endoff == RIGHT.br_startoff && 2116 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2117 new->br_state == RIGHT.br_state && 2118 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 2119 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2120 BMAP_RIGHT_FILLING)) != 2121 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2122 BMAP_RIGHT_FILLING) || 2123 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2124 <= MAXEXTLEN)) 2125 state |= BMAP_RIGHT_CONTIG; 2126 2127 /* 2128 * Switch out based on the FILLING and CONTIG state bits. 2129 */ 2130 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2131 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2132 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2133 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2134 /* 2135 * Setting all of a previous oldext extent to newext. 2136 * The left and right neighbors are both contiguous with new. 2137 */ 2138 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 2139 2140 xfs_iext_remove(ip, icur, state); 2141 xfs_iext_remove(ip, icur, state); 2142 xfs_iext_prev(ifp, icur); 2143 xfs_iext_update_extent(ip, state, icur, &LEFT); 2144 XFS_IFORK_NEXT_SET(ip, whichfork, 2145 XFS_IFORK_NEXTENTS(ip, whichfork) - 2); 2146 if (cur == NULL) 2147 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2148 else { 2149 rval = XFS_ILOG_CORE; 2150 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2151 if (error) 2152 goto done; 2153 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2154 if ((error = xfs_btree_delete(cur, &i))) 2155 goto done; 2156 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2157 if ((error = xfs_btree_decrement(cur, 0, &i))) 2158 goto done; 2159 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2160 if ((error = xfs_btree_delete(cur, &i))) 2161 goto done; 2162 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2163 if ((error = xfs_btree_decrement(cur, 0, &i))) 2164 goto done; 2165 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2166 error = xfs_bmbt_update(cur, &LEFT); 2167 if (error) 2168 goto done; 2169 } 2170 break; 2171 2172 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2173 /* 2174 * Setting all of a previous oldext extent to newext. 2175 * The left neighbor is contiguous, the right is not. 
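 * In-core this is a two record merge: PREV is removed and LEFT grows by
 * PREV's length. In the btree below it is done by looking up PREV's
 * record, deleting it, decrementing the cursor to LEFT and rewriting
 * LEFT with the combined length.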
2176 */ 2177 LEFT.br_blockcount += PREV.br_blockcount; 2178 2179 xfs_iext_remove(ip, icur, state); 2180 xfs_iext_prev(ifp, icur); 2181 xfs_iext_update_extent(ip, state, icur, &LEFT); 2182 XFS_IFORK_NEXT_SET(ip, whichfork, 2183 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2184 if (cur == NULL) 2185 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2186 else { 2187 rval = XFS_ILOG_CORE; 2188 error = xfs_bmbt_lookup_eq(cur, &PREV, &i); 2189 if (error) 2190 goto done; 2191 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2192 if ((error = xfs_btree_delete(cur, &i))) 2193 goto done; 2194 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2195 if ((error = xfs_btree_decrement(cur, 0, &i))) 2196 goto done; 2197 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2198 error = xfs_bmbt_update(cur, &LEFT); 2199 if (error) 2200 goto done; 2201 } 2202 break; 2203 2204 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2205 /* 2206 * Setting all of a previous oldext extent to newext. 2207 * The right neighbor is contiguous, the left is not. 2208 */ 2209 PREV.br_blockcount += RIGHT.br_blockcount; 2210 PREV.br_state = new->br_state; 2211 2212 xfs_iext_next(ifp, icur); 2213 xfs_iext_remove(ip, icur, state); 2214 xfs_iext_prev(ifp, icur); 2215 xfs_iext_update_extent(ip, state, icur, &PREV); 2216 2217 XFS_IFORK_NEXT_SET(ip, whichfork, 2218 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2219 if (cur == NULL) 2220 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2221 else { 2222 rval = XFS_ILOG_CORE; 2223 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2224 if (error) 2225 goto done; 2226 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2227 if ((error = xfs_btree_delete(cur, &i))) 2228 goto done; 2229 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2230 if ((error = xfs_btree_decrement(cur, 0, &i))) 2231 goto done; 2232 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2233 error = xfs_bmbt_update(cur, &PREV); 2234 if (error) 2235 goto done; 2236 } 2237 break; 2238 2239 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2240 /* 2241 * Setting all of a previous oldext extent to newext. 2242 * Neither the left nor right neighbors are contiguous with 2243 * the new one. 2244 */ 2245 PREV.br_state = new->br_state; 2246 xfs_iext_update_extent(ip, state, icur, &PREV); 2247 2248 if (cur == NULL) 2249 rval = XFS_ILOG_DEXT; 2250 else { 2251 rval = 0; 2252 error = xfs_bmbt_lookup_eq(cur, new, &i); 2253 if (error) 2254 goto done; 2255 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2256 error = xfs_bmbt_update(cur, &PREV); 2257 if (error) 2258 goto done; 2259 } 2260 break; 2261 2262 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2263 /* 2264 * Setting the first part of a previous oldext extent to newext. 2265 * The left neighbor is contiguous. 
2266 */ 2267 LEFT.br_blockcount += new->br_blockcount; 2268 2269 old = PREV; 2270 PREV.br_startoff += new->br_blockcount; 2271 PREV.br_startblock += new->br_blockcount; 2272 PREV.br_blockcount -= new->br_blockcount; 2273 2274 xfs_iext_update_extent(ip, state, icur, &PREV); 2275 xfs_iext_prev(ifp, icur); 2276 xfs_iext_update_extent(ip, state, icur, &LEFT); 2277 2278 if (cur == NULL) 2279 rval = XFS_ILOG_DEXT; 2280 else { 2281 rval = 0; 2282 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2283 if (error) 2284 goto done; 2285 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2286 error = xfs_bmbt_update(cur, &PREV); 2287 if (error) 2288 goto done; 2289 error = xfs_btree_decrement(cur, 0, &i); 2290 if (error) 2291 goto done; 2292 error = xfs_bmbt_update(cur, &LEFT); 2293 if (error) 2294 goto done; 2295 } 2296 break; 2297 2298 case BMAP_LEFT_FILLING: 2299 /* 2300 * Setting the first part of a previous oldext extent to newext. 2301 * The left neighbor is not contiguous. 2302 */ 2303 old = PREV; 2304 PREV.br_startoff += new->br_blockcount; 2305 PREV.br_startblock += new->br_blockcount; 2306 PREV.br_blockcount -= new->br_blockcount; 2307 2308 xfs_iext_update_extent(ip, state, icur, &PREV); 2309 xfs_iext_insert(ip, icur, new, state); 2310 XFS_IFORK_NEXT_SET(ip, whichfork, 2311 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2312 if (cur == NULL) 2313 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2314 else { 2315 rval = XFS_ILOG_CORE; 2316 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2317 if (error) 2318 goto done; 2319 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2320 error = xfs_bmbt_update(cur, &PREV); 2321 if (error) 2322 goto done; 2323 cur->bc_rec.b = *new; 2324 if ((error = xfs_btree_insert(cur, &i))) 2325 goto done; 2326 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2327 } 2328 break; 2329 2330 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2331 /* 2332 * Setting the last part of a previous oldext extent to newext. 2333 * The right neighbor is contiguous with the new allocation. 2334 */ 2335 old = PREV; 2336 PREV.br_blockcount -= new->br_blockcount; 2337 2338 RIGHT.br_startoff = new->br_startoff; 2339 RIGHT.br_startblock = new->br_startblock; 2340 RIGHT.br_blockcount += new->br_blockcount; 2341 2342 xfs_iext_update_extent(ip, state, icur, &PREV); 2343 xfs_iext_next(ifp, icur); 2344 xfs_iext_update_extent(ip, state, icur, &RIGHT); 2345 2346 if (cur == NULL) 2347 rval = XFS_ILOG_DEXT; 2348 else { 2349 rval = 0; 2350 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2351 if (error) 2352 goto done; 2353 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2354 error = xfs_bmbt_update(cur, &PREV); 2355 if (error) 2356 goto done; 2357 error = xfs_btree_increment(cur, 0, &i); 2358 if (error) 2359 goto done; 2360 error = xfs_bmbt_update(cur, &RIGHT); 2361 if (error) 2362 goto done; 2363 } 2364 break; 2365 2366 case BMAP_RIGHT_FILLING: 2367 /* 2368 * Setting the last part of a previous oldext extent to newext. 2369 * The right neighbor is not contiguous. 
2370 */ 2371 old = PREV; 2372 PREV.br_blockcount -= new->br_blockcount; 2373 2374 xfs_iext_update_extent(ip, state, icur, &PREV); 2375 xfs_iext_next(ifp, icur); 2376 xfs_iext_insert(ip, icur, new, state); 2377 2378 XFS_IFORK_NEXT_SET(ip, whichfork, 2379 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2380 if (cur == NULL) 2381 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2382 else { 2383 rval = XFS_ILOG_CORE; 2384 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2385 if (error) 2386 goto done; 2387 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2388 error = xfs_bmbt_update(cur, &PREV); 2389 if (error) 2390 goto done; 2391 error = xfs_bmbt_lookup_eq(cur, new, &i); 2392 if (error) 2393 goto done; 2394 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2395 if ((error = xfs_btree_insert(cur, &i))) 2396 goto done; 2397 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2398 } 2399 break; 2400 2401 case 0: 2402 /* 2403 * Setting the middle part of a previous oldext extent to 2404 * newext. Contiguity is impossible here. 2405 * One extent becomes three extents. 2406 */ 2407 old = PREV; 2408 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 2409 2410 r[0] = *new; 2411 r[1].br_startoff = new_endoff; 2412 r[1].br_blockcount = 2413 old.br_startoff + old.br_blockcount - new_endoff; 2414 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2415 r[1].br_state = PREV.br_state; 2416 2417 xfs_iext_update_extent(ip, state, icur, &PREV); 2418 xfs_iext_next(ifp, icur); 2419 xfs_iext_insert(ip, icur, &r[1], state); 2420 xfs_iext_insert(ip, icur, &r[0], state); 2421 2422 XFS_IFORK_NEXT_SET(ip, whichfork, 2423 XFS_IFORK_NEXTENTS(ip, whichfork) + 2); 2424 if (cur == NULL) 2425 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2426 else { 2427 rval = XFS_ILOG_CORE; 2428 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2429 if (error) 2430 goto done; 2431 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2432 /* new right extent - oldext */ 2433 error = xfs_bmbt_update(cur, &r[1]); 2434 if (error) 2435 goto done; 2436 /* new left extent - oldext */ 2437 cur->bc_rec.b = PREV; 2438 if ((error = xfs_btree_insert(cur, &i))) 2439 goto done; 2440 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2441 /* 2442 * Reset the cursor to the position of the new extent 2443 * we are about to insert as we can't trust it after 2444 * the previous insert. 2445 */ 2446 error = xfs_bmbt_lookup_eq(cur, new, &i); 2447 if (error) 2448 goto done; 2449 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2450 /* new middle extent - newext */ 2451 if ((error = xfs_btree_insert(cur, &i))) 2452 goto done; 2453 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2454 } 2455 break; 2456 2457 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2458 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2459 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2460 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2461 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2462 case BMAP_LEFT_CONTIG: 2463 case BMAP_RIGHT_CONTIG: 2464 /* 2465 * These cases are all impossible. 2466 */ 2467 ASSERT(0); 2468 } 2469 2470 /* update reverse mappings */ 2471 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); 2472 2473 /* convert to a btree if necessary */ 2474 if (xfs_bmap_needs_btree(ip, whichfork)) { 2475 int tmp_logflags; /* partial log flag return val */ 2476 2477 ASSERT(cur == NULL); 2478 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 2479 &tmp_logflags, whichfork); 2480 *logflagsp |= tmp_logflags; 2481 if (error) 2482 goto done; 2483 } 2484 2485 /* clear out the allocated field, done with it now in any case. 
*/ 2486 if (cur) { 2487 cur->bc_private.b.allocated = 0; 2488 *curp = cur; 2489 } 2490 2491 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2492 done: 2493 *logflagsp |= rval; 2494 return error; 2495 #undef LEFT 2496 #undef RIGHT 2497 #undef PREV 2498 } 2499 2500 /* 2501 * Convert a hole to a delayed allocation. 2502 */ 2503 STATIC void 2504 xfs_bmap_add_extent_hole_delay( 2505 xfs_inode_t *ip, /* incore inode pointer */ 2506 int whichfork, 2507 struct xfs_iext_cursor *icur, 2508 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2509 { 2510 struct xfs_ifork *ifp; /* inode fork pointer */ 2511 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2512 xfs_filblks_t newlen=0; /* new indirect size */ 2513 xfs_filblks_t oldlen=0; /* old indirect size */ 2514 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2515 int state = xfs_bmap_fork_to_state(whichfork); 2516 xfs_filblks_t temp; /* temp for indirect calculations */ 2517 2518 ifp = XFS_IFORK_PTR(ip, whichfork); 2519 ASSERT(isnullstartblock(new->br_startblock)); 2520 2521 /* 2522 * Check and set flags if this segment has a left neighbor 2523 */ 2524 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2525 state |= BMAP_LEFT_VALID; 2526 if (isnullstartblock(left.br_startblock)) 2527 state |= BMAP_LEFT_DELAY; 2528 } 2529 2530 /* 2531 * Check and set flags if the current (right) segment exists. 2532 * If it doesn't exist, we're converting the hole at end-of-file. 2533 */ 2534 if (xfs_iext_get_extent(ifp, icur, &right)) { 2535 state |= BMAP_RIGHT_VALID; 2536 if (isnullstartblock(right.br_startblock)) 2537 state |= BMAP_RIGHT_DELAY; 2538 } 2539 2540 /* 2541 * Set contiguity flags on the left and right neighbors. 2542 * Don't let extents get too large, even if the pieces are contiguous. 2543 */ 2544 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2545 left.br_startoff + left.br_blockcount == new->br_startoff && 2546 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2547 state |= BMAP_LEFT_CONTIG; 2548 2549 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2550 new->br_startoff + new->br_blockcount == right.br_startoff && 2551 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2552 (!(state & BMAP_LEFT_CONTIG) || 2553 (left.br_blockcount + new->br_blockcount + 2554 right.br_blockcount <= MAXEXTLEN))) 2555 state |= BMAP_RIGHT_CONTIG; 2556 2557 /* 2558 * Switch out based on the contiguity flags. 2559 */ 2560 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2561 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2562 /* 2563 * New allocation is contiguous with delayed allocations 2564 * on the left and on the right. 2565 * Merge all three into a single extent record. 2566 */ 2567 temp = left.br_blockcount + new->br_blockcount + 2568 right.br_blockcount; 2569 2570 oldlen = startblockval(left.br_startblock) + 2571 startblockval(new->br_startblock) + 2572 startblockval(right.br_startblock); 2573 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2574 oldlen); 2575 left.br_startblock = nullstartblock(newlen); 2576 left.br_blockcount = temp; 2577 2578 xfs_iext_remove(ip, icur, state); 2579 xfs_iext_prev(ifp, icur); 2580 xfs_iext_update_extent(ip, state, icur, &left); 2581 break; 2582 2583 case BMAP_LEFT_CONTIG: 2584 /* 2585 * New allocation is contiguous with a delayed allocation 2586 * on the left. 2587 * Merge the new allocation with the left neighbor. 
2588 */ 2589 temp = left.br_blockcount + new->br_blockcount; 2590 2591 oldlen = startblockval(left.br_startblock) + 2592 startblockval(new->br_startblock); 2593 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2594 oldlen); 2595 left.br_blockcount = temp; 2596 left.br_startblock = nullstartblock(newlen); 2597 2598 xfs_iext_prev(ifp, icur); 2599 xfs_iext_update_extent(ip, state, icur, &left); 2600 break; 2601 2602 case BMAP_RIGHT_CONTIG: 2603 /* 2604 * New allocation is contiguous with a delayed allocation 2605 * on the right. 2606 * Merge the new allocation with the right neighbor. 2607 */ 2608 temp = new->br_blockcount + right.br_blockcount; 2609 oldlen = startblockval(new->br_startblock) + 2610 startblockval(right.br_startblock); 2611 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2612 oldlen); 2613 right.br_startoff = new->br_startoff; 2614 right.br_startblock = nullstartblock(newlen); 2615 right.br_blockcount = temp; 2616 xfs_iext_update_extent(ip, state, icur, &right); 2617 break; 2618 2619 case 0: 2620 /* 2621 * New allocation is not contiguous with another 2622 * delayed allocation. 2623 * Insert a new entry. 2624 */ 2625 oldlen = newlen = 0; 2626 xfs_iext_insert(ip, icur, new, state); 2627 break; 2628 } 2629 if (oldlen != newlen) { 2630 ASSERT(oldlen > newlen); 2631 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2632 false); 2633 /* 2634 * Nothing to do for disk quota accounting here. 2635 */ 2636 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen); 2637 } 2638 } 2639 2640 /* 2641 * Convert a hole to a real allocation. 2642 */ 2643 STATIC int /* error */ 2644 xfs_bmap_add_extent_hole_real( 2645 struct xfs_trans *tp, 2646 struct xfs_inode *ip, 2647 int whichfork, 2648 struct xfs_iext_cursor *icur, 2649 struct xfs_btree_cur **curp, 2650 struct xfs_bmbt_irec *new, 2651 int *logflagsp, 2652 int flags) 2653 { 2654 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 2655 struct xfs_mount *mp = ip->i_mount; 2656 struct xfs_btree_cur *cur = *curp; 2657 int error; /* error return value */ 2658 int i; /* temp state */ 2659 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2660 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2661 int rval=0; /* return value (logging flags) */ 2662 int state = xfs_bmap_fork_to_state(whichfork); 2663 struct xfs_bmbt_irec old; 2664 2665 ASSERT(!isnullstartblock(new->br_startblock)); 2666 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 2667 2668 XFS_STATS_INC(mp, xs_add_exlist); 2669 2670 /* 2671 * Check and set flags if this segment has a left neighbor. 2672 */ 2673 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2674 state |= BMAP_LEFT_VALID; 2675 if (isnullstartblock(left.br_startblock)) 2676 state |= BMAP_LEFT_DELAY; 2677 } 2678 2679 /* 2680 * Check and set flags if this segment has a current value. 2681 * Not true if we're inserting into the "hole" at eof. 2682 */ 2683 if (xfs_iext_get_extent(ifp, icur, &right)) { 2684 state |= BMAP_RIGHT_VALID; 2685 if (isnullstartblock(right.br_startblock)) 2686 state |= BMAP_RIGHT_DELAY; 2687 } 2688 2689 /* 2690 * We're inserting a real allocation between "left" and "right". 2691 * Set the contiguity flags. Don't let extents get too large. 
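 * (A single on-disk extent record cannot describe more than MAXEXTLEN
 * blocks, a 21-bit length, so a merge that would exceed that limit is
 * skipped even when the extents are physically adjacent.)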
2692 */ 2693 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2694 left.br_startoff + left.br_blockcount == new->br_startoff && 2695 left.br_startblock + left.br_blockcount == new->br_startblock && 2696 left.br_state == new->br_state && 2697 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2698 state |= BMAP_LEFT_CONTIG; 2699 2700 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2701 new->br_startoff + new->br_blockcount == right.br_startoff && 2702 new->br_startblock + new->br_blockcount == right.br_startblock && 2703 new->br_state == right.br_state && 2704 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2705 (!(state & BMAP_LEFT_CONTIG) || 2706 left.br_blockcount + new->br_blockcount + 2707 right.br_blockcount <= MAXEXTLEN)) 2708 state |= BMAP_RIGHT_CONTIG; 2709 2710 error = 0; 2711 /* 2712 * Select which case we're in here, and implement it. 2713 */ 2714 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2715 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2716 /* 2717 * New allocation is contiguous with real allocations on the 2718 * left and on the right. 2719 * Merge all three into a single extent record. 2720 */ 2721 left.br_blockcount += new->br_blockcount + right.br_blockcount; 2722 2723 xfs_iext_remove(ip, icur, state); 2724 xfs_iext_prev(ifp, icur); 2725 xfs_iext_update_extent(ip, state, icur, &left); 2726 2727 XFS_IFORK_NEXT_SET(ip, whichfork, 2728 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2729 if (cur == NULL) { 2730 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2731 } else { 2732 rval = XFS_ILOG_CORE; 2733 error = xfs_bmbt_lookup_eq(cur, &right, &i); 2734 if (error) 2735 goto done; 2736 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2737 error = xfs_btree_delete(cur, &i); 2738 if (error) 2739 goto done; 2740 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2741 error = xfs_btree_decrement(cur, 0, &i); 2742 if (error) 2743 goto done; 2744 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2745 error = xfs_bmbt_update(cur, &left); 2746 if (error) 2747 goto done; 2748 } 2749 break; 2750 2751 case BMAP_LEFT_CONTIG: 2752 /* 2753 * New allocation is contiguous with a real allocation 2754 * on the left. 2755 * Merge the new allocation with the left neighbor. 2756 */ 2757 old = left; 2758 left.br_blockcount += new->br_blockcount; 2759 2760 xfs_iext_prev(ifp, icur); 2761 xfs_iext_update_extent(ip, state, icur, &left); 2762 2763 if (cur == NULL) { 2764 rval = xfs_ilog_fext(whichfork); 2765 } else { 2766 rval = 0; 2767 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2768 if (error) 2769 goto done; 2770 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2771 error = xfs_bmbt_update(cur, &left); 2772 if (error) 2773 goto done; 2774 } 2775 break; 2776 2777 case BMAP_RIGHT_CONTIG: 2778 /* 2779 * New allocation is contiguous with a real allocation 2780 * on the right. 2781 * Merge the new allocation with the right neighbor. 2782 */ 2783 old = right; 2784 2785 right.br_startoff = new->br_startoff; 2786 right.br_startblock = new->br_startblock; 2787 right.br_blockcount += new->br_blockcount; 2788 xfs_iext_update_extent(ip, state, icur, &right); 2789 2790 if (cur == NULL) { 2791 rval = xfs_ilog_fext(whichfork); 2792 } else { 2793 rval = 0; 2794 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2795 if (error) 2796 goto done; 2797 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2798 error = xfs_bmbt_update(cur, &right); 2799 if (error) 2800 goto done; 2801 } 2802 break; 2803 2804 case 0: 2805 /* 2806 * New allocation is not contiguous with another 2807 * real allocation. 
2808 * Insert a new entry. 2809 */ 2810 xfs_iext_insert(ip, icur, new, state); 2811 XFS_IFORK_NEXT_SET(ip, whichfork, 2812 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2813 if (cur == NULL) { 2814 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2815 } else { 2816 rval = XFS_ILOG_CORE; 2817 error = xfs_bmbt_lookup_eq(cur, new, &i); 2818 if (error) 2819 goto done; 2820 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2821 error = xfs_btree_insert(cur, &i); 2822 if (error) 2823 goto done; 2824 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2825 } 2826 break; 2827 } 2828 2829 /* add reverse mapping unless caller opted out */ 2830 if (!(flags & XFS_BMAPI_NORMAP)) 2831 xfs_rmap_map_extent(tp, ip, whichfork, new); 2832 2833 /* convert to a btree if necessary */ 2834 if (xfs_bmap_needs_btree(ip, whichfork)) { 2835 int tmp_logflags; /* partial log flag return val */ 2836 2837 ASSERT(cur == NULL); 2838 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2839 &tmp_logflags, whichfork); 2840 *logflagsp |= tmp_logflags; 2841 cur = *curp; 2842 if (error) 2843 goto done; 2844 } 2845 2846 /* clear out the allocated field, done with it now in any case. */ 2847 if (cur) 2848 cur->bc_private.b.allocated = 0; 2849 2850 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 2851 done: 2852 *logflagsp |= rval; 2853 return error; 2854 } 2855 2856 /* 2857 * Functions used in the extent read, allocate and remove paths 2858 */ 2859 2860 /* 2861 * Adjust the size of the new extent based on di_extsize and rt extsize. 2862 */ 2863 int 2864 xfs_bmap_extsize_align( 2865 xfs_mount_t *mp, 2866 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2867 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2868 xfs_extlen_t extsz, /* align to this extent size */ 2869 int rt, /* is this a realtime inode? */ 2870 int eof, /* is extent at end-of-file? */ 2871 int delay, /* creating delalloc extent? */ 2872 int convert, /* overwriting unwritten extent? */ 2873 xfs_fileoff_t *offp, /* in/out: aligned offset */ 2874 xfs_extlen_t *lenp) /* in/out: aligned length */ 2875 { 2876 xfs_fileoff_t orig_off; /* original offset */ 2877 xfs_extlen_t orig_alen; /* original length */ 2878 xfs_fileoff_t orig_end; /* original off+len */ 2879 xfs_fileoff_t nexto; /* next file offset */ 2880 xfs_fileoff_t prevo; /* previous file offset */ 2881 xfs_fileoff_t align_off; /* temp for offset */ 2882 xfs_extlen_t align_alen; /* temp for length */ 2883 xfs_extlen_t temp; /* temp for calculations */ 2884 2885 if (convert) 2886 return 0; 2887 2888 orig_off = align_off = *offp; 2889 orig_alen = align_alen = *lenp; 2890 orig_end = orig_off + orig_alen; 2891 2892 /* 2893 * If this request overlaps an existing extent, then don't 2894 * attempt to perform any additional alignment. 2895 */ 2896 if (!delay && !eof && 2897 (orig_off >= gotp->br_startoff) && 2898 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 2899 return 0; 2900 } 2901 2902 /* 2903 * If the file offset is unaligned vs. the extent size 2904 * we need to align it. This will be possible unless 2905 * the file was previously written with a kernel that didn't 2906 * perform this alignment, or if a truncate shot us in the 2907 * foot. 2908 */ 2909 div_u64_rem(orig_off, extsz, &temp); 2910 if (temp) { 2911 align_alen += temp; 2912 align_off -= temp; 2913 } 2914 2915 /* Same adjustment for the end of the requested area. */ 2916 temp = (align_alen % extsz); 2917 if (temp) 2918 align_alen += extsz - temp; 2919 2920 /* 2921 * For large extent hint sizes, the aligned extent might be larger than 2922 * MAXEXTLEN. 
In that case, reduce the size by an extsz so that it pulls 2923 * the length back under MAXEXTLEN. The outer allocation loops handle 2924 * short allocation just fine, so it is safe to do this. We only want to 2925 * do it when we are forced to, though, because it means more allocation 2926 * operations are required. 2927 */ 2928 while (align_alen > MAXEXTLEN) 2929 align_alen -= extsz; 2930 ASSERT(align_alen <= MAXEXTLEN); 2931 2932 /* 2933 * If the previous block overlaps with this proposed allocation 2934 * then move the start forward without adjusting the length. 2935 */ 2936 if (prevp->br_startoff != NULLFILEOFF) { 2937 if (prevp->br_startblock == HOLESTARTBLOCK) 2938 prevo = prevp->br_startoff; 2939 else 2940 prevo = prevp->br_startoff + prevp->br_blockcount; 2941 } else 2942 prevo = 0; 2943 if (align_off != orig_off && align_off < prevo) 2944 align_off = prevo; 2945 /* 2946 * If the next block overlaps with this proposed allocation 2947 * then move the start back without adjusting the length, 2948 * but not before offset 0. 2949 * This may of course make the start overlap previous block, 2950 * and if we hit the offset 0 limit then the next block 2951 * can still overlap too. 2952 */ 2953 if (!eof && gotp->br_startoff != NULLFILEOFF) { 2954 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 2955 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 2956 nexto = gotp->br_startoff + gotp->br_blockcount; 2957 else 2958 nexto = gotp->br_startoff; 2959 } else 2960 nexto = NULLFILEOFF; 2961 if (!eof && 2962 align_off + align_alen != orig_end && 2963 align_off + align_alen > nexto) 2964 align_off = nexto > align_alen ? nexto - align_alen : 0; 2965 /* 2966 * If we're now overlapping the next or previous extent that 2967 * means we can't fit an extsz piece in this hole. Just move 2968 * the start forward to the first valid spot and set 2969 * the length so we hit the end. 2970 */ 2971 if (align_off != orig_off && align_off < prevo) 2972 align_off = prevo; 2973 if (align_off + align_alen != orig_end && 2974 align_off + align_alen > nexto && 2975 nexto != NULLFILEOFF) { 2976 ASSERT(nexto > prevo); 2977 align_alen = nexto - align_off; 2978 } 2979 2980 /* 2981 * If realtime, and the result isn't a multiple of the realtime 2982 * extent size we need to remove blocks until it is. 2983 */ 2984 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 2985 /* 2986 * We're not covering the original request, or 2987 * we won't be able to once we fix the length. 2988 */ 2989 if (orig_off < align_off || 2990 orig_end > align_off + align_alen || 2991 align_alen - temp < orig_alen) 2992 return -EINVAL; 2993 /* 2994 * Try to fix it by moving the start up. 2995 */ 2996 if (align_off + temp <= orig_off) { 2997 align_alen -= temp; 2998 align_off += temp; 2999 } 3000 /* 3001 * Try to fix it by moving the end in. 3002 */ 3003 else if (align_off + align_alen - temp >= orig_end) 3004 align_alen -= temp; 3005 /* 3006 * Set the start to the minimum then trim the length. 3007 */ 3008 else { 3009 align_alen -= orig_off - align_off; 3010 align_off = orig_off; 3011 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3012 } 3013 /* 3014 * Result doesn't cover the request, fail it. 
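 * Illustration (hypothetical numbers): with sb_rextsize == 16, an
 * aligned range [0, 40) for an original request [5, 35) leaves a
 * remainder of 8; neither end can be trimmed by 8 without uncovering
 * the request, so the start is pinned to offset 5 and the length is
 * rounded down to a multiple of 16, giving [5, 37), which the check
 * below accepts.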
3015 */ 3016 if (orig_off < align_off || orig_end > align_off + align_alen) 3017 return -EINVAL; 3018 } else { 3019 ASSERT(orig_off >= align_off); 3020 /* see MAXEXTLEN handling above */ 3021 ASSERT(orig_end <= align_off + align_alen || 3022 align_alen + extsz > MAXEXTLEN); 3023 } 3024 3025 #ifdef DEBUG 3026 if (!eof && gotp->br_startoff != NULLFILEOFF) 3027 ASSERT(align_off + align_alen <= gotp->br_startoff); 3028 if (prevp->br_startoff != NULLFILEOFF) 3029 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3030 #endif 3031 3032 *lenp = align_alen; 3033 *offp = align_off; 3034 return 0; 3035 } 3036 3037 #define XFS_ALLOC_GAP_UNITS 4 3038 3039 void 3040 xfs_bmap_adjacent( 3041 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3042 { 3043 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3044 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3045 xfs_mount_t *mp; /* mount point structure */ 3046 int nullfb; /* true if ap->firstblock isn't set */ 3047 int rt; /* true if inode is realtime */ 3048 3049 #define ISVALID(x,y) \ 3050 (rt ? \ 3051 (x) < mp->m_sb.sb_rblocks : \ 3052 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3053 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3054 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3055 3056 mp = ap->ip->i_mount; 3057 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3058 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3059 xfs_alloc_is_userdata(ap->datatype); 3060 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3061 ap->tp->t_firstblock); 3062 /* 3063 * If allocating at eof, and there's a previous real block, 3064 * try to use its last block as our starting point. 3065 */ 3066 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3067 !isnullstartblock(ap->prev.br_startblock) && 3068 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3069 ap->prev.br_startblock)) { 3070 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3071 /* 3072 * Adjust for the gap between prevp and us. 3073 */ 3074 adjust = ap->offset - 3075 (ap->prev.br_startoff + ap->prev.br_blockcount); 3076 if (adjust && 3077 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3078 ap->blkno += adjust; 3079 } 3080 /* 3081 * If not at eof, then compare the two neighbor blocks. 3082 * Figure out whether either one gives us a good starting point, 3083 * and pick the better one. 3084 */ 3085 else if (!ap->eof) { 3086 xfs_fsblock_t gotbno; /* right side block number */ 3087 xfs_fsblock_t gotdiff=0; /* right side difference */ 3088 xfs_fsblock_t prevbno; /* left side block number */ 3089 xfs_fsblock_t prevdiff=0; /* left side difference */ 3090 3091 /* 3092 * If there's a previous (left) block, select a requested 3093 * start block based on it. 3094 */ 3095 if (ap->prev.br_startoff != NULLFILEOFF && 3096 !isnullstartblock(ap->prev.br_startblock) && 3097 (prevbno = ap->prev.br_startblock + 3098 ap->prev.br_blockcount) && 3099 ISVALID(prevbno, ap->prev.br_startblock)) { 3100 /* 3101 * Calculate gap to end of previous block. 3102 */ 3103 adjust = prevdiff = ap->offset - 3104 (ap->prev.br_startoff + 3105 ap->prev.br_blockcount); 3106 /* 3107 * Figure the startblock based on the previous block's 3108 * end and the gap size. 3109 * Heuristic! 3110 * If the gap is large relative to the piece we're 3111 * allocating, or using it gives us an invalid block 3112 * number, then just use the end of the previous block. 
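 * With XFS_ALLOC_GAP_UNITS == 4, for example, a 64 block allocation
 * whose offset lies 100 blocks past the previous extent targets
 * end-of-prev + 100 (100 <= 4 * 64), keeping the offset-to-block delta;
 * a 1000 block gap would instead target the block straight after the
 * previous extent.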
3113 */ 3114 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3115 ISVALID(prevbno + prevdiff, 3116 ap->prev.br_startblock)) 3117 prevbno += adjust; 3118 else 3119 prevdiff += adjust; 3120 /* 3121 * If the firstblock forbids it, can't use it, 3122 * must use default. 3123 */ 3124 if (!rt && !nullfb && 3125 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) 3126 prevbno = NULLFSBLOCK; 3127 } 3128 /* 3129 * No previous block or can't follow it, just default. 3130 */ 3131 else 3132 prevbno = NULLFSBLOCK; 3133 /* 3134 * If there's a following (right) block, select a requested 3135 * start block based on it. 3136 */ 3137 if (!isnullstartblock(ap->got.br_startblock)) { 3138 /* 3139 * Calculate gap to start of next block. 3140 */ 3141 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3142 /* 3143 * Figure the startblock based on the next block's 3144 * start and the gap size. 3145 */ 3146 gotbno = ap->got.br_startblock; 3147 /* 3148 * Heuristic! 3149 * If the gap is large relative to the piece we're 3150 * allocating, or using it gives us an invalid block 3151 * number, then just use the start of the next block 3152 * offset by our length. 3153 */ 3154 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3155 ISVALID(gotbno - gotdiff, gotbno)) 3156 gotbno -= adjust; 3157 else if (ISVALID(gotbno - ap->length, gotbno)) { 3158 gotbno -= ap->length; 3159 gotdiff += adjust - ap->length; 3160 } else 3161 gotdiff += adjust; 3162 /* 3163 * If the firstblock forbids it, can't use it, 3164 * must use default. 3165 */ 3166 if (!rt && !nullfb && 3167 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) 3168 gotbno = NULLFSBLOCK; 3169 } 3170 /* 3171 * No next block, just default. 3172 */ 3173 else 3174 gotbno = NULLFSBLOCK; 3175 /* 3176 * If both valid, pick the better one, else the only good 3177 * one, else ap->blkno is already set (to 0 or the inode block). 3178 */ 3179 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3180 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno; 3181 else if (prevbno != NULLFSBLOCK) 3182 ap->blkno = prevbno; 3183 else if (gotbno != NULLFSBLOCK) 3184 ap->blkno = gotbno; 3185 } 3186 #undef ISVALID 3187 } 3188 3189 static int 3190 xfs_bmap_longest_free_extent( 3191 struct xfs_trans *tp, 3192 xfs_agnumber_t ag, 3193 xfs_extlen_t *blen, 3194 int *notinit) 3195 { 3196 struct xfs_mount *mp = tp->t_mountp; 3197 struct xfs_perag *pag; 3198 xfs_extlen_t longest; 3199 int error = 0; 3200 3201 pag = xfs_perag_get(mp, ag); 3202 if (!pag->pagf_init) { 3203 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK); 3204 if (error) 3205 goto out; 3206 3207 if (!pag->pagf_init) { 3208 *notinit = 1; 3209 goto out; 3210 } 3211 } 3212 3213 longest = xfs_alloc_longest_free_extent(pag, 3214 xfs_alloc_min_freelist(mp, pag), 3215 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3216 if (*blen < longest) 3217 *blen = longest; 3218 3219 out: 3220 xfs_perag_put(pag); 3221 return error; 3222 } 3223 3224 static void 3225 xfs_bmap_select_minlen( 3226 struct xfs_bmalloca *ap, 3227 struct xfs_alloc_arg *args, 3228 xfs_extlen_t *blen, 3229 int notinit) 3230 { 3231 if (notinit || *blen < ap->minlen) { 3232 /* 3233 * Since we did a BUF_TRYLOCK above, it is possible that 3234 * there is space for this request. 3235 */ 3236 args->minlen = ap->minlen; 3237 } else if (*blen < args->maxlen) { 3238 /* 3239 * If the best seen length is less than the request length, 3240 * use the best as the minimum. 
3241 */
3242 args->minlen = *blen;
3243 } else {
3244 /*
3245 * Otherwise we've seen an extent as big as maxlen, use that
3246 * as the minimum.
3247 */
3248 args->minlen = args->maxlen;
3249 }
3250 }
3251
3252 STATIC int
3253 xfs_bmap_btalloc_nullfb(
3254 struct xfs_bmalloca *ap,
3255 struct xfs_alloc_arg *args,
3256 xfs_extlen_t *blen)
3257 {
3258 struct xfs_mount *mp = ap->ip->i_mount;
3259 xfs_agnumber_t ag, startag;
3260 int notinit = 0;
3261 int error;
3262
3263 args->type = XFS_ALLOCTYPE_START_BNO;
3264 args->total = ap->total;
3265
3266 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3267 if (startag == NULLAGNUMBER)
3268 startag = ag = 0;
3269
3270 while (*blen < args->maxlen) {
3271 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3272 &notinit);
3273 if (error)
3274 return error;
3275
3276 if (++ag == mp->m_sb.sb_agcount)
3277 ag = 0;
3278 if (ag == startag)
3279 break;
3280 }
3281
3282 xfs_bmap_select_minlen(ap, args, blen, notinit);
3283 return 0;
3284 }
3285
3286 STATIC int
3287 xfs_bmap_btalloc_filestreams(
3288 struct xfs_bmalloca *ap,
3289 struct xfs_alloc_arg *args,
3290 xfs_extlen_t *blen)
3291 {
3292 struct xfs_mount *mp = ap->ip->i_mount;
3293 xfs_agnumber_t ag;
3294 int notinit = 0;
3295 int error;
3296
3297 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3298 args->total = ap->total;
3299
3300 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3301 if (ag == NULLAGNUMBER)
3302 ag = 0;
3303
3304 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3305 if (error)
3306 return error;
3307
3308 if (*blen < args->maxlen) {
3309 error = xfs_filestream_new_ag(ap, &ag);
3310 if (error)
3311 return error;
3312
3313 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3314 &notinit);
3315 if (error)
3316 return error;
3317
3318 }
3319
3320 xfs_bmap_select_minlen(ap, args, blen, notinit);
3321
3322 /*
3323 * Set the failure fallback case to look in the selected AG as stream
3324 * may have moved.
3325 */
3326 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3327 return 0;
3328 }
3329
3330 /* Update all inode and quota accounting for the allocation we just did. */
3331 static void
3332 xfs_bmap_btalloc_accounting(
3333 struct xfs_bmalloca *ap,
3334 struct xfs_alloc_arg *args)
3335 {
3336 if (ap->flags & XFS_BMAPI_COWFORK) {
3337 /*
3338 * COW fork blocks are in-core only and thus are treated as
3339 * in-core quota reservation (like delalloc blocks) even when
3340 * converted to real blocks. The quota reservation is not
3341 * accounted to disk until blocks are remapped to the data
3342 * fork. So if these blocks were previously delalloc, we
3343 * already have quota reservation and there's nothing to do
3344 * yet.
3345 */
3346 if (ap->wasdel) {
3347 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3348 return;
3349 }
3350
3351 /*
3352 * Otherwise, we've allocated blocks in a hole. The transaction
3353 * has acquired in-core quota reservation for this extent.
3354 * Rather than account these as real blocks, however, we reduce
3355 * the transaction quota reservation based on the allocation.
3356 * This essentially transfers the transaction quota reservation
3357 * to that of a delalloc extent.
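 * Concretely, i_delayed_blks grows by the allocated length while the
 * transaction's block reservation shrinks by the same amount, so net
 * quota usage is unchanged until the blocks are remapped into the
 * data fork.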
3358 */ 3359 ap->ip->i_delayed_blks += args->len; 3360 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 3361 -(long)args->len); 3362 return; 3363 } 3364 3365 /* data/attr fork only */ 3366 ap->ip->i_d.di_nblocks += args->len; 3367 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3368 if (ap->wasdel) { 3369 ap->ip->i_delayed_blks -= args->len; 3370 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len); 3371 } 3372 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3373 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3374 args->len); 3375 } 3376 3377 STATIC int 3378 xfs_bmap_btalloc( 3379 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3380 { 3381 xfs_mount_t *mp; /* mount point structure */ 3382 xfs_alloctype_t atype = 0; /* type for allocation routines */ 3383 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3384 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3385 xfs_agnumber_t ag; 3386 xfs_alloc_arg_t args; 3387 xfs_fileoff_t orig_offset; 3388 xfs_extlen_t orig_length; 3389 xfs_extlen_t blen; 3390 xfs_extlen_t nextminlen = 0; 3391 int nullfb; /* true if ap->firstblock isn't set */ 3392 int isaligned; 3393 int tryagain; 3394 int error; 3395 int stripe_align; 3396 3397 ASSERT(ap->length); 3398 orig_offset = ap->offset; 3399 orig_length = ap->length; 3400 3401 mp = ap->ip->i_mount; 3402 3403 /* stripe alignment for allocation is determined by mount parameters */ 3404 stripe_align = 0; 3405 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 3406 stripe_align = mp->m_swidth; 3407 else if (mp->m_dalign) 3408 stripe_align = mp->m_dalign; 3409 3410 if (ap->flags & XFS_BMAPI_COWFORK) 3411 align = xfs_get_cowextsz_hint(ap->ip); 3412 else if (xfs_alloc_is_userdata(ap->datatype)) 3413 align = xfs_get_extsz_hint(ap->ip); 3414 if (align) { 3415 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3416 align, 0, ap->eof, 0, ap->conv, 3417 &ap->offset, &ap->length); 3418 ASSERT(!error); 3419 ASSERT(ap->length); 3420 } 3421 3422 3423 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3424 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3425 ap->tp->t_firstblock); 3426 if (nullfb) { 3427 if (xfs_alloc_is_userdata(ap->datatype) && 3428 xfs_inode_is_filestream(ap->ip)) { 3429 ag = xfs_filestream_lookup_ag(ap->ip); 3430 ag = (ag != NULLAGNUMBER) ? ag : 0; 3431 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 3432 } else { 3433 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 3434 } 3435 } else 3436 ap->blkno = ap->tp->t_firstblock; 3437 3438 xfs_bmap_adjacent(ap); 3439 3440 /* 3441 * If allowed, use ap->blkno; otherwise must use firstblock since 3442 * it's in the right allocation group. 3443 */ 3444 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 3445 ; 3446 else 3447 ap->blkno = ap->tp->t_firstblock; 3448 /* 3449 * Normal allocation, done through xfs_alloc_vextent. 3450 */ 3451 tryagain = isaligned = 0; 3452 memset(&args, 0, sizeof(args)); 3453 args.tp = ap->tp; 3454 args.mp = mp; 3455 args.fsbno = ap->blkno; 3456 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; 3457 3458 /* Trim the allocation back to the maximum an AG can fit. */ 3459 args.maxlen = min(ap->length, mp->m_ag_max_usable); 3460 blen = 0; 3461 if (nullfb) { 3462 /* 3463 * Search for an allocation group with a single extent large 3464 * enough for the request. If one isn't found, then adjust 3465 * the minimum allocation size to the largest space found. 
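 * (xfs_bmap_btalloc_nullfb() and xfs_bmap_btalloc_filestreams() above
 * implement this by walking AGs with xfs_bmap_longest_free_extent()
 * and letting xfs_bmap_select_minlen() derive args->minlen from the
 * best length seen.)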
3466 */ 3467 if (xfs_alloc_is_userdata(ap->datatype) && 3468 xfs_inode_is_filestream(ap->ip)) 3469 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); 3470 else 3471 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 3472 if (error) 3473 return error; 3474 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3475 if (xfs_inode_is_filestream(ap->ip)) 3476 args.type = XFS_ALLOCTYPE_FIRST_AG; 3477 else 3478 args.type = XFS_ALLOCTYPE_START_BNO; 3479 args.total = args.minlen = ap->minlen; 3480 } else { 3481 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3482 args.total = ap->total; 3483 args.minlen = ap->minlen; 3484 } 3485 /* apply extent size hints if obtained earlier */ 3486 if (align) { 3487 args.prod = align; 3488 div_u64_rem(ap->offset, args.prod, &args.mod); 3489 if (args.mod) 3490 args.mod = args.prod - args.mod; 3491 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3492 args.prod = 1; 3493 args.mod = 0; 3494 } else { 3495 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3496 div_u64_rem(ap->offset, args.prod, &args.mod); 3497 if (args.mod) 3498 args.mod = args.prod - args.mod; 3499 } 3500 /* 3501 * If we are not low on available data blocks, and the 3502 * underlying logical volume manager is a stripe, and 3503 * the file offset is zero then try to allocate data 3504 * blocks on stripe unit boundary. 3505 * NOTE: ap->aeof is only set if the allocation length 3506 * is >= the stripe unit and the allocation offset is 3507 * at the end of file. 3508 */ 3509 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) { 3510 if (!ap->offset) { 3511 args.alignment = stripe_align; 3512 atype = args.type; 3513 isaligned = 1; 3514 /* 3515 * Adjust for alignment 3516 */ 3517 if (blen > args.alignment && blen <= args.maxlen) 3518 args.minlen = blen - args.alignment; 3519 args.minalignslop = 0; 3520 } else { 3521 /* 3522 * First try an exact bno allocation. 3523 * If it fails then do a near or start bno 3524 * allocation with alignment turned on. 3525 */ 3526 atype = args.type; 3527 tryagain = 1; 3528 args.type = XFS_ALLOCTYPE_THIS_BNO; 3529 args.alignment = 1; 3530 /* 3531 * Compute the minlen+alignment for the 3532 * next case. Set slop so that the value 3533 * of minlen+alignment+slop doesn't go up 3534 * between the calls. 3535 */ 3536 if (blen > stripe_align && blen <= args.maxlen) 3537 nextminlen = blen - stripe_align; 3538 else 3539 nextminlen = args.minlen; 3540 if (nextminlen + stripe_align > args.minlen + 1) 3541 args.minalignslop = 3542 nextminlen + stripe_align - 3543 args.minlen - 1; 3544 else 3545 args.minalignslop = 0; 3546 } 3547 } else { 3548 args.alignment = 1; 3549 args.minalignslop = 0; 3550 } 3551 args.minleft = ap->minleft; 3552 args.wasdel = ap->wasdel; 3553 args.resv = XFS_AG_RESV_NONE; 3554 args.datatype = ap->datatype; 3555 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) 3556 args.ip = ap->ip; 3557 3558 error = xfs_alloc_vextent(&args); 3559 if (error) 3560 return error; 3561 3562 if (tryagain && args.fsbno == NULLFSBLOCK) { 3563 /* 3564 * Exact allocation failed. Now try with alignment 3565 * turned on. 3566 */ 3567 args.type = atype; 3568 args.fsbno = ap->blkno; 3569 args.alignment = stripe_align; 3570 args.minlen = nextminlen; 3571 args.minalignslop = 0; 3572 isaligned = 1; 3573 if ((error = xfs_alloc_vextent(&args))) 3574 return error; 3575 } 3576 if (isaligned && args.fsbno == NULLFSBLOCK) { 3577 /* 3578 * allocation failed, so turn off alignment and 3579 * try again. 
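 * (Taken together, the retry ladder in this function is: exact-bno,
 * then aligned near/start, then unaligned, then a retry with minlen
 * dropped back to ap->minlen when no firstblock constrains us, and
 * finally an XFS_ALLOCTYPE_FIRST_AG attempt that also puts the
 * transaction into low-space mode.)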
3580 */ 3581 args.type = atype; 3582 args.fsbno = ap->blkno; 3583 args.alignment = 0; 3584 if ((error = xfs_alloc_vextent(&args))) 3585 return error; 3586 } 3587 if (args.fsbno == NULLFSBLOCK && nullfb && 3588 args.minlen > ap->minlen) { 3589 args.minlen = ap->minlen; 3590 args.type = XFS_ALLOCTYPE_START_BNO; 3591 args.fsbno = ap->blkno; 3592 if ((error = xfs_alloc_vextent(&args))) 3593 return error; 3594 } 3595 if (args.fsbno == NULLFSBLOCK && nullfb) { 3596 args.fsbno = 0; 3597 args.type = XFS_ALLOCTYPE_FIRST_AG; 3598 args.total = ap->minlen; 3599 if ((error = xfs_alloc_vextent(&args))) 3600 return error; 3601 ap->tp->t_flags |= XFS_TRANS_LOWMODE; 3602 } 3603 if (args.fsbno != NULLFSBLOCK) { 3604 /* 3605 * check the allocation happened at the same or higher AG than 3606 * the first block that was allocated. 3607 */ 3608 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK || 3609 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <= 3610 XFS_FSB_TO_AGNO(mp, args.fsbno)); 3611 3612 ap->blkno = args.fsbno; 3613 if (ap->tp->t_firstblock == NULLFSBLOCK) 3614 ap->tp->t_firstblock = args.fsbno; 3615 ASSERT(nullfb || fb_agno <= args.agno); 3616 ap->length = args.len; 3617 /* 3618 * If the extent size hint is active, we tried to round the 3619 * caller's allocation request offset down to extsz and the 3620 * length up to another extsz boundary. If we found a free 3621 * extent we mapped it in starting at this new offset. If the 3622 * newly mapped space isn't long enough to cover any of the 3623 * range of offsets that was originally requested, move the 3624 * mapping up so that we can fill as much of the caller's 3625 * original request as possible. Free space is apparently 3626 * very fragmented so we're unlikely to be able to satisfy the 3627 * hints anyway. 3628 */ 3629 if (ap->length <= orig_length) 3630 ap->offset = orig_offset; 3631 else if (ap->offset + ap->length < orig_offset + orig_length) 3632 ap->offset = orig_offset + orig_length - ap->length; 3633 xfs_bmap_btalloc_accounting(ap, &args); 3634 } else { 3635 ap->blkno = NULLFSBLOCK; 3636 ap->length = 0; 3637 } 3638 return 0; 3639 } 3640 3641 /* 3642 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 3643 * It figures out where to ask the underlying allocator to put the new extent. 3644 */ 3645 STATIC int 3646 xfs_bmap_alloc( 3647 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3648 { 3649 if (XFS_IS_REALTIME_INODE(ap->ip) && 3650 xfs_alloc_is_userdata(ap->datatype)) 3651 return xfs_bmap_rtalloc(ap); 3652 return xfs_bmap_btalloc(ap); 3653 } 3654 3655 /* Trim extent to fit a logical block range. 
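 * For example (illustrative values): an irec covering offsets [100, 200)
 * trimmed to bno = 150, len = 30 becomes [150, 180), with br_startblock
 * advanced by 50 unless the extent is a hole or delalloc; a range that
 * misses the extent entirely leaves br_blockcount == 0.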
*/ 3656 void 3657 xfs_trim_extent( 3658 struct xfs_bmbt_irec *irec, 3659 xfs_fileoff_t bno, 3660 xfs_filblks_t len) 3661 { 3662 xfs_fileoff_t distance; 3663 xfs_fileoff_t end = bno + len; 3664 3665 if (irec->br_startoff + irec->br_blockcount <= bno || 3666 irec->br_startoff >= end) { 3667 irec->br_blockcount = 0; 3668 return; 3669 } 3670 3671 if (irec->br_startoff < bno) { 3672 distance = bno - irec->br_startoff; 3673 if (isnullstartblock(irec->br_startblock)) 3674 irec->br_startblock = DELAYSTARTBLOCK; 3675 if (irec->br_startblock != DELAYSTARTBLOCK && 3676 irec->br_startblock != HOLESTARTBLOCK) 3677 irec->br_startblock += distance; 3678 irec->br_startoff += distance; 3679 irec->br_blockcount -= distance; 3680 } 3681 3682 if (end < irec->br_startoff + irec->br_blockcount) { 3683 distance = irec->br_startoff + irec->br_blockcount - end; 3684 irec->br_blockcount -= distance; 3685 } 3686 } 3687 3688 /* 3689 * Trim the returned map to the required bounds 3690 */ 3691 STATIC void 3692 xfs_bmapi_trim_map( 3693 struct xfs_bmbt_irec *mval, 3694 struct xfs_bmbt_irec *got, 3695 xfs_fileoff_t *bno, 3696 xfs_filblks_t len, 3697 xfs_fileoff_t obno, 3698 xfs_fileoff_t end, 3699 int n, 3700 int flags) 3701 { 3702 if ((flags & XFS_BMAPI_ENTIRE) || 3703 got->br_startoff + got->br_blockcount <= obno) { 3704 *mval = *got; 3705 if (isnullstartblock(got->br_startblock)) 3706 mval->br_startblock = DELAYSTARTBLOCK; 3707 return; 3708 } 3709 3710 if (obno > *bno) 3711 *bno = obno; 3712 ASSERT((*bno >= obno) || (n == 0)); 3713 ASSERT(*bno < end); 3714 mval->br_startoff = *bno; 3715 if (isnullstartblock(got->br_startblock)) 3716 mval->br_startblock = DELAYSTARTBLOCK; 3717 else 3718 mval->br_startblock = got->br_startblock + 3719 (*bno - got->br_startoff); 3720 /* 3721 * Return the minimum of what we got and what we asked for for 3722 * the length. We can use the len variable here because it is 3723 * modified below and we could have been there before coming 3724 * here if the first part of the allocation didn't overlap what 3725 * was asked for. 
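 * Illustration (hypothetical numbers): with got covering offsets
 * [10, 30) and a request of obno = 15, end = 55, *bno is pushed up to
 * 15 and the mapping returned starts at offset 15, startblock
 * got.br_startblock + 5, with length min(55 - 15, 30 - 15) = 15.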
3726 */ 3727 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3728 got->br_blockcount - (*bno - got->br_startoff)); 3729 mval->br_state = got->br_state; 3730 ASSERT(mval->br_blockcount <= len); 3731 return; 3732 } 3733 3734 /* 3735 * Update and validate the extent map to return 3736 */ 3737 STATIC void 3738 xfs_bmapi_update_map( 3739 struct xfs_bmbt_irec **map, 3740 xfs_fileoff_t *bno, 3741 xfs_filblks_t *len, 3742 xfs_fileoff_t obno, 3743 xfs_fileoff_t end, 3744 int *n, 3745 int flags) 3746 { 3747 xfs_bmbt_irec_t *mval = *map; 3748 3749 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3750 ((mval->br_startoff + mval->br_blockcount) <= end)); 3751 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3752 (mval->br_startoff < obno)); 3753 3754 *bno = mval->br_startoff + mval->br_blockcount; 3755 *len = end - *bno; 3756 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3757 /* update previous map with new information */ 3758 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3759 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3760 ASSERT(mval->br_state == mval[-1].br_state); 3761 mval[-1].br_blockcount = mval->br_blockcount; 3762 mval[-1].br_state = mval->br_state; 3763 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3764 mval[-1].br_startblock != DELAYSTARTBLOCK && 3765 mval[-1].br_startblock != HOLESTARTBLOCK && 3766 mval->br_startblock == mval[-1].br_startblock + 3767 mval[-1].br_blockcount && 3768 mval[-1].br_state == mval->br_state) { 3769 ASSERT(mval->br_startoff == 3770 mval[-1].br_startoff + mval[-1].br_blockcount); 3771 mval[-1].br_blockcount += mval->br_blockcount; 3772 } else if (*n > 0 && 3773 mval->br_startblock == DELAYSTARTBLOCK && 3774 mval[-1].br_startblock == DELAYSTARTBLOCK && 3775 mval->br_startoff == 3776 mval[-1].br_startoff + mval[-1].br_blockcount) { 3777 mval[-1].br_blockcount += mval->br_blockcount; 3778 mval[-1].br_state = mval->br_state; 3779 } else if (!((*n == 0) && 3780 ((mval->br_startoff + mval->br_blockcount) <= 3781 obno))) { 3782 mval++; 3783 (*n)++; 3784 } 3785 *map = mval; 3786 } 3787 3788 /* 3789 * Map file blocks to filesystem blocks without allocation. 3790 */ 3791 int 3792 xfs_bmapi_read( 3793 struct xfs_inode *ip, 3794 xfs_fileoff_t bno, 3795 xfs_filblks_t len, 3796 struct xfs_bmbt_irec *mval, 3797 int *nmap, 3798 int flags) 3799 { 3800 struct xfs_mount *mp = ip->i_mount; 3801 struct xfs_ifork *ifp; 3802 struct xfs_bmbt_irec got; 3803 xfs_fileoff_t obno; 3804 xfs_fileoff_t end; 3805 struct xfs_iext_cursor icur; 3806 int error; 3807 bool eof = false; 3808 int n = 0; 3809 int whichfork = xfs_bmapi_whichfork(flags); 3810 3811 ASSERT(*nmap >= 1); 3812 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| 3813 XFS_BMAPI_COWFORK))); 3814 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3815 3816 if (unlikely(XFS_TEST_ERROR( 3817 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 3818 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 3819 mp, XFS_ERRTAG_BMAPIFORMAT))) { 3820 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 3821 return -EFSCORRUPTED; 3822 } 3823 3824 if (XFS_FORCED_SHUTDOWN(mp)) 3825 return -EIO; 3826 3827 XFS_STATS_INC(mp, xs_blk_mapr); 3828 3829 ifp = XFS_IFORK_PTR(ip, whichfork); 3830 if (!ifp) { 3831 /* No CoW fork? Return a hole. 
*/ 3832 if (whichfork == XFS_COW_FORK) { 3833 mval->br_startoff = bno; 3834 mval->br_startblock = HOLESTARTBLOCK; 3835 mval->br_blockcount = len; 3836 mval->br_state = XFS_EXT_NORM; 3837 *nmap = 1; 3838 return 0; 3839 } 3840 3841 /* 3842 * A missing attr ifork implies that the inode says we're in 3843 * extents or btree format but failed to pass the inode fork 3844 * verifier while trying to load it. Treat that as a file 3845 * corruption too. 3846 */ 3847 #ifdef DEBUG 3848 xfs_alert(mp, "%s: inode %llu missing fork %d", 3849 __func__, ip->i_ino, whichfork); 3850 #endif /* DEBUG */ 3851 return -EFSCORRUPTED; 3852 } 3853 3854 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 3855 error = xfs_iread_extents(NULL, ip, whichfork); 3856 if (error) 3857 return error; 3858 } 3859 3860 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) 3861 eof = true; 3862 end = bno + len; 3863 obno = bno; 3864 3865 while (bno < end && n < *nmap) { 3866 /* Reading past eof, act as though there's a hole up to end. */ 3867 if (eof) 3868 got.br_startoff = end; 3869 if (got.br_startoff > bno) { 3870 /* Reading in a hole. */ 3871 mval->br_startoff = bno; 3872 mval->br_startblock = HOLESTARTBLOCK; 3873 mval->br_blockcount = 3874 XFS_FILBLKS_MIN(len, got.br_startoff - bno); 3875 mval->br_state = XFS_EXT_NORM; 3876 bno += mval->br_blockcount; 3877 len -= mval->br_blockcount; 3878 mval++; 3879 n++; 3880 continue; 3881 } 3882 3883 /* set up the extent map to return. */ 3884 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); 3885 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 3886 3887 /* If we're done, stop now. */ 3888 if (bno >= end || n >= *nmap) 3889 break; 3890 3891 /* Else go on to the next record. */ 3892 if (!xfs_iext_next_extent(ifp, &icur, &got)) 3893 eof = true; 3894 } 3895 *nmap = n; 3896 return 0; 3897 } 3898 3899 /* 3900 * Add a delayed allocation extent to an inode. Blocks are reserved from the 3901 * global pool and the extent inserted into the inode in-core extent tree. 3902 * 3903 * On entry, got refers to the first extent beyond the offset of the extent to 3904 * allocate or eof is specified if no such extent exists. On return, got refers 3905 * to the extent record that was inserted to the inode fork. 3906 * 3907 * Note that the allocated extent may have been merged with contiguous extents 3908 * during insertion into the inode fork. Thus, got does not reflect the current 3909 * state of the inode fork on return. If necessary, the caller can use lastx to 3910 * look up the updated record in the inode fork. 3911 */ 3912 int 3913 xfs_bmapi_reserve_delalloc( 3914 struct xfs_inode *ip, 3915 int whichfork, 3916 xfs_fileoff_t off, 3917 xfs_filblks_t len, 3918 xfs_filblks_t prealloc, 3919 struct xfs_bmbt_irec *got, 3920 struct xfs_iext_cursor *icur, 3921 int eof) 3922 { 3923 struct xfs_mount *mp = ip->i_mount; 3924 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 3925 xfs_extlen_t alen; 3926 xfs_extlen_t indlen; 3927 int error; 3928 xfs_fileoff_t aoff = off; 3929 3930 /* 3931 * Cap the alloc length. Keep track of prealloc so we know whether to 3932 * tag the inode before we return. 
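 * For example, if the caller asked for len blocks plus some speculative
 * prealloc but the next extent starts sooner, alen is clamped below and
 * prealloc is recomputed as (alen - len), i.e. only the speculative
 * part that survived the clamp.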
3933 */ 3934 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN); 3935 if (!eof) 3936 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); 3937 if (prealloc && alen >= len) 3938 prealloc = alen - len; 3939 3940 /* Figure out the extent size, adjust alen */ 3941 if (whichfork == XFS_COW_FORK) { 3942 struct xfs_bmbt_irec prev; 3943 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip); 3944 3945 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev)) 3946 prev.br_startoff = NULLFILEOFF; 3947 3948 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof, 3949 1, 0, &aoff, &alen); 3950 ASSERT(!error); 3951 } 3952 3953 /* 3954 * Make a transaction-less quota reservation for delayed allocation 3955 * blocks. This number gets adjusted later. We return if we haven't 3956 * allocated blocks already inside this loop. 3957 */ 3958 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0, 3959 XFS_QMOPT_RES_REGBLKS); 3960 if (error) 3961 return error; 3962 3963 /* 3964 * Split changing sb for alen and indlen since they could be coming 3965 * from different places. 3966 */ 3967 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); 3968 ASSERT(indlen > 0); 3969 3970 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); 3971 if (error) 3972 goto out_unreserve_quota; 3973 3974 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); 3975 if (error) 3976 goto out_unreserve_blocks; 3977 3978 3979 ip->i_delayed_blks += alen; 3980 xfs_mod_delalloc(ip->i_mount, alen + indlen); 3981 3982 got->br_startoff = aoff; 3983 got->br_startblock = nullstartblock(indlen); 3984 got->br_blockcount = alen; 3985 got->br_state = XFS_EXT_NORM; 3986 3987 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got); 3988 3989 /* 3990 * Tag the inode if blocks were preallocated. Note that COW fork 3991 * preallocation can occur at the start or end of the extent, even when 3992 * prealloc == 0, so we must also check the aligned offset and length. 3993 */ 3994 if (whichfork == XFS_DATA_FORK && prealloc) 3995 xfs_inode_set_eofblocks_tag(ip); 3996 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 3997 xfs_inode_set_cowblocks_tag(ip); 3998 3999 return 0; 4000 4001 out_unreserve_blocks: 4002 xfs_mod_fdblocks(mp, alen, false); 4003 out_unreserve_quota: 4004 if (XFS_IS_QUOTA_ON(mp)) 4005 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, 4006 XFS_QMOPT_RES_REGBLKS); 4007 return error; 4008 } 4009 4010 static int 4011 xfs_bmapi_allocate( 4012 struct xfs_bmalloca *bma) 4013 { 4014 struct xfs_mount *mp = bma->ip->i_mount; 4015 int whichfork = xfs_bmapi_whichfork(bma->flags); 4016 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4017 int tmp_logflags = 0; 4018 int error; 4019 4020 ASSERT(bma->length > 0); 4021 4022 /* 4023 * For the wasdelay case, we could also just allocate the stuff asked 4024 * for in this bmap call but that wouldn't be as good. 4025 */ 4026 if (bma->wasdel) { 4027 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4028 bma->offset = bma->got.br_startoff; 4029 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev); 4030 } else { 4031 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); 4032 if (!bma->eof) 4033 bma->length = XFS_FILBLKS_MIN(bma->length, 4034 bma->got.br_startoff - bma->offset); 4035 } 4036 4037 /* 4038 * Set the data type being allocated. For the data fork, the first data 4039 * in the file is treated differently to all other allocations. For the 4040 * attribute fork, we only need to ensure the allocated range is not on 4041 * the busy list. 
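 * Concretely: non-metadata allocations start from XFS_ALLOC_NOBUSY, a
 * data fork allocation at offset zero also gets
 * XFS_ALLOC_INITIAL_USER_DATA, any other data fork allocation gets
 * XFS_ALLOC_USERDATA, and XFS_BMAPI_ZERO adds XFS_ALLOC_USERDATA_ZERO.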
4042 */ 4043 if (!(bma->flags & XFS_BMAPI_METADATA)) { 4044 bma->datatype = XFS_ALLOC_NOBUSY; 4045 if (whichfork == XFS_DATA_FORK) { 4046 if (bma->offset == 0) 4047 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; 4048 else 4049 bma->datatype |= XFS_ALLOC_USERDATA; 4050 } 4051 if (bma->flags & XFS_BMAPI_ZERO) 4052 bma->datatype |= XFS_ALLOC_USERDATA_ZERO; 4053 } 4054 4055 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1; 4056 4057 /* 4058 * Only want to do the alignment at the eof if it is userdata and 4059 * allocation length is larger than a stripe unit. 4060 */ 4061 if (mp->m_dalign && bma->length >= mp->m_dalign && 4062 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4063 error = xfs_bmap_isaeof(bma, whichfork); 4064 if (error) 4065 return error; 4066 } 4067 4068 error = xfs_bmap_alloc(bma); 4069 if (error) 4070 return error; 4071 4072 if (bma->blkno == NULLFSBLOCK) 4073 return 0; 4074 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) 4075 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4076 /* 4077 * Bump the number of extents we've allocated 4078 * in this call. 4079 */ 4080 bma->nallocs++; 4081 4082 if (bma->cur) 4083 bma->cur->bc_private.b.flags = 4084 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4085 4086 bma->got.br_startoff = bma->offset; 4087 bma->got.br_startblock = bma->blkno; 4088 bma->got.br_blockcount = bma->length; 4089 bma->got.br_state = XFS_EXT_NORM; 4090 4091 /* 4092 * In the data fork, a wasdelay extent has been initialized, so 4093 * shouldn't be flagged as unwritten. 4094 * 4095 * For the cow fork, however, we convert delalloc reservations 4096 * (extents allocated for speculative preallocation) to 4097 * allocated unwritten extents, and only convert the unwritten 4098 * extents to real extents when we're about to write the data. 4099 */ 4100 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && 4101 (bma->flags & XFS_BMAPI_PREALLOC)) 4102 bma->got.br_state = XFS_EXT_UNWRITTEN; 4103 4104 if (bma->wasdel) 4105 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4106 else 4107 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4108 whichfork, &bma->icur, &bma->cur, &bma->got, 4109 &bma->logflags, bma->flags); 4110 4111 bma->logflags |= tmp_logflags; 4112 if (error) 4113 return error; 4114 4115 /* 4116 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4117 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4118 * the neighbouring ones. 
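 * Re-reading the record means the assertions below are checked against
 * whatever (possibly merged) extent actually ended up in the fork; it
 * only has to cover the allocated range, not match it exactly.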
4119 */ 4120 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4121 4122 ASSERT(bma->got.br_startoff <= bma->offset); 4123 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4124 bma->offset + bma->length); 4125 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4126 bma->got.br_state == XFS_EXT_UNWRITTEN); 4127 return 0; 4128 } 4129 4130 STATIC int 4131 xfs_bmapi_convert_unwritten( 4132 struct xfs_bmalloca *bma, 4133 struct xfs_bmbt_irec *mval, 4134 xfs_filblks_t len, 4135 int flags) 4136 { 4137 int whichfork = xfs_bmapi_whichfork(flags); 4138 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4139 int tmp_logflags = 0; 4140 int error; 4141 4142 /* check if we need to do unwritten->real conversion */ 4143 if (mval->br_state == XFS_EXT_UNWRITTEN && 4144 (flags & XFS_BMAPI_PREALLOC)) 4145 return 0; 4146 4147 /* check if we need to do real->unwritten conversion */ 4148 if (mval->br_state == XFS_EXT_NORM && 4149 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4150 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4151 return 0; 4152 4153 /* 4154 * Modify (by adding) the state flag, if writing. 4155 */ 4156 ASSERT(mval->br_blockcount <= len); 4157 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4158 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4159 bma->ip, whichfork); 4160 } 4161 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4162 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4163 4164 /* 4165 * Before insertion into the bmbt, zero the range being converted 4166 * if required. 4167 */ 4168 if (flags & XFS_BMAPI_ZERO) { 4169 error = xfs_zero_extent(bma->ip, mval->br_startblock, 4170 mval->br_blockcount); 4171 if (error) 4172 return error; 4173 } 4174 4175 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork, 4176 &bma->icur, &bma->cur, mval, &tmp_logflags); 4177 /* 4178 * Log the inode core unconditionally in the unwritten extent conversion 4179 * path because the conversion might not have done so (e.g., if the 4180 * extent count hasn't changed). We need to make sure the inode is dirty 4181 * in the transaction for the sake of fsync(), even if nothing has 4182 * changed, because fsync() will not force the log for this transaction 4183 * unless it sees the inode pinned. 4184 * 4185 * Note: If we're only converting cow fork extents, there aren't 4186 * any on-disk updates to make, so we don't need to log anything. 4187 */ 4188 if (whichfork != XFS_COW_FORK) 4189 bma->logflags |= tmp_logflags | XFS_ILOG_CORE; 4190 if (error) 4191 return error; 4192 4193 /* 4194 * Update our extent pointer, given that 4195 * xfs_bmap_add_extent_unwritten_real might have merged it into one 4196 * of the neighbouring ones. 4197 */ 4198 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4199 4200 /* 4201 * We may have combined previously unwritten space with written space, 4202 * so generate another request. 4203 */ 4204 if (mval->br_blockcount < len) 4205 return -EAGAIN; 4206 return 0; 4207 } 4208 4209 static inline xfs_extlen_t 4210 xfs_bmapi_minleft( 4211 struct xfs_trans *tp, 4212 struct xfs_inode *ip, 4213 int fork) 4214 { 4215 if (tp && tp->t_firstblock != NULLFSBLOCK) 4216 return 0; 4217 if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE) 4218 return 1; 4219 return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1; 4220 } 4221 4222 /* 4223 * Log whatever the flags say, even if error. Otherwise we might miss detecting 4224 * a case where the data is changed, there's an error, and it's not logged so we 4225 * don't shutdown when we should. 
Don't bother logging extents/btree changes if 4226 * we converted to the other format. 4227 */ 4228 static void 4229 xfs_bmapi_finish( 4230 struct xfs_bmalloca *bma, 4231 int whichfork, 4232 int error) 4233 { 4234 if ((bma->logflags & xfs_ilog_fext(whichfork)) && 4235 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 4236 bma->logflags &= ~xfs_ilog_fext(whichfork); 4237 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) && 4238 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE) 4239 bma->logflags &= ~xfs_ilog_fbroot(whichfork); 4240 4241 if (bma->logflags) 4242 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags); 4243 if (bma->cur) 4244 xfs_btree_del_cursor(bma->cur, error); 4245 } 4246 4247 /* 4248 * Map file blocks to filesystem blocks, and allocate blocks or convert the 4249 * extent state if necessary. Detailed behaviour is controlled by the flags 4250 * parameter. Only allocates blocks from a single allocation group, to avoid 4251 * locking problems. 4252 */ 4253 int 4254 xfs_bmapi_write( 4255 struct xfs_trans *tp, /* transaction pointer */ 4256 struct xfs_inode *ip, /* incore inode */ 4257 xfs_fileoff_t bno, /* starting file offs. mapped */ 4258 xfs_filblks_t len, /* length to map in file */ 4259 int flags, /* XFS_BMAPI_... */ 4260 xfs_extlen_t total, /* total blocks needed */ 4261 struct xfs_bmbt_irec *mval, /* output: map values */ 4262 int *nmap) /* i/o: mval size/count */ 4263 { 4264 struct xfs_bmalloca bma = { 4265 .tp = tp, 4266 .ip = ip, 4267 .total = total, 4268 }; 4269 struct xfs_mount *mp = ip->i_mount; 4270 struct xfs_ifork *ifp; 4271 xfs_fileoff_t end; /* end of mapped file region */ 4272 bool eof = false; /* after the end of extents */ 4273 int error; /* error return */ 4274 int n; /* current extent index */ 4275 xfs_fileoff_t obno; /* old block number (offset) */ 4276 int whichfork; /* data or attr fork */ 4277 4278 #ifdef DEBUG 4279 xfs_fileoff_t orig_bno; /* original block number value */ 4280 int orig_flags; /* original flags arg value */ 4281 xfs_filblks_t orig_len; /* original value of len arg */ 4282 struct xfs_bmbt_irec *orig_mval; /* original value of mval */ 4283 int orig_nmap; /* original value of *nmap */ 4284 4285 orig_bno = bno; 4286 orig_len = len; 4287 orig_flags = flags; 4288 orig_mval = mval; 4289 orig_nmap = *nmap; 4290 #endif 4291 whichfork = xfs_bmapi_whichfork(flags); 4292 4293 ASSERT(*nmap >= 1); 4294 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); 4295 ASSERT(tp != NULL); 4296 ASSERT(len > 0); 4297 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); 4298 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4299 ASSERT(!(flags & XFS_BMAPI_REMAP)); 4300 4301 /* zeroing is currently only for data extents, not metadata */ 4302 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) != 4303 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)); 4304 /* 4305 * we can allocate unwritten extents or pre-zero allocated blocks, 4306 * but it makes no sense to do both at once. This would result in 4307 * zeroing the unwritten extent twice, but it would still be an 4308 * unwritten extent....
4309 */ 4310 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) != 4311 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)); 4312 4313 if (unlikely(XFS_TEST_ERROR( 4314 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4315 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4316 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4317 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); 4318 return -EFSCORRUPTED; 4319 } 4320 4321 if (XFS_FORCED_SHUTDOWN(mp)) 4322 return -EIO; 4323 4324 ifp = XFS_IFORK_PTR(ip, whichfork); 4325 4326 XFS_STATS_INC(mp, xs_blk_mapw); 4327 4328 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4329 error = xfs_iread_extents(tp, ip, whichfork); 4330 if (error) 4331 goto error0; 4332 } 4333 4334 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got)) 4335 eof = true; 4336 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4337 bma.prev.br_startoff = NULLFILEOFF; 4338 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4339 4340 n = 0; 4341 end = bno + len; 4342 obno = bno; 4343 while (bno < end && n < *nmap) { 4344 bool need_alloc = false, wasdelay = false; 4345 4346 /* in hole or beyond EOF? */ 4347 if (eof || bma.got.br_startoff > bno) { 4348 /* 4349 * CoW fork conversions should /never/ hit EOF or 4350 * holes. There should always be something for us 4351 * to work on. 4352 */ 4353 ASSERT(!((flags & XFS_BMAPI_CONVERT) && 4354 (flags & XFS_BMAPI_COWFORK))); 4355 4356 need_alloc = true; 4357 } else if (isnullstartblock(bma.got.br_startblock)) { 4358 wasdelay = true; 4359 } 4360 4361 /* 4362 * First, deal with the hole before the allocated space 4363 * that we found, if any. 4364 */ 4365 if (need_alloc || wasdelay) { 4366 bma.eof = eof; 4367 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4368 bma.wasdel = wasdelay; 4369 bma.offset = bno; 4370 bma.flags = flags; 4371 4372 /* 4373 * There's a 32/64 bit type mismatch between the 4374 * allocation length request (which can be 64 bits in 4375 * length) and the bma length request, which is 4376 * xfs_extlen_t and therefore 32 bits. Hence we have to 4377 * check for 32-bit overflows and handle them here. 4378 */ 4379 if (len > (xfs_filblks_t)MAXEXTLEN) 4380 bma.length = MAXEXTLEN; 4381 else 4382 bma.length = len; 4383 4384 ASSERT(len > 0); 4385 ASSERT(bma.length > 0); 4386 error = xfs_bmapi_allocate(&bma); 4387 if (error) 4388 goto error0; 4389 if (bma.blkno == NULLFSBLOCK) 4390 break; 4391 4392 /* 4393 * If this is a CoW allocation, record the data in 4394 * the refcount btree for orphan recovery. 4395 */ 4396 if (whichfork == XFS_COW_FORK) 4397 xfs_refcount_alloc_cow_extent(tp, bma.blkno, 4398 bma.length); 4399 } 4400 4401 /* Deal with the allocated space we found. */ 4402 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4403 end, n, flags); 4404 4405 /* Execute unwritten extent conversion if necessary */ 4406 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4407 if (error == -EAGAIN) 4408 continue; 4409 if (error) 4410 goto error0; 4411 4412 /* update the extent map to return */ 4413 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4414 4415 /* 4416 * If we're done, stop now. Stop when we've allocated 4417 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4418 * the transaction may get too big. 4419 */ 4420 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4421 break; 4422 4423 /* Else go on to the next record. 
*/ 4424 bma.prev = bma.got; 4425 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got)) 4426 eof = true; 4427 } 4428 *nmap = n; 4429 4430 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4431 whichfork); 4432 if (error) 4433 goto error0; 4434 4435 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || 4436 XFS_IFORK_NEXTENTS(ip, whichfork) > 4437 XFS_IFORK_MAXEXT(ip, whichfork)); 4438 xfs_bmapi_finish(&bma, whichfork, 0); 4439 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4440 orig_nmap, *nmap); 4441 return 0; 4442 error0: 4443 xfs_bmapi_finish(&bma, whichfork, error); 4444 return error; 4445 } 4446 4447 /* 4448 * Convert an existing delalloc extent to real blocks based on file offset. This 4449 * attempts to allocate the entire delalloc extent and may require multiple 4450 * invocations to allocate the target offset if a large enough physical extent 4451 * is not available. 4452 */ 4453 int 4454 xfs_bmapi_convert_delalloc( 4455 struct xfs_inode *ip, 4456 int whichfork, 4457 xfs_fileoff_t offset_fsb, 4458 struct xfs_bmbt_irec *imap, 4459 unsigned int *seq) 4460 { 4461 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4462 struct xfs_mount *mp = ip->i_mount; 4463 struct xfs_bmalloca bma = { NULL }; 4464 struct xfs_trans *tp; 4465 int error; 4466 4467 /* 4468 * Space for the extent and indirect blocks was reserved when the 4469 * delalloc extent was created so there's no need to do so here. 4470 */ 4471 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 4472 XFS_TRANS_RESERVE, &tp); 4473 if (error) 4474 return error; 4475 4476 xfs_ilock(ip, XFS_ILOCK_EXCL); 4477 xfs_trans_ijoin(tp, ip, 0); 4478 4479 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) || 4480 bma.got.br_startoff > offset_fsb) { 4481 /* 4482 * No extent found in the range we are trying to convert. This 4483 * should only happen for the COW fork, where another thread 4484 * might have moved the extent to the data fork in the meantime. 4485 */ 4486 WARN_ON_ONCE(whichfork != XFS_COW_FORK); 4487 error = -EAGAIN; 4488 goto out_trans_cancel; 4489 } 4490 4491 /* 4492 * If we find a real extent here we raced with another thread converting 4493 * the extent. Just return the real extent at this offset. 
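 * Nothing has been dirtied at this point, so the still-empty
 * transaction is simply cancelled while imap and seq are filled in for
 * the caller.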
4494 */ 4495 if (!isnullstartblock(bma.got.br_startblock)) { 4496 *imap = bma.got; 4497 *seq = READ_ONCE(ifp->if_seq); 4498 goto out_trans_cancel; 4499 } 4500 4501 bma.tp = tp; 4502 bma.ip = ip; 4503 bma.wasdel = true; 4504 bma.offset = bma.got.br_startoff; 4505 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN); 4506 bma.total = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK); 4507 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4508 if (whichfork == XFS_COW_FORK) 4509 bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC; 4510 4511 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4512 bma.prev.br_startoff = NULLFILEOFF; 4513 4514 error = xfs_bmapi_allocate(&bma); 4515 if (error) 4516 goto out_finish; 4517 4518 error = -ENOSPC; 4519 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK)) 4520 goto out_finish; 4521 error = -EFSCORRUPTED; 4522 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock))) 4523 goto out_finish; 4524 4525 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length)); 4526 XFS_STATS_INC(mp, xs_xstrat_quick); 4527 4528 ASSERT(!isnullstartblock(bma.got.br_startblock)); 4529 *imap = bma.got; 4530 *seq = READ_ONCE(ifp->if_seq); 4531 4532 if (whichfork == XFS_COW_FORK) 4533 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length); 4534 4535 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4536 whichfork); 4537 if (error) 4538 goto out_finish; 4539 4540 xfs_bmapi_finish(&bma, whichfork, 0); 4541 error = xfs_trans_commit(tp); 4542 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4543 return error; 4544 4545 out_finish: 4546 xfs_bmapi_finish(&bma, whichfork, error); 4547 out_trans_cancel: 4548 xfs_trans_cancel(tp); 4549 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4550 return error; 4551 } 4552 4553 int 4554 xfs_bmapi_remap( 4555 struct xfs_trans *tp, 4556 struct xfs_inode *ip, 4557 xfs_fileoff_t bno, 4558 xfs_filblks_t len, 4559 xfs_fsblock_t startblock, 4560 int flags) 4561 { 4562 struct xfs_mount *mp = ip->i_mount; 4563 struct xfs_ifork *ifp; 4564 struct xfs_btree_cur *cur = NULL; 4565 struct xfs_bmbt_irec got; 4566 struct xfs_iext_cursor icur; 4567 int whichfork = xfs_bmapi_whichfork(flags); 4568 int logflags = 0, error; 4569 4570 ifp = XFS_IFORK_PTR(ip, whichfork); 4571 ASSERT(len > 0); 4572 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN); 4573 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4574 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC | 4575 XFS_BMAPI_NORMAP))); 4576 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != 4577 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); 4578 4579 if (unlikely(XFS_TEST_ERROR( 4580 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4581 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4582 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4583 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp); 4584 return -EFSCORRUPTED; 4585 } 4586 4587 if (XFS_FORCED_SHUTDOWN(mp)) 4588 return -EIO; 4589 4590 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4591 error = xfs_iread_extents(tp, ip, whichfork); 4592 if (error) 4593 return error; 4594 } 4595 4596 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) { 4597 /* make sure we only reflink into a hole. 
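 * That is, any extent found at or beyond bno must start past the end
 * of the range being remapped.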
*/ 4598 ASSERT(got.br_startoff > bno); 4599 ASSERT(got.br_startoff - bno >= len); 4600 } 4601 4602 ip->i_d.di_nblocks += len; 4603 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4604 4605 if (ifp->if_flags & XFS_IFBROOT) { 4606 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 4607 cur->bc_private.b.flags = 0; 4608 } 4609 4610 got.br_startoff = bno; 4611 got.br_startblock = startblock; 4612 got.br_blockcount = len; 4613 if (flags & XFS_BMAPI_PREALLOC) 4614 got.br_state = XFS_EXT_UNWRITTEN; 4615 else 4616 got.br_state = XFS_EXT_NORM; 4617 4618 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 4619 &cur, &got, &logflags, flags); 4620 if (error) 4621 goto error0; 4622 4623 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork); 4624 4625 error0: 4626 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) 4627 logflags &= ~XFS_ILOG_DEXT; 4628 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) 4629 logflags &= ~XFS_ILOG_DBROOT; 4630 4631 if (logflags) 4632 xfs_trans_log_inode(tp, ip, logflags); 4633 if (cur) 4634 xfs_btree_del_cursor(cur, error); 4635 return error; 4636 } 4637 4638 /* 4639 * When a delalloc extent is split (e.g., due to a hole punch), the original 4640 * indlen reservation must be shared across the two new extents that are left 4641 * behind. 4642 * 4643 * Given the original reservation and the worst case indlen for the two new 4644 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4645 * reservation fairly across the two new extents. If necessary, steal available 4646 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4647 * ores == 1). The number of stolen blocks is returned. The availability and 4648 * subsequent accounting of stolen blocks is the responsibility of the caller. 4649 */ 4650 static xfs_filblks_t 4651 xfs_bmap_split_indlen( 4652 xfs_filblks_t ores, /* original res. */ 4653 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4654 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4655 xfs_filblks_t avail) /* stealable blocks */ 4656 { 4657 xfs_filblks_t len1 = *indlen1; 4658 xfs_filblks_t len2 = *indlen2; 4659 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4660 xfs_filblks_t stolen = 0; 4661 xfs_filblks_t resfactor; 4662 4663 /* 4664 * Steal as many blocks as we can to try and satisfy the worst case 4665 * indlen for both new extents. 4666 */ 4667 if (ores < nres && avail) 4668 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4669 ores += stolen; 4670 4671 /* nothing else to do if we've satisfied the new reservation */ 4672 if (ores >= nres) 4673 return stolen; 4674 4675 /* 4676 * We can't meet the total required reservation for the two extents. 4677 * Calculate the percent of the overall shortage between both extents 4678 * and apply this percentage to each of the requested indlen values. 4679 * This distributes the shortage fairly and reduces the chances that one 4680 * of the two extents is left with nothing when extents are repeatedly 4681 * split. 4682 */ 4683 resfactor = (ores * 100); 4684 do_div(resfactor, nres); 4685 len1 *= resfactor; 4686 do_div(len1, 100); 4687 len2 *= resfactor; 4688 do_div(len2, 100); 4689 ASSERT(len1 + len2 <= ores); 4690 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4691 4692 /* 4693 * Hand out the remainder to each extent. If one of the two reservations 4694 * is zero, we want to make sure that one gets a block first. The loop 4695 * below starts with len1, so hand len2 a block right off the bat if it 4696 * is zero. 
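 *
 * A rough worked example: with ores = 5, *indlen1 = 4 and *indlen2 = 3
 * (and nothing available to steal), resfactor is 5 * 100 / 7 = 71, so
 * len1 and len2 are first scaled down to 2 and 2; the loop then hands
 * the one leftover block to len1, leaving 3 + 2 = 5 = ores.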
4697 */ 4698 ores -= (len1 + len2); 4699 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4700 if (ores && !len2 && *indlen2) { 4701 len2++; 4702 ores--; 4703 } 4704 while (ores) { 4705 if (len1 < *indlen1) { 4706 len1++; 4707 ores--; 4708 } 4709 if (!ores) 4710 break; 4711 if (len2 < *indlen2) { 4712 len2++; 4713 ores--; 4714 } 4715 } 4716 4717 *indlen1 = len1; 4718 *indlen2 = len2; 4719 4720 return stolen; 4721 } 4722 4723 int 4724 xfs_bmap_del_extent_delay( 4725 struct xfs_inode *ip, 4726 int whichfork, 4727 struct xfs_iext_cursor *icur, 4728 struct xfs_bmbt_irec *got, 4729 struct xfs_bmbt_irec *del) 4730 { 4731 struct xfs_mount *mp = ip->i_mount; 4732 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4733 struct xfs_bmbt_irec new; 4734 int64_t da_old, da_new, da_diff = 0; 4735 xfs_fileoff_t del_endoff, got_endoff; 4736 xfs_filblks_t got_indlen, new_indlen, stolen; 4737 int state = xfs_bmap_fork_to_state(whichfork); 4738 int error = 0; 4739 bool isrt; 4740 4741 XFS_STATS_INC(mp, xs_del_exlist); 4742 4743 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4744 del_endoff = del->br_startoff + del->br_blockcount; 4745 got_endoff = got->br_startoff + got->br_blockcount; 4746 da_old = startblockval(got->br_startblock); 4747 da_new = 0; 4748 4749 ASSERT(del->br_blockcount > 0); 4750 ASSERT(got->br_startoff <= del->br_startoff); 4751 ASSERT(got_endoff >= del_endoff); 4752 4753 if (isrt) { 4754 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4755 4756 do_div(rtexts, mp->m_sb.sb_rextsize); 4757 xfs_mod_frextents(mp, rtexts); 4758 } 4759 4760 /* 4761 * Update the inode delalloc counter now and wait to update the 4762 * sb counters as we might have to borrow some blocks for the 4763 * indirect block accounting. 4764 */ 4765 error = xfs_trans_reserve_quota_nblks(NULL, ip, 4766 -((long)del->br_blockcount), 0, 4767 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4768 if (error) 4769 return error; 4770 ip->i_delayed_blks -= del->br_blockcount; 4771 4772 if (got->br_startoff == del->br_startoff) 4773 state |= BMAP_LEFT_FILLING; 4774 if (got_endoff == del_endoff) 4775 state |= BMAP_RIGHT_FILLING; 4776 4777 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4778 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4779 /* 4780 * Matches the whole extent. Delete the entry. 4781 */ 4782 xfs_iext_remove(ip, icur, state); 4783 xfs_iext_prev(ifp, icur); 4784 break; 4785 case BMAP_LEFT_FILLING: 4786 /* 4787 * Deleting the first part of the extent. 4788 */ 4789 got->br_startoff = del_endoff; 4790 got->br_blockcount -= del->br_blockcount; 4791 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4792 got->br_blockcount), da_old); 4793 got->br_startblock = nullstartblock((int)da_new); 4794 xfs_iext_update_extent(ip, state, icur, got); 4795 break; 4796 case BMAP_RIGHT_FILLING: 4797 /* 4798 * Deleting the last part of the extent. 4799 */ 4800 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4801 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4802 got->br_blockcount), da_old); 4803 got->br_startblock = nullstartblock((int)da_new); 4804 xfs_iext_update_extent(ip, state, icur, got); 4805 break; 4806 case 0: 4807 /* 4808 * Deleting the middle of the extent. 4809 * 4810 * Distribute the original indlen reservation across the two new 4811 * extents. Steal blocks from the deleted extent if necessary. 4812 * Stealing blocks simply fudges the fdblocks accounting below. 
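 * (Stolen blocks are also subtracted from del->br_blockcount below so
 * they are not handed back to fdblocks while still being held as
 * indirect reservation.)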
4813 * Warn if either of the new indlen reservations is zero as this 4814 * can lead to delalloc problems. 4815 */ 4816 got->br_blockcount = del->br_startoff - got->br_startoff; 4817 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4818 4819 new.br_blockcount = got_endoff - del_endoff; 4820 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4821 4822 WARN_ON_ONCE(!got_indlen || !new_indlen); 4823 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4824 del->br_blockcount); 4825 4826 got->br_startblock = nullstartblock((int)got_indlen); 4827 4828 new.br_startoff = del_endoff; 4829 new.br_state = got->br_state; 4830 new.br_startblock = nullstartblock((int)new_indlen); 4831 4832 xfs_iext_update_extent(ip, state, icur, got); 4833 xfs_iext_next(ifp, icur); 4834 xfs_iext_insert(ip, icur, &new, state); 4835 4836 da_new = got_indlen + new_indlen - stolen; 4837 del->br_blockcount -= stolen; 4838 break; 4839 } 4840 4841 ASSERT(da_old >= da_new); 4842 da_diff = da_old - da_new; 4843 if (!isrt) 4844 da_diff += del->br_blockcount; 4845 if (da_diff) { 4846 xfs_mod_fdblocks(mp, da_diff, false); 4847 xfs_mod_delalloc(mp, -da_diff); 4848 } 4849 return error; 4850 } 4851 4852 void 4853 xfs_bmap_del_extent_cow( 4854 struct xfs_inode *ip, 4855 struct xfs_iext_cursor *icur, 4856 struct xfs_bmbt_irec *got, 4857 struct xfs_bmbt_irec *del) 4858 { 4859 struct xfs_mount *mp = ip->i_mount; 4860 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4861 struct xfs_bmbt_irec new; 4862 xfs_fileoff_t del_endoff, got_endoff; 4863 int state = BMAP_COWFORK; 4864 4865 XFS_STATS_INC(mp, xs_del_exlist); 4866 4867 del_endoff = del->br_startoff + del->br_blockcount; 4868 got_endoff = got->br_startoff + got->br_blockcount; 4869 4870 ASSERT(del->br_blockcount > 0); 4871 ASSERT(got->br_startoff <= del->br_startoff); 4872 ASSERT(got_endoff >= del_endoff); 4873 ASSERT(!isnullstartblock(got->br_startblock)); 4874 4875 if (got->br_startoff == del->br_startoff) 4876 state |= BMAP_LEFT_FILLING; 4877 if (got_endoff == del_endoff) 4878 state |= BMAP_RIGHT_FILLING; 4879 4880 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4881 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4882 /* 4883 * Matches the whole extent. Delete the entry. 4884 */ 4885 xfs_iext_remove(ip, icur, state); 4886 xfs_iext_prev(ifp, icur); 4887 break; 4888 case BMAP_LEFT_FILLING: 4889 /* 4890 * Deleting the first part of the extent. 4891 */ 4892 got->br_startoff = del_endoff; 4893 got->br_blockcount -= del->br_blockcount; 4894 got->br_startblock = del->br_startblock + del->br_blockcount; 4895 xfs_iext_update_extent(ip, state, icur, got); 4896 break; 4897 case BMAP_RIGHT_FILLING: 4898 /* 4899 * Deleting the last part of the extent. 4900 */ 4901 got->br_blockcount -= del->br_blockcount; 4902 xfs_iext_update_extent(ip, state, icur, got); 4903 break; 4904 case 0: 4905 /* 4906 * Deleting the middle of the extent. 4907 */ 4908 got->br_blockcount = del->br_startoff - got->br_startoff; 4909 4910 new.br_startoff = del_endoff; 4911 new.br_blockcount = got_endoff - del_endoff; 4912 new.br_state = got->br_state; 4913 new.br_startblock = del->br_startblock + del->br_blockcount; 4914 4915 xfs_iext_update_extent(ip, state, icur, got); 4916 xfs_iext_next(ifp, icur); 4917 xfs_iext_insert(ip, icur, &new, state); 4918 break; 4919 } 4920 ip->i_delayed_blks -= del->br_blockcount; 4921 } 4922 4923 /* 4924 * Called by xfs_bmapi to update file extent records and the btree 4925 * after removing space. 
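 * This handles real (non-delayed) extents: the in-core record and, if
 * present, the bmap btree are updated, the freed blocks are queued for
 * freeing (or handed back to the realtime allocator), and the inode
 * block count and quota are adjusted.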
4926 */ 4927 STATIC int /* error */ 4928 xfs_bmap_del_extent_real( 4929 xfs_inode_t *ip, /* incore inode pointer */ 4930 xfs_trans_t *tp, /* current transaction pointer */ 4931 struct xfs_iext_cursor *icur, 4932 xfs_btree_cur_t *cur, /* if null, not a btree */ 4933 xfs_bmbt_irec_t *del, /* data to remove from extents */ 4934 int *logflagsp, /* inode logging flags */ 4935 int whichfork, /* data or attr fork */ 4936 int bflags) /* bmapi flags */ 4937 { 4938 xfs_fsblock_t del_endblock=0; /* first block past del */ 4939 xfs_fileoff_t del_endoff; /* first offset past del */ 4940 int do_fx; /* free extent at end of routine */ 4941 int error; /* error return value */ 4942 int flags = 0;/* inode logging flags */ 4943 struct xfs_bmbt_irec got; /* current extent entry */ 4944 xfs_fileoff_t got_endoff; /* first offset past got */ 4945 int i; /* temp state */ 4946 struct xfs_ifork *ifp; /* inode fork pointer */ 4947 xfs_mount_t *mp; /* mount structure */ 4948 xfs_filblks_t nblks; /* quota/sb block count */ 4949 xfs_bmbt_irec_t new; /* new record to be inserted */ 4950 /* REFERENCED */ 4951 uint qfield; /* quota field to update */ 4952 int state = xfs_bmap_fork_to_state(whichfork); 4953 struct xfs_bmbt_irec old; 4954 4955 mp = ip->i_mount; 4956 XFS_STATS_INC(mp, xs_del_exlist); 4957 4958 ifp = XFS_IFORK_PTR(ip, whichfork); 4959 ASSERT(del->br_blockcount > 0); 4960 xfs_iext_get_extent(ifp, icur, &got); 4961 ASSERT(got.br_startoff <= del->br_startoff); 4962 del_endoff = del->br_startoff + del->br_blockcount; 4963 got_endoff = got.br_startoff + got.br_blockcount; 4964 ASSERT(got_endoff >= del_endoff); 4965 ASSERT(!isnullstartblock(got.br_startblock)); 4966 qfield = 0; 4967 error = 0; 4968 4969 /* 4970 * If it's the case where the directory code is running with no block 4971 * reservation, and the deleted block is in the middle of its extent, 4972 * and the resulting insert of an extent would cause transformation to 4973 * btree format, then reject it. The calling code will then swap blocks 4974 * around instead. We have to do this now, rather than waiting for the 4975 * conversion to btree format, since the transaction will be dirty then. 
4976 */ 4977 if (tp->t_blk_res == 0 && 4978 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 4979 XFS_IFORK_NEXTENTS(ip, whichfork) >= 4980 XFS_IFORK_MAXEXT(ip, whichfork) && 4981 del->br_startoff > got.br_startoff && del_endoff < got_endoff) 4982 return -ENOSPC; 4983 4984 flags = XFS_ILOG_CORE; 4985 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 4986 xfs_fsblock_t bno; 4987 xfs_filblks_t len; 4988 xfs_extlen_t mod; 4989 4990 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize, 4991 &mod); 4992 ASSERT(mod == 0); 4993 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize, 4994 &mod); 4995 ASSERT(mod == 0); 4996 4997 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 4998 if (error) 4999 goto done; 5000 do_fx = 0; 5001 nblks = len * mp->m_sb.sb_rextsize; 5002 qfield = XFS_TRANS_DQ_RTBCOUNT; 5003 } else { 5004 do_fx = 1; 5005 nblks = del->br_blockcount; 5006 qfield = XFS_TRANS_DQ_BCOUNT; 5007 } 5008 5009 del_endblock = del->br_startblock + del->br_blockcount; 5010 if (cur) { 5011 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5012 if (error) 5013 goto done; 5014 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5015 } 5016 5017 if (got.br_startoff == del->br_startoff) 5018 state |= BMAP_LEFT_FILLING; 5019 if (got_endoff == del_endoff) 5020 state |= BMAP_RIGHT_FILLING; 5021 5022 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 5023 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 5024 /* 5025 * Matches the whole extent. Delete the entry. 5026 */ 5027 xfs_iext_remove(ip, icur, state); 5028 xfs_iext_prev(ifp, icur); 5029 XFS_IFORK_NEXT_SET(ip, whichfork, 5030 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5031 flags |= XFS_ILOG_CORE; 5032 if (!cur) { 5033 flags |= xfs_ilog_fext(whichfork); 5034 break; 5035 } 5036 if ((error = xfs_btree_delete(cur, &i))) 5037 goto done; 5038 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5039 break; 5040 case BMAP_LEFT_FILLING: 5041 /* 5042 * Deleting the first part of the extent. 5043 */ 5044 got.br_startoff = del_endoff; 5045 got.br_startblock = del_endblock; 5046 got.br_blockcount -= del->br_blockcount; 5047 xfs_iext_update_extent(ip, state, icur, &got); 5048 if (!cur) { 5049 flags |= xfs_ilog_fext(whichfork); 5050 break; 5051 } 5052 error = xfs_bmbt_update(cur, &got); 5053 if (error) 5054 goto done; 5055 break; 5056 case BMAP_RIGHT_FILLING: 5057 /* 5058 * Deleting the last part of the extent. 5059 */ 5060 got.br_blockcount -= del->br_blockcount; 5061 xfs_iext_update_extent(ip, state, icur, &got); 5062 if (!cur) { 5063 flags |= xfs_ilog_fext(whichfork); 5064 break; 5065 } 5066 error = xfs_bmbt_update(cur, &got); 5067 if (error) 5068 goto done; 5069 break; 5070 case 0: 5071 /* 5072 * Deleting the middle of the extent. 5073 */ 5074 old = got; 5075 5076 got.br_blockcount = del->br_startoff - got.br_startoff; 5077 xfs_iext_update_extent(ip, state, icur, &got); 5078 5079 new.br_startoff = del_endoff; 5080 new.br_blockcount = got_endoff - del_endoff; 5081 new.br_state = got.br_state; 5082 new.br_startblock = del_endblock; 5083 5084 flags |= XFS_ILOG_CORE; 5085 if (cur) { 5086 error = xfs_bmbt_update(cur, &got); 5087 if (error) 5088 goto done; 5089 error = xfs_btree_increment(cur, 0, &i); 5090 if (error) 5091 goto done; 5092 cur->bc_rec.b = new; 5093 error = xfs_btree_insert(cur, &i); 5094 if (error && error != -ENOSPC) 5095 goto done; 5096 /* 5097 * If get no-space back from btree insert, it tried a 5098 * split, and we have a zero block reservation. Fix up 5099 * our state and return the error. 
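 * "Fixing up" means re-finding the record we just shrank, restoring
 * both the btree entry and the in-core extent to their original
 * values, and clearing the logging flags so the failed change is not
 * logged.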
5100 */ 5101 if (error == -ENOSPC) { 5102 /* 5103 * Reset the cursor, don't trust it after any 5104 * insert operation. 5105 */ 5106 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5107 if (error) 5108 goto done; 5109 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5110 /* 5111 * Update the btree record back 5112 * to the original value. 5113 */ 5114 error = xfs_bmbt_update(cur, &old); 5115 if (error) 5116 goto done; 5117 /* 5118 * Reset the extent record back 5119 * to the original value. 5120 */ 5121 xfs_iext_update_extent(ip, state, icur, &old); 5122 flags = 0; 5123 error = -ENOSPC; 5124 goto done; 5125 } 5126 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5127 } else 5128 flags |= xfs_ilog_fext(whichfork); 5129 XFS_IFORK_NEXT_SET(ip, whichfork, 5130 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5131 xfs_iext_next(ifp, icur); 5132 xfs_iext_insert(ip, icur, &new, state); 5133 break; 5134 } 5135 5136 /* remove reverse mapping */ 5137 xfs_rmap_unmap_extent(tp, ip, whichfork, del); 5138 5139 /* 5140 * If we need to, add to list of extents to delete. 5141 */ 5142 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { 5143 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5144 xfs_refcount_decrease_extent(tp, del); 5145 } else { 5146 __xfs_bmap_add_free(tp, del->br_startblock, 5147 del->br_blockcount, NULL, 5148 (bflags & XFS_BMAPI_NODISCARD) || 5149 del->br_state == XFS_EXT_UNWRITTEN); 5150 } 5151 } 5152 5153 /* 5154 * Adjust inode # blocks in the file. 5155 */ 5156 if (nblks) 5157 ip->i_d.di_nblocks -= nblks; 5158 /* 5159 * Adjust quota data. 5160 */ 5161 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5162 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5163 5164 done: 5165 *logflagsp = flags; 5166 return error; 5167 } 5168 5169 /* 5170 * Unmap (remove) blocks from a file. 5171 * If nexts is nonzero then the number of extents to remove is limited to 5172 * that value. If not all extents in the block range can be removed then 5173 * *done is set. 
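 * (In this internal variant the remaining length is reported back
 * through *rlen; the xfs_bunmapi() wrapper further down derives *done
 * from it.)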
5174 */ 5175 int /* error */ 5176 __xfs_bunmapi( 5177 struct xfs_trans *tp, /* transaction pointer */ 5178 struct xfs_inode *ip, /* incore inode */ 5179 xfs_fileoff_t start, /* first file offset deleted */ 5180 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5181 int flags, /* misc flags */ 5182 xfs_extnum_t nexts) /* number of extents max */ 5183 { 5184 struct xfs_btree_cur *cur; /* bmap btree cursor */ 5185 struct xfs_bmbt_irec del; /* extent being deleted */ 5186 int error; /* error return value */ 5187 xfs_extnum_t extno; /* extent number in list */ 5188 struct xfs_bmbt_irec got; /* current extent record */ 5189 struct xfs_ifork *ifp; /* inode fork pointer */ 5190 int isrt; /* freeing in rt area */ 5191 int logflags; /* transaction logging flags */ 5192 xfs_extlen_t mod; /* rt extent offset */ 5193 struct xfs_mount *mp; /* mount structure */ 5194 int tmp_logflags; /* partial logging flags */ 5195 int wasdel; /* was a delayed alloc extent */ 5196 int whichfork; /* data or attribute fork */ 5197 xfs_fsblock_t sum; 5198 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5199 xfs_fileoff_t max_len; 5200 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno; 5201 xfs_fileoff_t end; 5202 struct xfs_iext_cursor icur; 5203 bool done = false; 5204 5205 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); 5206 5207 whichfork = xfs_bmapi_whichfork(flags); 5208 ASSERT(whichfork != XFS_COW_FORK); 5209 ifp = XFS_IFORK_PTR(ip, whichfork); 5210 if (unlikely( 5211 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5212 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5213 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5214 ip->i_mount); 5215 return -EFSCORRUPTED; 5216 } 5217 mp = ip->i_mount; 5218 if (XFS_FORCED_SHUTDOWN(mp)) 5219 return -EIO; 5220 5221 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5222 ASSERT(len > 0); 5223 ASSERT(nexts >= 0); 5224 5225 /* 5226 * Guesstimate how many blocks we can unmap without running the risk of 5227 * blowing out the transaction with a mix of EFIs and reflink 5228 * adjustments. 5229 */ 5230 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) 5231 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res)); 5232 else 5233 max_len = len; 5234 5235 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5236 (error = xfs_iread_extents(tp, ip, whichfork))) 5237 return error; 5238 if (xfs_iext_count(ifp) == 0) { 5239 *rlen = 0; 5240 return 0; 5241 } 5242 XFS_STATS_INC(mp, xs_blk_unmap); 5243 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5244 end = start + len; 5245 5246 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) { 5247 *rlen = 0; 5248 return 0; 5249 } 5250 end--; 5251 5252 logflags = 0; 5253 if (ifp->if_flags & XFS_IFBROOT) { 5254 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5255 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5256 cur->bc_private.b.flags = 0; 5257 } else 5258 cur = NULL; 5259 5260 if (isrt) { 5261 /* 5262 * Synchronize by locking the bitmap inode. 5263 */ 5264 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5265 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5266 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5267 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5268 } 5269 5270 extno = 0; 5271 while (end != (xfs_fileoff_t)-1 && end >= start && 5272 (nexts == 0 || extno < nexts) && max_len > 0) { 5273 /* 5274 * Is the found extent after a hole in which end lives? 5275 * Just back up to the previous extent, if so. 
5276 */ 5277 if (got.br_startoff > end && 5278 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5279 done = true; 5280 break; 5281 } 5282 /* 5283 * Is the last block of this extent before the range 5284 * we're supposed to delete? If so, we're done. 5285 */ 5286 end = XFS_FILEOFF_MIN(end, 5287 got.br_startoff + got.br_blockcount - 1); 5288 if (end < start) 5289 break; 5290 /* 5291 * Then deal with the (possibly delayed) allocated space 5292 * we found. 5293 */ 5294 del = got; 5295 wasdel = isnullstartblock(del.br_startblock); 5296 5297 /* 5298 * Make sure we don't touch multiple AGF headers out of order 5299 * in a single transaction, as that could cause AB-BA deadlocks. 5300 */ 5301 if (!wasdel) { 5302 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock); 5303 if (prev_agno != NULLAGNUMBER && prev_agno > agno) 5304 break; 5305 prev_agno = agno; 5306 } 5307 if (got.br_startoff < start) { 5308 del.br_startoff = start; 5309 del.br_blockcount -= start - got.br_startoff; 5310 if (!wasdel) 5311 del.br_startblock += start - got.br_startoff; 5312 } 5313 if (del.br_startoff + del.br_blockcount > end + 1) 5314 del.br_blockcount = end + 1 - del.br_startoff; 5315 5316 /* How much can we safely unmap? */ 5317 if (max_len < del.br_blockcount) { 5318 del.br_startoff += del.br_blockcount - max_len; 5319 if (!wasdel) 5320 del.br_startblock += del.br_blockcount - max_len; 5321 del.br_blockcount = max_len; 5322 } 5323 5324 if (!isrt) 5325 goto delete; 5326 5327 sum = del.br_startblock + del.br_blockcount; 5328 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod); 5329 if (mod) { 5330 /* 5331 * Realtime extent not lined up at the end. 5332 * The extent could have been split into written 5333 * and unwritten pieces, or we could just be 5334 * unmapping part of it. But we can't really 5335 * get rid of part of a realtime extent. 5336 */ 5337 if (del.br_state == XFS_EXT_UNWRITTEN) { 5338 /* 5339 * This piece is unwritten, or we're not 5340 * using unwritten extents. Skip over it. 5341 */ 5342 ASSERT(end >= mod); 5343 end -= mod > del.br_blockcount ? 5344 del.br_blockcount : mod; 5345 if (end < got.br_startoff && 5346 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5347 done = true; 5348 break; 5349 } 5350 continue; 5351 } 5352 /* 5353 * It's written, turn it unwritten. 5354 * This is better than zeroing it. 5355 */ 5356 ASSERT(del.br_state == XFS_EXT_NORM); 5357 ASSERT(tp->t_blk_res > 0); 5358 /* 5359 * If this spans a realtime extent boundary, 5360 * chop it back to the start of the one we end at. 5361 */ 5362 if (del.br_blockcount > mod) { 5363 del.br_startoff += del.br_blockcount - mod; 5364 del.br_startblock += del.br_blockcount - mod; 5365 del.br_blockcount = mod; 5366 } 5367 del.br_state = XFS_EXT_UNWRITTEN; 5368 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5369 whichfork, &icur, &cur, &del, 5370 &logflags); 5371 if (error) 5372 goto error0; 5373 goto nodelete; 5374 } 5375 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); 5376 if (mod) { 5377 /* 5378 * Realtime extent is lined up at the end but not 5379 * at the front. We'll get rid of full extents if 5380 * we can. 5381 */ 5382 mod = mp->m_sb.sb_rextsize - mod; 5383 if (del.br_blockcount > mod) { 5384 del.br_blockcount -= mod; 5385 del.br_startoff += mod; 5386 del.br_startblock += mod; 5387 } else if (del.br_startoff == start && 5388 (del.br_state == XFS_EXT_UNWRITTEN || 5389 tp->t_blk_res == 0)) { 5390 /* 5391 * Can't make it unwritten. There isn't 5392 * a full extent here so just skip it. 
5393 */ 5394 ASSERT(end >= del.br_blockcount); 5395 end -= del.br_blockcount; 5396 if (got.br_startoff > end && 5397 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5398 done = true; 5399 break; 5400 } 5401 continue; 5402 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5403 struct xfs_bmbt_irec prev; 5404 5405 /* 5406 * This one is already unwritten. 5407 * It must have a written left neighbor. 5408 * Unwrite the killed part of that one and 5409 * try again. 5410 */ 5411 if (!xfs_iext_prev_extent(ifp, &icur, &prev)) 5412 ASSERT(0); 5413 ASSERT(prev.br_state == XFS_EXT_NORM); 5414 ASSERT(!isnullstartblock(prev.br_startblock)); 5415 ASSERT(del.br_startblock == 5416 prev.br_startblock + prev.br_blockcount); 5417 if (prev.br_startoff < start) { 5418 mod = start - prev.br_startoff; 5419 prev.br_blockcount -= mod; 5420 prev.br_startblock += mod; 5421 prev.br_startoff = start; 5422 } 5423 prev.br_state = XFS_EXT_UNWRITTEN; 5424 error = xfs_bmap_add_extent_unwritten_real(tp, 5425 ip, whichfork, &icur, &cur, 5426 &prev, &logflags); 5427 if (error) 5428 goto error0; 5429 goto nodelete; 5430 } else { 5431 ASSERT(del.br_state == XFS_EXT_NORM); 5432 del.br_state = XFS_EXT_UNWRITTEN; 5433 error = xfs_bmap_add_extent_unwritten_real(tp, 5434 ip, whichfork, &icur, &cur, 5435 &del, &logflags); 5436 if (error) 5437 goto error0; 5438 goto nodelete; 5439 } 5440 } 5441 5442 delete: 5443 if (wasdel) { 5444 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5445 &got, &del); 5446 } else { 5447 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur, 5448 &del, &tmp_logflags, whichfork, 5449 flags); 5450 logflags |= tmp_logflags; 5451 } 5452 5453 if (error) 5454 goto error0; 5455 5456 max_len -= del.br_blockcount; 5457 end = del.br_startoff - 1; 5458 nodelete: 5459 /* 5460 * If not done go on to the next (previous) record. 5461 */ 5462 if (end != (xfs_fileoff_t)-1 && end >= start) { 5463 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5464 (got.br_startoff > end && 5465 !xfs_iext_prev_extent(ifp, &icur, &got))) { 5466 done = true; 5467 break; 5468 } 5469 extno++; 5470 } 5471 } 5472 if (done || end == (xfs_fileoff_t)-1 || end < start) 5473 *rlen = 0; 5474 else 5475 *rlen = end - start + 1; 5476 5477 /* 5478 * Convert to a btree if necessary. 5479 */ 5480 if (xfs_bmap_needs_btree(ip, whichfork)) { 5481 ASSERT(cur == NULL); 5482 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5483 &tmp_logflags, whichfork); 5484 logflags |= tmp_logflags; 5485 } else { 5486 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, 5487 whichfork); 5488 } 5489 5490 error0: 5491 /* 5492 * Log everything. Do this after conversion, there's no point in 5493 * logging the extent records if we've converted to btree format. 5494 */ 5495 if ((logflags & xfs_ilog_fext(whichfork)) && 5496 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5497 logflags &= ~xfs_ilog_fext(whichfork); 5498 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5499 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5500 logflags &= ~xfs_ilog_fbroot(whichfork); 5501 /* 5502 * Log inode even in the error case, if the transaction 5503 * is dirty we'll need to shut down the filesystem. 5504 */ 5505 if (logflags) 5506 xfs_trans_log_inode(tp, ip, logflags); 5507 if (cur) { 5508 if (!error) 5509 cur->bc_private.b.allocated = 0; 5510 xfs_btree_del_cursor(cur, error); 5511 } 5512 return error; 5513 } 5514 5515 /* Unmap a range of a file. 
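 * This is a thin wrapper around __xfs_bunmapi() that reports completion
 * through *done instead of a remaining length.  A caller wanting to
 * remove at most one extent from a range might, roughly, do:
 *
 *	error = xfs_bunmapi(tp, ip, bno, len, 0, 1, &done);
 *
 * (illustrative only; real callers pick flags and nexts to suit).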
*/ 5516 int 5517 xfs_bunmapi( 5518 xfs_trans_t *tp, 5519 struct xfs_inode *ip, 5520 xfs_fileoff_t bno, 5521 xfs_filblks_t len, 5522 int flags, 5523 xfs_extnum_t nexts, 5524 int *done) 5525 { 5526 int error; 5527 5528 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts); 5529 *done = (len == 0); 5530 return error; 5531 } 5532 5533 /* 5534 * Determine whether an extent shift can be accomplished by a merge with the 5535 * extent that precedes the target hole of the shift. 5536 */ 5537 STATIC bool 5538 xfs_bmse_can_merge( 5539 struct xfs_bmbt_irec *left, /* preceding extent */ 5540 struct xfs_bmbt_irec *got, /* current extent to shift */ 5541 xfs_fileoff_t shift) /* shift fsb */ 5542 { 5543 xfs_fileoff_t startoff; 5544 5545 startoff = got->br_startoff - shift; 5546 5547 /* 5548 * The extent, once shifted, must be adjacent in-file and on-disk with 5549 * the preceding extent. 5550 */ 5551 if ((left->br_startoff + left->br_blockcount != startoff) || 5552 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5553 (left->br_state != got->br_state) || 5554 (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) 5555 return false; 5556 5557 return true; 5558 } 5559 5560 /* 5561 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5562 * hole in the file. If an extent shift would result in the extent being fully 5563 * adjacent to the extent that currently precedes the hole, we can merge with 5564 * the preceding extent rather than do the shift. 5565 * 5566 * This function assumes the caller has verified a shift-by-merge is possible 5567 * with the provided extents via xfs_bmse_can_merge(). 5568 */ 5569 STATIC int 5570 xfs_bmse_merge( 5571 struct xfs_trans *tp, 5572 struct xfs_inode *ip, 5573 int whichfork, 5574 xfs_fileoff_t shift, /* shift fsb */ 5575 struct xfs_iext_cursor *icur, 5576 struct xfs_bmbt_irec *got, /* extent to shift */ 5577 struct xfs_bmbt_irec *left, /* preceding extent */ 5578 struct xfs_btree_cur *cur, 5579 int *logflags) /* output */ 5580 { 5581 struct xfs_bmbt_irec new; 5582 xfs_filblks_t blockcount; 5583 int error, i; 5584 struct xfs_mount *mp = ip->i_mount; 5585 5586 blockcount = left->br_blockcount + got->br_blockcount; 5587 5588 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5589 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5590 ASSERT(xfs_bmse_can_merge(left, got, shift)); 5591 5592 new = *left; 5593 new.br_blockcount = blockcount; 5594 5595 /* 5596 * Update the on-disk extent count, the btree if necessary and log the 5597 * inode. 
5598 */ 5599 XFS_IFORK_NEXT_SET(ip, whichfork, 5600 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5601 *logflags |= XFS_ILOG_CORE; 5602 if (!cur) { 5603 *logflags |= XFS_ILOG_DEXT; 5604 goto done; 5605 } 5606 5607 /* lookup and remove the extent to merge */ 5608 error = xfs_bmbt_lookup_eq(cur, got, &i); 5609 if (error) 5610 return error; 5611 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5612 5613 error = xfs_btree_delete(cur, &i); 5614 if (error) 5615 return error; 5616 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5617 5618 /* lookup and update size of the previous extent */ 5619 error = xfs_bmbt_lookup_eq(cur, left, &i); 5620 if (error) 5621 return error; 5622 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5623 5624 error = xfs_bmbt_update(cur, &new); 5625 if (error) 5626 return error; 5627 5628 /* change to extent format if required after extent removal */ 5629 error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork); 5630 if (error) 5631 return error; 5632 5633 done: 5634 xfs_iext_remove(ip, icur, 0); 5635 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur); 5636 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5637 &new); 5638 5639 /* update reverse mapping. rmap functions merge the rmaps for us */ 5640 xfs_rmap_unmap_extent(tp, ip, whichfork, got); 5641 memcpy(&new, got, sizeof(new)); 5642 new.br_startoff = left->br_startoff + left->br_blockcount; 5643 xfs_rmap_map_extent(tp, ip, whichfork, &new); 5644 return 0; 5645 } 5646 5647 static int 5648 xfs_bmap_shift_update_extent( 5649 struct xfs_trans *tp, 5650 struct xfs_inode *ip, 5651 int whichfork, 5652 struct xfs_iext_cursor *icur, 5653 struct xfs_bmbt_irec *got, 5654 struct xfs_btree_cur *cur, 5655 int *logflags, 5656 xfs_fileoff_t startoff) 5657 { 5658 struct xfs_mount *mp = ip->i_mount; 5659 struct xfs_bmbt_irec prev = *got; 5660 int error, i; 5661 5662 *logflags |= XFS_ILOG_CORE; 5663 5664 got->br_startoff = startoff; 5665 5666 if (cur) { 5667 error = xfs_bmbt_lookup_eq(cur, &prev, &i); 5668 if (error) 5669 return error; 5670 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5671 5672 error = xfs_bmbt_update(cur, got); 5673 if (error) 5674 return error; 5675 } else { 5676 *logflags |= XFS_ILOG_DEXT; 5677 } 5678 5679 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5680 got); 5681 5682 /* update reverse mapping */ 5683 xfs_rmap_unmap_extent(tp, ip, whichfork, &prev); 5684 xfs_rmap_map_extent(tp, ip, whichfork, got); 5685 return 0; 5686 } 5687 5688 int 5689 xfs_bmap_collapse_extents( 5690 struct xfs_trans *tp, 5691 struct xfs_inode *ip, 5692 xfs_fileoff_t *next_fsb, 5693 xfs_fileoff_t offset_shift_fsb, 5694 bool *done) 5695 { 5696 int whichfork = XFS_DATA_FORK; 5697 struct xfs_mount *mp = ip->i_mount; 5698 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 5699 struct xfs_btree_cur *cur = NULL; 5700 struct xfs_bmbt_irec got, prev; 5701 struct xfs_iext_cursor icur; 5702 xfs_fileoff_t new_startoff; 5703 int error = 0; 5704 int logflags = 0; 5705 5706 if (unlikely(XFS_TEST_ERROR( 5707 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5708 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 5709 mp, XFS_ERRTAG_BMAPIFORMAT))) { 5710 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 5711 return -EFSCORRUPTED; 5712 } 5713 5714 if (XFS_FORCED_SHUTDOWN(mp)) 5715 return -EIO; 5716 5717 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL)); 5718 5719 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 5720 error = xfs_iread_extents(tp, ip, whichfork); 5721 if (error) 5722 return error; 5723 } 5724 5725 if (ifp->if_flags 

int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
				del_cursor);

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
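
/*
 * Illustrative sketch (assumption, not the driver code in this file): how a
 * caller might use xfs_bmap_can_insert_extents() before starting a right
 * shift.  "offset" and "len" are hypothetical byte values from an
 * insert-range request; XFS_B_TO_FSB() is a real conversion helper.
 *
 *	xfs_fileoff_t	stop_fsb  = XFS_B_TO_FSB(mp, offset);
 *	xfs_fileoff_t	shift_fsb = XFS_B_TO_FSB(mp, len);
 *
 *	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
 *	if (error)
 *		return error;
 *
 * The check rejects the request when shifting the last extent would wrap the
 * on-disk startoff field: the shifted offset masked with BMBT_STARTOFF_MASK
 * compares lower than the original startoff.
 */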

int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
				del_cursor);

	if (stop_fsb >= got.br_startoff + got.br_blockcount) {
		error = -EIO;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way.  We
		 * should never find mergeable extents in this scenario.
		 * Check anyway and warn if we encounter two extents that
		 * could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
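
/*
 * Illustrative sketch (assumption): the shift helpers above are meant to be
 * called in a loop, one extent per call.  A hypothetical right-shift caller
 * might look like this; error handling and transaction rolling between
 * iterations are elided.
 *
 *	bool		done = false;
 *	xfs_fileoff_t	next_fsb = NULLFSBLOCK;	start from the last extent
 *
 *	while (!done) {
 *		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
 *				&done, stop_fsb);
 *		if (error)
 *			break;
 *	}
 *
 * xfs_bmap_collapse_extents() follows the same contract for left shifts,
 * except that *next_fsb starts at the first offset to collapse and the walk
 * proceeds forward instead of backward.
 */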

/*
 * Split an extent into two extents at split_fsb block such that it is the
 * first block of the new extent.  @split_fsb is the block at which the
 * target extent is split.  If split_fsb lies in a hole or at the first
 * block of an extent, just return 0.
 */
STATIC int
xfs_bmap_split_extent_at(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

int
xfs_bmap_split_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
	if (error)
		goto out;

	return xfs_trans_commit(tp);

out:
	xfs_trans_cancel(tp);
	return error;
}

/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return	bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}
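
/*
 * Illustrative worked example (numbers made up): splitting a mapping with
 * xfs_bmap_split_extent() at file offset block 8.
 *
 *	Before:	got = { br_startoff = 0, br_startblock = B,     br_blockcount = 20 }
 *	Split at split_fsb = 8, so gotblkcnt = 8 - 0 = 8:
 *	After:	got = { br_startoff = 0, br_startblock = B,     br_blockcount = 8  }
 *		new = { br_startoff = 8, br_startblock = B + 8, br_blockcount = 12 }
 *
 * No blocks move; the extent record is simply cut in two, the fork's extent
 * count goes up by one, and the fork is converted to btree format if the
 * extra record no longer fits in the inline extent list.
 */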

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}

/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error = 0;

	ASSERT(tp->t_firstblock == NULLFSBLOCK);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
			ip->i_ino, whichfork, startoff, *blockcount, state);

	if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
				startblock, 0);
		*blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, ip, startoff, blockcount,
				XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}
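
/*
 * Illustrative sketch (assumption): the typical deferred-bmap flow.  A caller
 * that wants to remap a real extent into the data fork logs an intent with
 * xfs_bmap_map_extent() and lets deferred-op processing invoke
 * xfs_bmap_finish_one() later, rather than touching the bmbt directly.  The
 * field values below are placeholders.
 *
 *	struct xfs_bmbt_irec	irec = {
 *		.br_startoff	= offset_fsb,
 *		.br_startblock	= fsbno,
 *		.br_blockcount	= len_fsb,
 *		.br_state	= XFS_EXT_NORM,
 *	};
 *
 *	xfs_bmap_map_extent(tp, ip, &irec);
 *	error = xfs_trans_commit(tp);
 *
 * Holes and delalloc extents are filtered out by xfs_bmap_is_update_needed(),
 * so logging an intent for those is a silent no-op.
 */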

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		endfsb;
	bool			isrt;

	isrt = XFS_IS_REALTIME_INODE(ip);
	endfsb = irec->br_startblock + irec->br_blockcount - 1;
	if (isrt) {
		if (!xfs_verify_rtbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_rtbno(mp, endfsb))
			return __this_address;
	} else {
		if (!xfs_verify_fsbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_fsbno(mp, endfsb))
			return __this_address;
		if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
		    XFS_FSB_TO_AGNO(mp, endfsb))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}
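
/*
 * Illustrative sketch (assumption): how a record verifier might consume
 * xfs_bmap_validate_extent().  The returned failure address identifies the
 * check that tripped, which the caller can feed into its corruption
 * reporting; NULL means the mapping passed every check.
 *
 *	xfs_failaddr_t	fa;
 *
 *	fa = xfs_bmap_validate_extent(ip, XFS_DATA_FORK, &irec);
 *	if (fa) {
 *		... report corruption at "fa" and reject the mapping ...
 *	}
 */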