// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"

struct kmem_cache		*xfs_bmap_intent_cache;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	uint64_t	maxblocks;	/* max blocks at this level */
	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
	int		level;		/* btree level */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
	 * but probably at various positions. Therefore, for both ATTR1 and
	 * ATTR2 we have to assume the worst case scenario of a minimum size
	 * available.
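	 *
	 * The loop below then works out, level by level, how many btree blocks
	 * that worst-case number of records needs: leaves hold at least
	 * minleafrecs records, interior nodes at least minnoderecs, and the
	 * walk stops once everything fits under a single root block.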
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	else
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = howmany_64(maxleafents, minleafrecs);
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}

unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
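 * The estimate assumes the extent is eventually written out as "len" separate
 * single-block mappings, so each bmap btree level is charged enough blocks to
 * hold that many records (at least one block per possible level).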
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}

/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
		return -EFSCORRUPTED;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
			XFS_AG_RESV_NONE);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
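	/* Give back the broot space and switch the fork to extent format. */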
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_ino.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
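	 * Every real extent in the incore extent tree is copied into it;
	 * delalloc records (null startblock) live only incore and are skipped.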
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}


STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);

	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(args.mp, ip->i_ino));
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;

	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting; others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if (xfs_has_attr2(ip->i_mount) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(xfs_inode_has_attr_fork(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd, &tp);
	if (error)
		return error;
	if (xfs_inode_has_attr_fork(ip))
		goto trans_cancel;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_has_attr(mp) ||
	   (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};

int
xfs_bmap_complain_bad_rec(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_failaddr_t		fa,
	const struct xfs_bmbt_irec *irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	const char		*forkname;

	switch (whichfork) {
	case XFS_DATA_FORK:	forkname = "data"; break;
	case XFS_ATTR_FORK:	forkname = "attr"; break;
	case XFS_COW_FORK:	forkname = "CoW"; break;
	default:		forkname = "???"; break;
	}

	xfs_warn(mp,
 "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
				ip->i_ino, forkname, fa);
	xfs_warn(mp,
		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
		irec->br_state);

	return -EFSCORRUPTED;
}

/* Stuff every bmbt record from this block into the incore extent map.
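 * Called back from xfs_btree_visit_blocks() while walking the on-disk bmbt;
 * "priv" carries the xfs_iread_state that tracks the insertion cursor and the
 * number of records loaded so far.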
 */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
					&new);
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	/*
	 * Use release semantics so that we can use acquire semantics in
	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
	 * after that load.
	 */
	smp_store_release(&ifp->if_needextents, 0);
	return 0;
out:
	xfs_iext_destroy(ifp);
	return error;
}

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
		return -EFSCORRUPTED;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
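	 * BMAP_LEFT_FILLING means the new extent starts exactly where PREV
	 * starts, BMAP_RIGHT_FILLING means it ends exactly where PREV ends;
	 * both together mean the whole delalloc record is being filled.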
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
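		 * The delalloc record is split into three pieces: a shorter
		 * delalloc extent on the left, the new real extent in the
		 * middle, and a fresh delalloc extent on the right, with each
		 * delalloc piece getting its own worst-case indlen reservation.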
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
1907 */ 1908 ASSERT(0); 1909 } 1910 1911 /* add reverse mapping unless caller opted out */ 1912 if (!(bma->flags & XFS_BMAPI_NORMAP)) 1913 xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); 1914 1915 /* convert to a btree if necessary */ 1916 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1917 int tmp_logflags; /* partial log flag return val */ 1918 1919 ASSERT(bma->cur == NULL); 1920 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1921 &bma->cur, da_old > 0, &tmp_logflags, 1922 whichfork); 1923 bma->logflags |= tmp_logflags; 1924 if (error) 1925 goto done; 1926 } 1927 1928 if (da_new != da_old) 1929 xfs_mod_delalloc(mp, (int64_t)da_new - da_old); 1930 1931 if (bma->cur) { 1932 da_new += bma->cur->bc_ino.allocated; 1933 bma->cur->bc_ino.allocated = 0; 1934 } 1935 1936 /* adjust for changes in reserved delayed indirect blocks */ 1937 if (da_new != da_old) { 1938 ASSERT(state == 0 || da_new < da_old); 1939 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), 1940 false); 1941 } 1942 1943 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 1944 done: 1945 if (whichfork != XFS_COW_FORK) 1946 bma->logflags |= rval; 1947 return error; 1948 #undef LEFT 1949 #undef RIGHT 1950 #undef PREV 1951 } 1952 1953 /* 1954 * Convert an unwritten allocation to a real allocation or vice versa. 1955 */ 1956 int /* error */ 1957 xfs_bmap_add_extent_unwritten_real( 1958 struct xfs_trans *tp, 1959 xfs_inode_t *ip, /* incore inode pointer */ 1960 int whichfork, 1961 struct xfs_iext_cursor *icur, 1962 struct xfs_btree_cur **curp, /* if *curp is null, not a btree */ 1963 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 1964 int *logflagsp) /* inode logging flags */ 1965 { 1966 struct xfs_btree_cur *cur; /* btree cursor */ 1967 int error; /* error return value */ 1968 int i; /* temp state */ 1969 struct xfs_ifork *ifp; /* inode fork pointer */ 1970 xfs_fileoff_t new_endoff; /* end offset of new entry */ 1971 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 1972 /* left is 0, right is 1, prev is 2 */ 1973 int rval=0; /* return value (logging flags) */ 1974 uint32_t state = xfs_bmap_fork_to_state(whichfork); 1975 struct xfs_mount *mp = ip->i_mount; 1976 struct xfs_bmbt_irec old; 1977 1978 *logflagsp = 0; 1979 1980 cur = *curp; 1981 ifp = xfs_ifork_ptr(ip, whichfork); 1982 1983 ASSERT(!isnullstartblock(new->br_startblock)); 1984 1985 XFS_STATS_INC(mp, xs_add_exlist); 1986 1987 #define LEFT r[0] 1988 #define RIGHT r[1] 1989 #define PREV r[2] 1990 1991 /* 1992 * Set up a bunch of variables to make the tests simpler. 1993 */ 1994 error = 0; 1995 xfs_iext_get_extent(ifp, icur, &PREV); 1996 ASSERT(new->br_state != PREV.br_state); 1997 new_endoff = new->br_startoff + new->br_blockcount; 1998 ASSERT(PREV.br_startoff <= new->br_startoff); 1999 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2000 2001 /* 2002 * Set flags determining what part of the previous oldext allocation 2003 * extent is being replaced by a newext allocation. 2004 */ 2005 if (PREV.br_startoff == new->br_startoff) 2006 state |= BMAP_LEFT_FILLING; 2007 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2008 state |= BMAP_RIGHT_FILLING; 2009 2010 /* 2011 * Check and set flags if this segment has a left neighbor. 2012 * Don't set contiguous if the combined extent would be too large. 
2013 */ 2014 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { 2015 state |= BMAP_LEFT_VALID; 2016 if (isnullstartblock(LEFT.br_startblock)) 2017 state |= BMAP_LEFT_DELAY; 2018 } 2019 2020 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2021 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2022 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2023 LEFT.br_state == new->br_state && 2024 LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 2025 state |= BMAP_LEFT_CONTIG; 2026 2027 /* 2028 * Check and set flags if this segment has a right neighbor. 2029 * Don't set contiguous if the combined extent would be too large. 2030 * Also check for all-three-contiguous being too large. 2031 */ 2032 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { 2033 state |= BMAP_RIGHT_VALID; 2034 if (isnullstartblock(RIGHT.br_startblock)) 2035 state |= BMAP_RIGHT_DELAY; 2036 } 2037 2038 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2039 new_endoff == RIGHT.br_startoff && 2040 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2041 new->br_state == RIGHT.br_state && 2042 new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 2043 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2044 BMAP_RIGHT_FILLING)) != 2045 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2046 BMAP_RIGHT_FILLING) || 2047 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2048 <= XFS_MAX_BMBT_EXTLEN)) 2049 state |= BMAP_RIGHT_CONTIG; 2050 2051 /* 2052 * Switch out based on the FILLING and CONTIG state bits. 2053 */ 2054 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2055 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2056 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2057 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2058 /* 2059 * Setting all of a previous oldext extent to newext. 2060 * The left and right neighbors are both contiguous with new. 2061 */ 2062 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 2063 2064 xfs_iext_remove(ip, icur, state); 2065 xfs_iext_remove(ip, icur, state); 2066 xfs_iext_prev(ifp, icur); 2067 xfs_iext_update_extent(ip, state, icur, &LEFT); 2068 ifp->if_nextents -= 2; 2069 if (cur == NULL) 2070 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2071 else { 2072 rval = XFS_ILOG_CORE; 2073 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2074 if (error) 2075 goto done; 2076 if (XFS_IS_CORRUPT(mp, i != 1)) { 2077 error = -EFSCORRUPTED; 2078 goto done; 2079 } 2080 if ((error = xfs_btree_delete(cur, &i))) 2081 goto done; 2082 if (XFS_IS_CORRUPT(mp, i != 1)) { 2083 error = -EFSCORRUPTED; 2084 goto done; 2085 } 2086 if ((error = xfs_btree_decrement(cur, 0, &i))) 2087 goto done; 2088 if (XFS_IS_CORRUPT(mp, i != 1)) { 2089 error = -EFSCORRUPTED; 2090 goto done; 2091 } 2092 if ((error = xfs_btree_delete(cur, &i))) 2093 goto done; 2094 if (XFS_IS_CORRUPT(mp, i != 1)) { 2095 error = -EFSCORRUPTED; 2096 goto done; 2097 } 2098 if ((error = xfs_btree_decrement(cur, 0, &i))) 2099 goto done; 2100 if (XFS_IS_CORRUPT(mp, i != 1)) { 2101 error = -EFSCORRUPTED; 2102 goto done; 2103 } 2104 error = xfs_bmbt_update(cur, &LEFT); 2105 if (error) 2106 goto done; 2107 } 2108 break; 2109 2110 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2111 /* 2112 * Setting all of a previous oldext extent to newext. 2113 * The left neighbor is contiguous, the right is not. 
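 *
 * Hedged illustration with invented numbers: LEFT = [0, 40) already in
 * the new state and PREV = [40, 50) being converted in full (physical
 * blocks adjacent, as the contiguity checks above require). LEFT simply
 * absorbs PREV and becomes [0, 50); PREV's incore record is removed,
 * if_nextents drops by one, and if a cursor is present the btree
 * deletes PREV's record and rewrites LEFT in place.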
2114 */ 2115 LEFT.br_blockcount += PREV.br_blockcount; 2116 2117 xfs_iext_remove(ip, icur, state); 2118 xfs_iext_prev(ifp, icur); 2119 xfs_iext_update_extent(ip, state, icur, &LEFT); 2120 ifp->if_nextents--; 2121 if (cur == NULL) 2122 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2123 else { 2124 rval = XFS_ILOG_CORE; 2125 error = xfs_bmbt_lookup_eq(cur, &PREV, &i); 2126 if (error) 2127 goto done; 2128 if (XFS_IS_CORRUPT(mp, i != 1)) { 2129 error = -EFSCORRUPTED; 2130 goto done; 2131 } 2132 if ((error = xfs_btree_delete(cur, &i))) 2133 goto done; 2134 if (XFS_IS_CORRUPT(mp, i != 1)) { 2135 error = -EFSCORRUPTED; 2136 goto done; 2137 } 2138 if ((error = xfs_btree_decrement(cur, 0, &i))) 2139 goto done; 2140 if (XFS_IS_CORRUPT(mp, i != 1)) { 2141 error = -EFSCORRUPTED; 2142 goto done; 2143 } 2144 error = xfs_bmbt_update(cur, &LEFT); 2145 if (error) 2146 goto done; 2147 } 2148 break; 2149 2150 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2151 /* 2152 * Setting all of a previous oldext extent to newext. 2153 * The right neighbor is contiguous, the left is not. 2154 */ 2155 PREV.br_blockcount += RIGHT.br_blockcount; 2156 PREV.br_state = new->br_state; 2157 2158 xfs_iext_next(ifp, icur); 2159 xfs_iext_remove(ip, icur, state); 2160 xfs_iext_prev(ifp, icur); 2161 xfs_iext_update_extent(ip, state, icur, &PREV); 2162 ifp->if_nextents--; 2163 2164 if (cur == NULL) 2165 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2166 else { 2167 rval = XFS_ILOG_CORE; 2168 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2169 if (error) 2170 goto done; 2171 if (XFS_IS_CORRUPT(mp, i != 1)) { 2172 error = -EFSCORRUPTED; 2173 goto done; 2174 } 2175 if ((error = xfs_btree_delete(cur, &i))) 2176 goto done; 2177 if (XFS_IS_CORRUPT(mp, i != 1)) { 2178 error = -EFSCORRUPTED; 2179 goto done; 2180 } 2181 if ((error = xfs_btree_decrement(cur, 0, &i))) 2182 goto done; 2183 if (XFS_IS_CORRUPT(mp, i != 1)) { 2184 error = -EFSCORRUPTED; 2185 goto done; 2186 } 2187 error = xfs_bmbt_update(cur, &PREV); 2188 if (error) 2189 goto done; 2190 } 2191 break; 2192 2193 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2194 /* 2195 * Setting all of a previous oldext extent to newext. 2196 * Neither the left nor right neighbors are contiguous with 2197 * the new one. 2198 */ 2199 PREV.br_state = new->br_state; 2200 xfs_iext_update_extent(ip, state, icur, &PREV); 2201 2202 if (cur == NULL) 2203 rval = XFS_ILOG_DEXT; 2204 else { 2205 rval = 0; 2206 error = xfs_bmbt_lookup_eq(cur, new, &i); 2207 if (error) 2208 goto done; 2209 if (XFS_IS_CORRUPT(mp, i != 1)) { 2210 error = -EFSCORRUPTED; 2211 goto done; 2212 } 2213 error = xfs_bmbt_update(cur, &PREV); 2214 if (error) 2215 goto done; 2216 } 2217 break; 2218 2219 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2220 /* 2221 * Setting the first part of a previous oldext extent to newext. 2222 * The left neighbor is contiguous. 
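 *
 * Sketch with invented values: LEFT = [0, 10) newext, PREV = [10, 30)
 * oldext, new = [10, 15). LEFT grows to [0, 15) and PREV shrinks to
 * [15, 30), with PREV.br_startblock advanced by the same 5 blocks so
 * the logical and physical mappings stay in step.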
2223 */ 2224 LEFT.br_blockcount += new->br_blockcount; 2225 2226 old = PREV; 2227 PREV.br_startoff += new->br_blockcount; 2228 PREV.br_startblock += new->br_blockcount; 2229 PREV.br_blockcount -= new->br_blockcount; 2230 2231 xfs_iext_update_extent(ip, state, icur, &PREV); 2232 xfs_iext_prev(ifp, icur); 2233 xfs_iext_update_extent(ip, state, icur, &LEFT); 2234 2235 if (cur == NULL) 2236 rval = XFS_ILOG_DEXT; 2237 else { 2238 rval = 0; 2239 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2240 if (error) 2241 goto done; 2242 if (XFS_IS_CORRUPT(mp, i != 1)) { 2243 error = -EFSCORRUPTED; 2244 goto done; 2245 } 2246 error = xfs_bmbt_update(cur, &PREV); 2247 if (error) 2248 goto done; 2249 error = xfs_btree_decrement(cur, 0, &i); 2250 if (error) 2251 goto done; 2252 error = xfs_bmbt_update(cur, &LEFT); 2253 if (error) 2254 goto done; 2255 } 2256 break; 2257 2258 case BMAP_LEFT_FILLING: 2259 /* 2260 * Setting the first part of a previous oldext extent to newext. 2261 * The left neighbor is not contiguous. 2262 */ 2263 old = PREV; 2264 PREV.br_startoff += new->br_blockcount; 2265 PREV.br_startblock += new->br_blockcount; 2266 PREV.br_blockcount -= new->br_blockcount; 2267 2268 xfs_iext_update_extent(ip, state, icur, &PREV); 2269 xfs_iext_insert(ip, icur, new, state); 2270 ifp->if_nextents++; 2271 2272 if (cur == NULL) 2273 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2274 else { 2275 rval = XFS_ILOG_CORE; 2276 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2277 if (error) 2278 goto done; 2279 if (XFS_IS_CORRUPT(mp, i != 1)) { 2280 error = -EFSCORRUPTED; 2281 goto done; 2282 } 2283 error = xfs_bmbt_update(cur, &PREV); 2284 if (error) 2285 goto done; 2286 cur->bc_rec.b = *new; 2287 if ((error = xfs_btree_insert(cur, &i))) 2288 goto done; 2289 if (XFS_IS_CORRUPT(mp, i != 1)) { 2290 error = -EFSCORRUPTED; 2291 goto done; 2292 } 2293 } 2294 break; 2295 2296 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2297 /* 2298 * Setting the last part of a previous oldext extent to newext. 2299 * The right neighbor is contiguous with the new allocation. 2300 */ 2301 old = PREV; 2302 PREV.br_blockcount -= new->br_blockcount; 2303 2304 RIGHT.br_startoff = new->br_startoff; 2305 RIGHT.br_startblock = new->br_startblock; 2306 RIGHT.br_blockcount += new->br_blockcount; 2307 2308 xfs_iext_update_extent(ip, state, icur, &PREV); 2309 xfs_iext_next(ifp, icur); 2310 xfs_iext_update_extent(ip, state, icur, &RIGHT); 2311 2312 if (cur == NULL) 2313 rval = XFS_ILOG_DEXT; 2314 else { 2315 rval = 0; 2316 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2317 if (error) 2318 goto done; 2319 if (XFS_IS_CORRUPT(mp, i != 1)) { 2320 error = -EFSCORRUPTED; 2321 goto done; 2322 } 2323 error = xfs_bmbt_update(cur, &PREV); 2324 if (error) 2325 goto done; 2326 error = xfs_btree_increment(cur, 0, &i); 2327 if (error) 2328 goto done; 2329 error = xfs_bmbt_update(cur, &RIGHT); 2330 if (error) 2331 goto done; 2332 } 2333 break; 2334 2335 case BMAP_RIGHT_FILLING: 2336 /* 2337 * Setting the last part of a previous oldext extent to newext. 2338 * The right neighbor is not contiguous. 
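 *
 * Sketch with invented values: PREV = [10, 30) oldext and
 * new = [25, 30). PREV is trimmed to [10, 25) and a separate record in
 * the new state is inserted for [25, 30), so if_nextents grows by one
 * and, when a cursor is present, PREV is updated and the new record
 * inserted into the btree.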
2339 */ 2340 old = PREV; 2341 PREV.br_blockcount -= new->br_blockcount; 2342 2343 xfs_iext_update_extent(ip, state, icur, &PREV); 2344 xfs_iext_next(ifp, icur); 2345 xfs_iext_insert(ip, icur, new, state); 2346 ifp->if_nextents++; 2347 2348 if (cur == NULL) 2349 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2350 else { 2351 rval = XFS_ILOG_CORE; 2352 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2353 if (error) 2354 goto done; 2355 if (XFS_IS_CORRUPT(mp, i != 1)) { 2356 error = -EFSCORRUPTED; 2357 goto done; 2358 } 2359 error = xfs_bmbt_update(cur, &PREV); 2360 if (error) 2361 goto done; 2362 error = xfs_bmbt_lookup_eq(cur, new, &i); 2363 if (error) 2364 goto done; 2365 if (XFS_IS_CORRUPT(mp, i != 0)) { 2366 error = -EFSCORRUPTED; 2367 goto done; 2368 } 2369 if ((error = xfs_btree_insert(cur, &i))) 2370 goto done; 2371 if (XFS_IS_CORRUPT(mp, i != 1)) { 2372 error = -EFSCORRUPTED; 2373 goto done; 2374 } 2375 } 2376 break; 2377 2378 case 0: 2379 /* 2380 * Setting the middle part of a previous oldext extent to 2381 * newext. Contiguity is impossible here. 2382 * One extent becomes three extents. 2383 */ 2384 old = PREV; 2385 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 2386 2387 r[0] = *new; 2388 r[1].br_startoff = new_endoff; 2389 r[1].br_blockcount = 2390 old.br_startoff + old.br_blockcount - new_endoff; 2391 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2392 r[1].br_state = PREV.br_state; 2393 2394 xfs_iext_update_extent(ip, state, icur, &PREV); 2395 xfs_iext_next(ifp, icur); 2396 xfs_iext_insert(ip, icur, &r[1], state); 2397 xfs_iext_insert(ip, icur, &r[0], state); 2398 ifp->if_nextents += 2; 2399 2400 if (cur == NULL) 2401 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2402 else { 2403 rval = XFS_ILOG_CORE; 2404 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2405 if (error) 2406 goto done; 2407 if (XFS_IS_CORRUPT(mp, i != 1)) { 2408 error = -EFSCORRUPTED; 2409 goto done; 2410 } 2411 /* new right extent - oldext */ 2412 error = xfs_bmbt_update(cur, &r[1]); 2413 if (error) 2414 goto done; 2415 /* new left extent - oldext */ 2416 cur->bc_rec.b = PREV; 2417 if ((error = xfs_btree_insert(cur, &i))) 2418 goto done; 2419 if (XFS_IS_CORRUPT(mp, i != 1)) { 2420 error = -EFSCORRUPTED; 2421 goto done; 2422 } 2423 /* 2424 * Reset the cursor to the position of the new extent 2425 * we are about to insert as we can't trust it after 2426 * the previous insert. 2427 */ 2428 error = xfs_bmbt_lookup_eq(cur, new, &i); 2429 if (error) 2430 goto done; 2431 if (XFS_IS_CORRUPT(mp, i != 0)) { 2432 error = -EFSCORRUPTED; 2433 goto done; 2434 } 2435 /* new middle extent - newext */ 2436 if ((error = xfs_btree_insert(cur, &i))) 2437 goto done; 2438 if (XFS_IS_CORRUPT(mp, i != 1)) { 2439 error = -EFSCORRUPTED; 2440 goto done; 2441 } 2442 } 2443 break; 2444 2445 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2446 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2447 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2448 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2449 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2450 case BMAP_LEFT_CONTIG: 2451 case BMAP_RIGHT_CONTIG: 2452 /* 2453 * These cases are all impossible. 
2454 */ 2455 ASSERT(0); 2456 } 2457 2458 /* update reverse mappings */ 2459 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); 2460 2461 /* convert to a btree if necessary */ 2462 if (xfs_bmap_needs_btree(ip, whichfork)) { 2463 int tmp_logflags; /* partial log flag return val */ 2464 2465 ASSERT(cur == NULL); 2466 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 2467 &tmp_logflags, whichfork); 2468 *logflagsp |= tmp_logflags; 2469 if (error) 2470 goto done; 2471 } 2472 2473 /* clear out the allocated field, done with it now in any case. */ 2474 if (cur) { 2475 cur->bc_ino.allocated = 0; 2476 *curp = cur; 2477 } 2478 2479 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2480 done: 2481 *logflagsp |= rval; 2482 return error; 2483 #undef LEFT 2484 #undef RIGHT 2485 #undef PREV 2486 } 2487 2488 /* 2489 * Convert a hole to a delayed allocation. 2490 */ 2491 STATIC void 2492 xfs_bmap_add_extent_hole_delay( 2493 xfs_inode_t *ip, /* incore inode pointer */ 2494 int whichfork, 2495 struct xfs_iext_cursor *icur, 2496 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2497 { 2498 struct xfs_ifork *ifp; /* inode fork pointer */ 2499 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2500 xfs_filblks_t newlen=0; /* new indirect size */ 2501 xfs_filblks_t oldlen=0; /* old indirect size */ 2502 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2503 uint32_t state = xfs_bmap_fork_to_state(whichfork); 2504 xfs_filblks_t temp; /* temp for indirect calculations */ 2505 2506 ifp = xfs_ifork_ptr(ip, whichfork); 2507 ASSERT(isnullstartblock(new->br_startblock)); 2508 2509 /* 2510 * Check and set flags if this segment has a left neighbor 2511 */ 2512 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2513 state |= BMAP_LEFT_VALID; 2514 if (isnullstartblock(left.br_startblock)) 2515 state |= BMAP_LEFT_DELAY; 2516 } 2517 2518 /* 2519 * Check and set flags if the current (right) segment exists. 2520 * If it doesn't exist, we're converting the hole at end-of-file. 2521 */ 2522 if (xfs_iext_get_extent(ifp, icur, &right)) { 2523 state |= BMAP_RIGHT_VALID; 2524 if (isnullstartblock(right.br_startblock)) 2525 state |= BMAP_RIGHT_DELAY; 2526 } 2527 2528 /* 2529 * Set contiguity flags on the left and right neighbors. 2530 * Don't let extents get too large, even if the pieces are contiguous. 2531 */ 2532 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2533 left.br_startoff + left.br_blockcount == new->br_startoff && 2534 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 2535 state |= BMAP_LEFT_CONTIG; 2536 2537 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2538 new->br_startoff + new->br_blockcount == right.br_startoff && 2539 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 2540 (!(state & BMAP_LEFT_CONTIG) || 2541 (left.br_blockcount + new->br_blockcount + 2542 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))) 2543 state |= BMAP_RIGHT_CONTIG; 2544 2545 /* 2546 * Switch out based on the contiguity flags. 2547 */ 2548 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2549 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2550 /* 2551 * New allocation is contiguous with delayed allocations 2552 * on the left and on the right. 2553 * Merge all three into a single extent record. 
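 *
 * Illustrative arithmetic (invented values): left, new and right of
 * 8, 4 and 8 blocks merge into a single 20 block delalloc record.
 * oldlen is the sum of the three existing indirect-block reservations
 * and newlen is capped at min(worst-case indlen for 20 blocks, oldlen),
 * so merging can only release reservation, never grow it; that is what
 * the ASSERT(oldlen > newlen) near the end of this function relies on.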
2554 */ 2555 temp = left.br_blockcount + new->br_blockcount + 2556 right.br_blockcount; 2557 2558 oldlen = startblockval(left.br_startblock) + 2559 startblockval(new->br_startblock) + 2560 startblockval(right.br_startblock); 2561 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2562 oldlen); 2563 left.br_startblock = nullstartblock(newlen); 2564 left.br_blockcount = temp; 2565 2566 xfs_iext_remove(ip, icur, state); 2567 xfs_iext_prev(ifp, icur); 2568 xfs_iext_update_extent(ip, state, icur, &left); 2569 break; 2570 2571 case BMAP_LEFT_CONTIG: 2572 /* 2573 * New allocation is contiguous with a delayed allocation 2574 * on the left. 2575 * Merge the new allocation with the left neighbor. 2576 */ 2577 temp = left.br_blockcount + new->br_blockcount; 2578 2579 oldlen = startblockval(left.br_startblock) + 2580 startblockval(new->br_startblock); 2581 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2582 oldlen); 2583 left.br_blockcount = temp; 2584 left.br_startblock = nullstartblock(newlen); 2585 2586 xfs_iext_prev(ifp, icur); 2587 xfs_iext_update_extent(ip, state, icur, &left); 2588 break; 2589 2590 case BMAP_RIGHT_CONTIG: 2591 /* 2592 * New allocation is contiguous with a delayed allocation 2593 * on the right. 2594 * Merge the new allocation with the right neighbor. 2595 */ 2596 temp = new->br_blockcount + right.br_blockcount; 2597 oldlen = startblockval(new->br_startblock) + 2598 startblockval(right.br_startblock); 2599 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2600 oldlen); 2601 right.br_startoff = new->br_startoff; 2602 right.br_startblock = nullstartblock(newlen); 2603 right.br_blockcount = temp; 2604 xfs_iext_update_extent(ip, state, icur, &right); 2605 break; 2606 2607 case 0: 2608 /* 2609 * New allocation is not contiguous with another 2610 * delayed allocation. 2611 * Insert a new entry. 2612 */ 2613 oldlen = newlen = 0; 2614 xfs_iext_insert(ip, icur, new, state); 2615 break; 2616 } 2617 if (oldlen != newlen) { 2618 ASSERT(oldlen > newlen); 2619 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2620 false); 2621 /* 2622 * Nothing to do for disk quota accounting here. 2623 */ 2624 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen); 2625 } 2626 } 2627 2628 /* 2629 * Convert a hole to a real allocation. 2630 */ 2631 STATIC int /* error */ 2632 xfs_bmap_add_extent_hole_real( 2633 struct xfs_trans *tp, 2634 struct xfs_inode *ip, 2635 int whichfork, 2636 struct xfs_iext_cursor *icur, 2637 struct xfs_btree_cur **curp, 2638 struct xfs_bmbt_irec *new, 2639 int *logflagsp, 2640 uint32_t flags) 2641 { 2642 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 2643 struct xfs_mount *mp = ip->i_mount; 2644 struct xfs_btree_cur *cur = *curp; 2645 int error; /* error return value */ 2646 int i; /* temp state */ 2647 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2648 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2649 int rval=0; /* return value (logging flags) */ 2650 uint32_t state = xfs_bmap_fork_to_state(whichfork); 2651 struct xfs_bmbt_irec old; 2652 2653 ASSERT(!isnullstartblock(new->br_startblock)); 2654 ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL)); 2655 2656 XFS_STATS_INC(mp, xs_add_exlist); 2657 2658 /* 2659 * Check and set flags if this segment has a left neighbor. 
2660 */ 2661 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2662 state |= BMAP_LEFT_VALID; 2663 if (isnullstartblock(left.br_startblock)) 2664 state |= BMAP_LEFT_DELAY; 2665 } 2666 2667 /* 2668 * Check and set flags if this segment has a current value. 2669 * Not true if we're inserting into the "hole" at eof. 2670 */ 2671 if (xfs_iext_get_extent(ifp, icur, &right)) { 2672 state |= BMAP_RIGHT_VALID; 2673 if (isnullstartblock(right.br_startblock)) 2674 state |= BMAP_RIGHT_DELAY; 2675 } 2676 2677 /* 2678 * We're inserting a real allocation between "left" and "right". 2679 * Set the contiguity flags. Don't let extents get too large. 2680 */ 2681 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2682 left.br_startoff + left.br_blockcount == new->br_startoff && 2683 left.br_startblock + left.br_blockcount == new->br_startblock && 2684 left.br_state == new->br_state && 2685 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 2686 state |= BMAP_LEFT_CONTIG; 2687 2688 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2689 new->br_startoff + new->br_blockcount == right.br_startoff && 2690 new->br_startblock + new->br_blockcount == right.br_startblock && 2691 new->br_state == right.br_state && 2692 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 2693 (!(state & BMAP_LEFT_CONTIG) || 2694 left.br_blockcount + new->br_blockcount + 2695 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)) 2696 state |= BMAP_RIGHT_CONTIG; 2697 2698 error = 0; 2699 /* 2700 * Select which case we're in here, and implement it. 2701 */ 2702 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2703 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2704 /* 2705 * New allocation is contiguous with real allocations on the 2706 * left and on the right. 2707 * Merge all three into a single extent record. 2708 */ 2709 left.br_blockcount += new->br_blockcount + right.br_blockcount; 2710 2711 xfs_iext_remove(ip, icur, state); 2712 xfs_iext_prev(ifp, icur); 2713 xfs_iext_update_extent(ip, state, icur, &left); 2714 ifp->if_nextents--; 2715 2716 if (cur == NULL) { 2717 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2718 } else { 2719 rval = XFS_ILOG_CORE; 2720 error = xfs_bmbt_lookup_eq(cur, &right, &i); 2721 if (error) 2722 goto done; 2723 if (XFS_IS_CORRUPT(mp, i != 1)) { 2724 error = -EFSCORRUPTED; 2725 goto done; 2726 } 2727 error = xfs_btree_delete(cur, &i); 2728 if (error) 2729 goto done; 2730 if (XFS_IS_CORRUPT(mp, i != 1)) { 2731 error = -EFSCORRUPTED; 2732 goto done; 2733 } 2734 error = xfs_btree_decrement(cur, 0, &i); 2735 if (error) 2736 goto done; 2737 if (XFS_IS_CORRUPT(mp, i != 1)) { 2738 error = -EFSCORRUPTED; 2739 goto done; 2740 } 2741 error = xfs_bmbt_update(cur, &left); 2742 if (error) 2743 goto done; 2744 } 2745 break; 2746 2747 case BMAP_LEFT_CONTIG: 2748 /* 2749 * New allocation is contiguous with a real allocation 2750 * on the left. 2751 * Merge the new allocation with the left neighbor. 
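 *
 * Sketch with invented values: left = [0, 20) mapped at fsblocks
 * 1000..1019 and new = [20, 25) mapped at 1020..1024. The two are
 * logically and physically adjacent, so left simply becomes [0, 25)
 * with no change to if_nextents; when a cursor is present the btree
 * record is rewritten in place using the saved pre-merge key in "old".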
2752 */ 2753 old = left; 2754 left.br_blockcount += new->br_blockcount; 2755 2756 xfs_iext_prev(ifp, icur); 2757 xfs_iext_update_extent(ip, state, icur, &left); 2758 2759 if (cur == NULL) { 2760 rval = xfs_ilog_fext(whichfork); 2761 } else { 2762 rval = 0; 2763 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2764 if (error) 2765 goto done; 2766 if (XFS_IS_CORRUPT(mp, i != 1)) { 2767 error = -EFSCORRUPTED; 2768 goto done; 2769 } 2770 error = xfs_bmbt_update(cur, &left); 2771 if (error) 2772 goto done; 2773 } 2774 break; 2775 2776 case BMAP_RIGHT_CONTIG: 2777 /* 2778 * New allocation is contiguous with a real allocation 2779 * on the right. 2780 * Merge the new allocation with the right neighbor. 2781 */ 2782 old = right; 2783 2784 right.br_startoff = new->br_startoff; 2785 right.br_startblock = new->br_startblock; 2786 right.br_blockcount += new->br_blockcount; 2787 xfs_iext_update_extent(ip, state, icur, &right); 2788 2789 if (cur == NULL) { 2790 rval = xfs_ilog_fext(whichfork); 2791 } else { 2792 rval = 0; 2793 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2794 if (error) 2795 goto done; 2796 if (XFS_IS_CORRUPT(mp, i != 1)) { 2797 error = -EFSCORRUPTED; 2798 goto done; 2799 } 2800 error = xfs_bmbt_update(cur, &right); 2801 if (error) 2802 goto done; 2803 } 2804 break; 2805 2806 case 0: 2807 /* 2808 * New allocation is not contiguous with another 2809 * real allocation. 2810 * Insert a new entry. 2811 */ 2812 xfs_iext_insert(ip, icur, new, state); 2813 ifp->if_nextents++; 2814 2815 if (cur == NULL) { 2816 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2817 } else { 2818 rval = XFS_ILOG_CORE; 2819 error = xfs_bmbt_lookup_eq(cur, new, &i); 2820 if (error) 2821 goto done; 2822 if (XFS_IS_CORRUPT(mp, i != 0)) { 2823 error = -EFSCORRUPTED; 2824 goto done; 2825 } 2826 error = xfs_btree_insert(cur, &i); 2827 if (error) 2828 goto done; 2829 if (XFS_IS_CORRUPT(mp, i != 1)) { 2830 error = -EFSCORRUPTED; 2831 goto done; 2832 } 2833 } 2834 break; 2835 } 2836 2837 /* add reverse mapping unless caller opted out */ 2838 if (!(flags & XFS_BMAPI_NORMAP)) 2839 xfs_rmap_map_extent(tp, ip, whichfork, new); 2840 2841 /* convert to a btree if necessary */ 2842 if (xfs_bmap_needs_btree(ip, whichfork)) { 2843 int tmp_logflags; /* partial log flag return val */ 2844 2845 ASSERT(cur == NULL); 2846 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2847 &tmp_logflags, whichfork); 2848 *logflagsp |= tmp_logflags; 2849 cur = *curp; 2850 if (error) 2851 goto done; 2852 } 2853 2854 /* clear out the allocated field, done with it now in any case. */ 2855 if (cur) 2856 cur->bc_ino.allocated = 0; 2857 2858 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 2859 done: 2860 *logflagsp |= rval; 2861 return error; 2862 } 2863 2864 /* 2865 * Functions used in the extent read, allocate and remove paths 2866 */ 2867 2868 /* 2869 * Adjust the size of the new extent based on i_extsize and rt extsize. 2870 */ 2871 int 2872 xfs_bmap_extsize_align( 2873 xfs_mount_t *mp, 2874 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2875 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2876 xfs_extlen_t extsz, /* align to this extent size */ 2877 int rt, /* is this a realtime inode? */ 2878 int eof, /* is extent at end-of-file? */ 2879 int delay, /* creating delalloc extent? */ 2880 int convert, /* overwriting unwritten extent? 
*/ 2881 xfs_fileoff_t *offp, /* in/out: aligned offset */ 2882 xfs_extlen_t *lenp) /* in/out: aligned length */ 2883 { 2884 xfs_fileoff_t orig_off; /* original offset */ 2885 xfs_extlen_t orig_alen; /* original length */ 2886 xfs_fileoff_t orig_end; /* original off+len */ 2887 xfs_fileoff_t nexto; /* next file offset */ 2888 xfs_fileoff_t prevo; /* previous file offset */ 2889 xfs_fileoff_t align_off; /* temp for offset */ 2890 xfs_extlen_t align_alen; /* temp for length */ 2891 xfs_extlen_t temp; /* temp for calculations */ 2892 2893 if (convert) 2894 return 0; 2895 2896 orig_off = align_off = *offp; 2897 orig_alen = align_alen = *lenp; 2898 orig_end = orig_off + orig_alen; 2899 2900 /* 2901 * If this request overlaps an existing extent, then don't 2902 * attempt to perform any additional alignment. 2903 */ 2904 if (!delay && !eof && 2905 (orig_off >= gotp->br_startoff) && 2906 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 2907 return 0; 2908 } 2909 2910 /* 2911 * If the file offset is unaligned vs. the extent size 2912 * we need to align it. This will be possible unless 2913 * the file was previously written with a kernel that didn't 2914 * perform this alignment, or if a truncate shot us in the 2915 * foot. 2916 */ 2917 div_u64_rem(orig_off, extsz, &temp); 2918 if (temp) { 2919 align_alen += temp; 2920 align_off -= temp; 2921 } 2922 2923 /* Same adjustment for the end of the requested area. */ 2924 temp = (align_alen % extsz); 2925 if (temp) 2926 align_alen += extsz - temp; 2927 2928 /* 2929 * For large extent hint sizes, the aligned extent might be larger than 2930 * XFS_BMBT_MAX_EXTLEN. In that case, reduce the size by an extsz so 2931 * that it pulls the length back under XFS_BMBT_MAX_EXTLEN. The outer 2932 * allocation loops handle short allocation just fine, so it is safe to 2933 * do this. We only want to do it when we are forced to, though, because 2934 * it means more allocation operations are required. 2935 */ 2936 while (align_alen > XFS_MAX_BMBT_EXTLEN) 2937 align_alen -= extsz; 2938 ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN); 2939 2940 /* 2941 * If the previous block overlaps with this proposed allocation 2942 * then move the start forward without adjusting the length. 2943 */ 2944 if (prevp->br_startoff != NULLFILEOFF) { 2945 if (prevp->br_startblock == HOLESTARTBLOCK) 2946 prevo = prevp->br_startoff; 2947 else 2948 prevo = prevp->br_startoff + prevp->br_blockcount; 2949 } else 2950 prevo = 0; 2951 if (align_off != orig_off && align_off < prevo) 2952 align_off = prevo; 2953 /* 2954 * If the next block overlaps with this proposed allocation 2955 * then move the start back without adjusting the length, 2956 * but not before offset 0. 2957 * This may of course make the start overlap previous block, 2958 * and if we hit the offset 0 limit then the next block 2959 * can still overlap too. 2960 */ 2961 if (!eof && gotp->br_startoff != NULLFILEOFF) { 2962 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 2963 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 2964 nexto = gotp->br_startoff + gotp->br_blockcount; 2965 else 2966 nexto = gotp->br_startoff; 2967 } else 2968 nexto = NULLFILEOFF; 2969 if (!eof && 2970 align_off + align_alen != orig_end && 2971 align_off + align_alen > nexto) 2972 align_off = nexto > align_alen ? nexto - align_alen : 0; 2973 /* 2974 * If we're now overlapping the next or previous extent that 2975 * means we can't fit an extsz piece in this hole. 
Just move 2976 * the start forward to the first valid spot and set 2977 * the length so we hit the end. 2978 */ 2979 if (align_off != orig_off && align_off < prevo) 2980 align_off = prevo; 2981 if (align_off + align_alen != orig_end && 2982 align_off + align_alen > nexto && 2983 nexto != NULLFILEOFF) { 2984 ASSERT(nexto > prevo); 2985 align_alen = nexto - align_off; 2986 } 2987 2988 /* 2989 * If realtime, and the result isn't a multiple of the realtime 2990 * extent size we need to remove blocks until it is. 2991 */ 2992 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 2993 /* 2994 * We're not covering the original request, or 2995 * we won't be able to once we fix the length. 2996 */ 2997 if (orig_off < align_off || 2998 orig_end > align_off + align_alen || 2999 align_alen - temp < orig_alen) 3000 return -EINVAL; 3001 /* 3002 * Try to fix it by moving the start up. 3003 */ 3004 if (align_off + temp <= orig_off) { 3005 align_alen -= temp; 3006 align_off += temp; 3007 } 3008 /* 3009 * Try to fix it by moving the end in. 3010 */ 3011 else if (align_off + align_alen - temp >= orig_end) 3012 align_alen -= temp; 3013 /* 3014 * Set the start to the minimum then trim the length. 3015 */ 3016 else { 3017 align_alen -= orig_off - align_off; 3018 align_off = orig_off; 3019 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3020 } 3021 /* 3022 * Result doesn't cover the request, fail it. 3023 */ 3024 if (orig_off < align_off || orig_end > align_off + align_alen) 3025 return -EINVAL; 3026 } else { 3027 ASSERT(orig_off >= align_off); 3028 /* see XFS_BMBT_MAX_EXTLEN handling above */ 3029 ASSERT(orig_end <= align_off + align_alen || 3030 align_alen + extsz > XFS_MAX_BMBT_EXTLEN); 3031 } 3032 3033 #ifdef DEBUG 3034 if (!eof && gotp->br_startoff != NULLFILEOFF) 3035 ASSERT(align_off + align_alen <= gotp->br_startoff); 3036 if (prevp->br_startoff != NULLFILEOFF) 3037 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3038 #endif 3039 3040 *lenp = align_alen; 3041 *offp = align_off; 3042 return 0; 3043 } 3044 3045 #define XFS_ALLOC_GAP_UNITS 4 3046 3047 void 3048 xfs_bmap_adjacent( 3049 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3050 { 3051 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3052 xfs_mount_t *mp; /* mount point structure */ 3053 int rt; /* true if inode is realtime */ 3054 3055 #define ISVALID(x,y) \ 3056 (rt ? \ 3057 (x) < mp->m_sb.sb_rblocks : \ 3058 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3059 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3060 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3061 3062 mp = ap->ip->i_mount; 3063 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3064 (ap->datatype & XFS_ALLOC_USERDATA); 3065 /* 3066 * If allocating at eof, and there's a previous real block, 3067 * try to use its last block as our starting point. 3068 */ 3069 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3070 !isnullstartblock(ap->prev.br_startblock) && 3071 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3072 ap->prev.br_startblock)) { 3073 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3074 /* 3075 * Adjust for the gap between prevp and us. 3076 */ 3077 adjust = ap->offset - 3078 (ap->prev.br_startoff + ap->prev.br_blockcount); 3079 if (adjust && 3080 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3081 ap->blkno += adjust; 3082 } 3083 /* 3084 * If not at eof, then compare the two neighbor blocks. 3085 * Figure out whether either one gives us a good starting point, 3086 * and pick the better one. 
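 *
 * Rough example with invented numbers: the previous extent ends at
 * fsblock 999 with a 3 block file-offset gap to this request, while
 * the next extent starts at fsblock 5000 with a 200 block gap, and we
 * are allocating 16 blocks. The 3 block gap is within
 * XFS_ALLOC_GAP_UNITS (4) times the request length, the 200 block gap
 * is not, so prevdiff stays small while gotdiff is penalised and the
 * left-side candidate (block 1003) wins the comparison below.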
3087 */ 3088 else if (!ap->eof) { 3089 xfs_fsblock_t gotbno; /* right side block number */ 3090 xfs_fsblock_t gotdiff=0; /* right side difference */ 3091 xfs_fsblock_t prevbno; /* left side block number */ 3092 xfs_fsblock_t prevdiff=0; /* left side difference */ 3093 3094 /* 3095 * If there's a previous (left) block, select a requested 3096 * start block based on it. 3097 */ 3098 if (ap->prev.br_startoff != NULLFILEOFF && 3099 !isnullstartblock(ap->prev.br_startblock) && 3100 (prevbno = ap->prev.br_startblock + 3101 ap->prev.br_blockcount) && 3102 ISVALID(prevbno, ap->prev.br_startblock)) { 3103 /* 3104 * Calculate gap to end of previous block. 3105 */ 3106 adjust = prevdiff = ap->offset - 3107 (ap->prev.br_startoff + 3108 ap->prev.br_blockcount); 3109 /* 3110 * Figure the startblock based on the previous block's 3111 * end and the gap size. 3112 * Heuristic! 3113 * If the gap is large relative to the piece we're 3114 * allocating, or using it gives us an invalid block 3115 * number, then just use the end of the previous block. 3116 */ 3117 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3118 ISVALID(prevbno + prevdiff, 3119 ap->prev.br_startblock)) 3120 prevbno += adjust; 3121 else 3122 prevdiff += adjust; 3123 } 3124 /* 3125 * No previous block or can't follow it, just default. 3126 */ 3127 else 3128 prevbno = NULLFSBLOCK; 3129 /* 3130 * If there's a following (right) block, select a requested 3131 * start block based on it. 3132 */ 3133 if (!isnullstartblock(ap->got.br_startblock)) { 3134 /* 3135 * Calculate gap to start of next block. 3136 */ 3137 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3138 /* 3139 * Figure the startblock based on the next block's 3140 * start and the gap size. 3141 */ 3142 gotbno = ap->got.br_startblock; 3143 /* 3144 * Heuristic! 3145 * If the gap is large relative to the piece we're 3146 * allocating, or using it gives us an invalid block 3147 * number, then just use the start of the next block 3148 * offset by our length. 3149 */ 3150 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3151 ISVALID(gotbno - gotdiff, gotbno)) 3152 gotbno -= adjust; 3153 else if (ISVALID(gotbno - ap->length, gotbno)) { 3154 gotbno -= ap->length; 3155 gotdiff += adjust - ap->length; 3156 } else 3157 gotdiff += adjust; 3158 } 3159 /* 3160 * No next block, just default. 3161 */ 3162 else 3163 gotbno = NULLFSBLOCK; 3164 /* 3165 * If both valid, pick the better one, else the only good 3166 * one, else ap->blkno is already set (to 0 or the inode block). 3167 */ 3168 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3169 ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno; 3170 else if (prevbno != NULLFSBLOCK) 3171 ap->blkno = prevbno; 3172 else if (gotbno != NULLFSBLOCK) 3173 ap->blkno = gotbno; 3174 } 3175 #undef ISVALID 3176 } 3177 3178 int 3179 xfs_bmap_longest_free_extent( 3180 struct xfs_perag *pag, 3181 struct xfs_trans *tp, 3182 xfs_extlen_t *blen) 3183 { 3184 xfs_extlen_t longest; 3185 int error = 0; 3186 3187 if (!xfs_perag_initialised_agf(pag)) { 3188 error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK, 3189 NULL); 3190 if (error) 3191 return error; 3192 } 3193 3194 longest = xfs_alloc_longest_free_extent(pag, 3195 xfs_alloc_min_freelist(pag->pag_mount, pag), 3196 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3197 if (*blen < longest) 3198 *blen = longest; 3199 3200 return 0; 3201 } 3202 3203 static xfs_extlen_t 3204 xfs_bmap_select_minlen( 3205 struct xfs_bmalloca *ap, 3206 struct xfs_alloc_arg *args, 3207 xfs_extlen_t blen) 3208 { 3209 3210 /* 3211 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), it is 3212 * possible that there is enough contiguous free space for this request. 3213 */ 3214 if (blen < ap->minlen) 3215 return ap->minlen; 3216 3217 /* 3218 * If the best seen length is less than the request length, 3219 * use the best as the minimum, otherwise we've got the maxlen we 3220 * were asked for. 3221 */ 3222 if (blen < args->maxlen) 3223 return blen; 3224 return args->maxlen; 3225 } 3226 3227 static int 3228 xfs_bmap_btalloc_select_lengths( 3229 struct xfs_bmalloca *ap, 3230 struct xfs_alloc_arg *args, 3231 xfs_extlen_t *blen) 3232 { 3233 struct xfs_mount *mp = args->mp; 3234 struct xfs_perag *pag; 3235 xfs_agnumber_t agno, startag; 3236 int error = 0; 3237 3238 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3239 args->total = ap->minlen; 3240 args->minlen = ap->minlen; 3241 return 0; 3242 } 3243 3244 args->total = ap->total; 3245 startag = XFS_FSB_TO_AGNO(mp, ap->blkno); 3246 if (startag == NULLAGNUMBER) 3247 startag = 0; 3248 3249 *blen = 0; 3250 for_each_perag_wrap(mp, startag, agno, pag) { 3251 error = xfs_bmap_longest_free_extent(pag, args->tp, blen); 3252 if (error && error != -EAGAIN) 3253 break; 3254 error = 0; 3255 if (*blen >= args->maxlen) 3256 break; 3257 } 3258 if (pag) 3259 xfs_perag_rele(pag); 3260 3261 args->minlen = xfs_bmap_select_minlen(ap, args, *blen); 3262 return error; 3263 } 3264 3265 /* Update all inode and quota accounting for the allocation we just did. */ 3266 static void 3267 xfs_bmap_btalloc_accounting( 3268 struct xfs_bmalloca *ap, 3269 struct xfs_alloc_arg *args) 3270 { 3271 if (ap->flags & XFS_BMAPI_COWFORK) { 3272 /* 3273 * COW fork blocks are in-core only and thus are treated as 3274 * in-core quota reservation (like delalloc blocks) even when 3275 * converted to real blocks. The quota reservation is not 3276 * accounted to disk until blocks are remapped to the data 3277 * fork. So if these blocks were previously delalloc, we 3278 * already have quota reservation and there's nothing to do 3279 * yet. 3280 */ 3281 if (ap->wasdel) { 3282 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len); 3283 return; 3284 } 3285 3286 /* 3287 * Otherwise, we've allocated blocks in a hole. The transaction 3288 * has acquired in-core quota reservation for this extent. 3289 * Rather than account these as real blocks, however, we reduce 3290 * the transaction quota reservation based on the allocation. 3291 * This essentially transfers the transaction quota reservation 3292 * to that of a delalloc extent. 
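 *
 * In code terms, for this non-delalloc COW case: i_delayed_blks grows
 * by args->len and the same number of XFS_TRANS_DQ_RES_BLKS is handed
 * back out of the transaction's quota reservation just below, so the
 * new blocks end up accounted like a delalloc reservation rather than
 * as on-disk blocks.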
3293 */ 3294 ap->ip->i_delayed_blks += args->len; 3295 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 3296 -(long)args->len); 3297 return; 3298 } 3299 3300 /* data/attr fork only */ 3301 ap->ip->i_nblocks += args->len; 3302 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3303 if (ap->wasdel) { 3304 ap->ip->i_delayed_blks -= args->len; 3305 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len); 3306 } 3307 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3308 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3309 args->len); 3310 } 3311 3312 static int 3313 xfs_bmap_compute_alignments( 3314 struct xfs_bmalloca *ap, 3315 struct xfs_alloc_arg *args) 3316 { 3317 struct xfs_mount *mp = args->mp; 3318 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3319 int stripe_align = 0; 3320 3321 /* stripe alignment for allocation is determined by mount parameters */ 3322 if (mp->m_swidth && xfs_has_swalloc(mp)) 3323 stripe_align = mp->m_swidth; 3324 else if (mp->m_dalign) 3325 stripe_align = mp->m_dalign; 3326 3327 if (ap->flags & XFS_BMAPI_COWFORK) 3328 align = xfs_get_cowextsz_hint(ap->ip); 3329 else if (ap->datatype & XFS_ALLOC_USERDATA) 3330 align = xfs_get_extsz_hint(ap->ip); 3331 if (align) { 3332 if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0, 3333 ap->eof, 0, ap->conv, &ap->offset, 3334 &ap->length)) 3335 ASSERT(0); 3336 ASSERT(ap->length); 3337 } 3338 3339 /* apply extent size hints if obtained earlier */ 3340 if (align) { 3341 args->prod = align; 3342 div_u64_rem(ap->offset, args->prod, &args->mod); 3343 if (args->mod) 3344 args->mod = args->prod - args->mod; 3345 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3346 args->prod = 1; 3347 args->mod = 0; 3348 } else { 3349 args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3350 div_u64_rem(ap->offset, args->prod, &args->mod); 3351 if (args->mod) 3352 args->mod = args->prod - args->mod; 3353 } 3354 3355 return stripe_align; 3356 } 3357 3358 static void 3359 xfs_bmap_process_allocated_extent( 3360 struct xfs_bmalloca *ap, 3361 struct xfs_alloc_arg *args, 3362 xfs_fileoff_t orig_offset, 3363 xfs_extlen_t orig_length) 3364 { 3365 ap->blkno = args->fsbno; 3366 ap->length = args->len; 3367 /* 3368 * If the extent size hint is active, we tried to round the 3369 * caller's allocation request offset down to extsz and the 3370 * length up to another extsz boundary. If we found a free 3371 * extent we mapped it in starting at this new offset. If the 3372 * newly mapped space isn't long enough to cover any of the 3373 * range of offsets that was originally requested, move the 3374 * mapping up so that we can fill as much of the caller's 3375 * original request as possible. Free space is apparently 3376 * very fragmented so we're unlikely to be able to satisfy the 3377 * hints anyway. 
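 *
 * Hypothetical example: the caller asked for offset 100, length 10
 * (covering 100..109) and the extent size hint widened that to offset
 * 96, length 32. If the allocator only returned 12 blocks, mapping
 * them at 96 would cover 96..107 and miss the tail of the request, so
 * the mapping is slid up to offset 98 (98..109). Had the allocation
 * been no longer than the original request, it would simply have been
 * placed back at offset 100.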
3378 */ 3379 if (ap->length <= orig_length) 3380 ap->offset = orig_offset; 3381 else if (ap->offset + ap->length < orig_offset + orig_length) 3382 ap->offset = orig_offset + orig_length - ap->length; 3383 xfs_bmap_btalloc_accounting(ap, args); 3384 } 3385 3386 #ifdef DEBUG 3387 static int 3388 xfs_bmap_exact_minlen_extent_alloc( 3389 struct xfs_bmalloca *ap) 3390 { 3391 struct xfs_mount *mp = ap->ip->i_mount; 3392 struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp }; 3393 xfs_fileoff_t orig_offset; 3394 xfs_extlen_t orig_length; 3395 int error; 3396 3397 ASSERT(ap->length); 3398 3399 if (ap->minlen != 1) { 3400 ap->blkno = NULLFSBLOCK; 3401 ap->length = 0; 3402 return 0; 3403 } 3404 3405 orig_offset = ap->offset; 3406 orig_length = ap->length; 3407 3408 args.alloc_minlen_only = 1; 3409 3410 xfs_bmap_compute_alignments(ap, &args); 3411 3412 /* 3413 * Unlike the longest extent available in an AG, we don't track 3414 * the length of an AG's shortest extent. 3415 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and 3416 * hence we can afford to start traversing from the 0th AG since 3417 * we need not be concerned about a drop in performance in 3418 * "debug only" code paths. 3419 */ 3420 ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0); 3421 3422 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; 3423 args.minlen = args.maxlen = ap->minlen; 3424 args.total = ap->total; 3425 3426 args.alignment = 1; 3427 args.minalignslop = 0; 3428 3429 args.minleft = ap->minleft; 3430 args.wasdel = ap->wasdel; 3431 args.resv = XFS_AG_RESV_NONE; 3432 args.datatype = ap->datatype; 3433 3434 error = xfs_alloc_vextent_first_ag(&args, ap->blkno); 3435 if (error) 3436 return error; 3437 3438 if (args.fsbno != NULLFSBLOCK) { 3439 xfs_bmap_process_allocated_extent(ap, &args, orig_offset, 3440 orig_length); 3441 } else { 3442 ap->blkno = NULLFSBLOCK; 3443 ap->length = 0; 3444 } 3445 3446 return 0; 3447 } 3448 #else 3449 3450 #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED) 3451 3452 #endif 3453 3454 /* 3455 * If we are not low on available data blocks and we are allocating at 3456 * EOF, optimise allocation for contiguous file extension and/or stripe 3457 * alignment of the new extent. 3458 * 3459 * NOTE: ap->aeof is only set if the allocation length is >= the 3460 * stripe unit and the allocation offset is at the end of file. 3461 */ 3462 static int 3463 xfs_bmap_btalloc_at_eof( 3464 struct xfs_bmalloca *ap, 3465 struct xfs_alloc_arg *args, 3466 xfs_extlen_t blen, 3467 int stripe_align, 3468 bool ag_only) 3469 { 3470 struct xfs_mount *mp = args->mp; 3471 struct xfs_perag *caller_pag = args->pag; 3472 int error; 3473 3474 /* 3475 * If there are already extents in the file, try an exact EOF block 3476 * allocation to extend the file as a contiguous extent. If that fails, 3477 * or it's the first allocation in a file, just try for a stripe aligned 3478 * allocation. 3479 */ 3480 if (ap->offset) { 3481 xfs_extlen_t nextminlen = 0; 3482 3483 /* 3484 * Compute the minlen+alignment for the next case. Set slop so 3485 * that the value of minlen+alignment+slop doesn't go up between 3486 * the calls. 
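 *
 * Worked example with invented numbers: stripe_align = 16, minlen = 4,
 * blen = 40, maxlen = 64. The exact EOF attempt runs with alignment 1,
 * nextminlen = 40 - 16 = 24 and minalignslop = 24 + 16 - 4 - 1 = 35;
 * if it fails, the retry uses alignment 16 and minlen 24 with no slop,
 * so minlen + alignment + slop is 40 in both calls and the allocator's
 * worst-case space requirement does not grow between them.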
3487 */ 3488 args->alignment = 1; 3489 if (blen > stripe_align && blen <= args->maxlen) 3490 nextminlen = blen - stripe_align; 3491 else 3492 nextminlen = args->minlen; 3493 if (nextminlen + stripe_align > args->minlen + 1) 3494 args->minalignslop = nextminlen + stripe_align - 3495 args->minlen - 1; 3496 else 3497 args->minalignslop = 0; 3498 3499 if (!caller_pag) 3500 args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno)); 3501 error = xfs_alloc_vextent_exact_bno(args, ap->blkno); 3502 if (!caller_pag) { 3503 xfs_perag_put(args->pag); 3504 args->pag = NULL; 3505 } 3506 if (error) 3507 return error; 3508 3509 if (args->fsbno != NULLFSBLOCK) 3510 return 0; 3511 /* 3512 * Exact allocation failed. Reset to try an aligned allocation 3513 * according to the original allocation specification. 3514 */ 3515 args->alignment = stripe_align; 3516 args->minlen = nextminlen; 3517 args->minalignslop = 0; 3518 } else { 3519 /* 3520 * Adjust minlen to try and preserve alignment if we 3521 * can't guarantee an aligned maxlen extent. 3522 */ 3523 args->alignment = stripe_align; 3524 if (blen > args->alignment && 3525 blen <= args->maxlen + args->alignment) 3526 args->minlen = blen - args->alignment; 3527 args->minalignslop = 0; 3528 } 3529 3530 if (ag_only) { 3531 error = xfs_alloc_vextent_near_bno(args, ap->blkno); 3532 } else { 3533 args->pag = NULL; 3534 error = xfs_alloc_vextent_start_ag(args, ap->blkno); 3535 ASSERT(args->pag == NULL); 3536 args->pag = caller_pag; 3537 } 3538 if (error) 3539 return error; 3540 3541 if (args->fsbno != NULLFSBLOCK) 3542 return 0; 3543 3544 /* 3545 * Allocation failed, so turn return the allocation args to their 3546 * original non-aligned state so the caller can proceed on allocation 3547 * failure as if this function was never called. 3548 */ 3549 args->alignment = 1; 3550 return 0; 3551 } 3552 3553 /* 3554 * We have failed multiple allocation attempts so now are in a low space 3555 * allocation situation. Try a locality first full filesystem minimum length 3556 * allocation whilst still maintaining necessary total block reservation 3557 * requirements. 3558 * 3559 * If that fails, we are now critically low on space, so perform a last resort 3560 * allocation attempt: no reserve, no locality, blocking, minimum length, full 3561 * filesystem free space scan. We also indicate to future allocations in this 3562 * transaction that we are critically low on space so they don't waste time on 3563 * allocation modes that are unlikely to succeed. 3564 */ 3565 int 3566 xfs_bmap_btalloc_low_space( 3567 struct xfs_bmalloca *ap, 3568 struct xfs_alloc_arg *args) 3569 { 3570 int error; 3571 3572 if (args->minlen > ap->minlen) { 3573 args->minlen = ap->minlen; 3574 error = xfs_alloc_vextent_start_ag(args, ap->blkno); 3575 if (error || args->fsbno != NULLFSBLOCK) 3576 return error; 3577 } 3578 3579 /* Last ditch attempt before failure is declared. 
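 * This final pass drops args->total to the bare ap->minlen, starts the
 * scan at the first AG with no locality hint, and flags the transaction
 * XFS_TRANS_LOWMODE afterwards so that later allocations in it go
 * straight to the low space strategy.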
*/ 3580 args->total = ap->minlen; 3581 error = xfs_alloc_vextent_first_ag(args, 0); 3582 if (error) 3583 return error; 3584 ap->tp->t_flags |= XFS_TRANS_LOWMODE; 3585 return 0; 3586 } 3587 3588 static int 3589 xfs_bmap_btalloc_filestreams( 3590 struct xfs_bmalloca *ap, 3591 struct xfs_alloc_arg *args, 3592 int stripe_align) 3593 { 3594 xfs_extlen_t blen = 0; 3595 int error = 0; 3596 3597 3598 error = xfs_filestream_select_ag(ap, args, &blen); 3599 if (error) 3600 return error; 3601 ASSERT(args->pag); 3602 3603 /* 3604 * If we are in low space mode, then optimal allocation will fail so 3605 * prepare for minimal allocation and jump to the low space algorithm 3606 * immediately. 3607 */ 3608 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3609 args->minlen = ap->minlen; 3610 ASSERT(args->fsbno == NULLFSBLOCK); 3611 goto out_low_space; 3612 } 3613 3614 args->minlen = xfs_bmap_select_minlen(ap, args, blen); 3615 if (ap->aeof) 3616 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align, 3617 true); 3618 3619 if (!error && args->fsbno == NULLFSBLOCK) 3620 error = xfs_alloc_vextent_near_bno(args, ap->blkno); 3621 3622 out_low_space: 3623 /* 3624 * We are now done with the perag reference for the filestreams 3625 * association provided by xfs_filestream_select_ag(). Release it now as 3626 * we've either succeeded, had a fatal error or we are out of space and 3627 * need to do a full filesystem scan for free space which will take it's 3628 * own references. 3629 */ 3630 xfs_perag_rele(args->pag); 3631 args->pag = NULL; 3632 if (error || args->fsbno != NULLFSBLOCK) 3633 return error; 3634 3635 return xfs_bmap_btalloc_low_space(ap, args); 3636 } 3637 3638 static int 3639 xfs_bmap_btalloc_best_length( 3640 struct xfs_bmalloca *ap, 3641 struct xfs_alloc_arg *args, 3642 int stripe_align) 3643 { 3644 xfs_extlen_t blen = 0; 3645 int error; 3646 3647 ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino); 3648 xfs_bmap_adjacent(ap); 3649 3650 /* 3651 * Search for an allocation group with a single extent large enough for 3652 * the request. If one isn't found, then adjust the minimum allocation 3653 * size to the largest space found. 3654 */ 3655 error = xfs_bmap_btalloc_select_lengths(ap, args, &blen); 3656 if (error) 3657 return error; 3658 3659 /* 3660 * Don't attempt optimal EOF allocation if previous allocations barely 3661 * succeeded due to being near ENOSPC. It is highly unlikely we'll get 3662 * optimal or even aligned allocations in this case, so don't waste time 3663 * trying. 
3664 */ 3665 if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) { 3666 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align, 3667 false); 3668 if (error || args->fsbno != NULLFSBLOCK) 3669 return error; 3670 } 3671 3672 error = xfs_alloc_vextent_start_ag(args, ap->blkno); 3673 if (error || args->fsbno != NULLFSBLOCK) 3674 return error; 3675 3676 return xfs_bmap_btalloc_low_space(ap, args); 3677 } 3678 3679 static int 3680 xfs_bmap_btalloc( 3681 struct xfs_bmalloca *ap) 3682 { 3683 struct xfs_mount *mp = ap->ip->i_mount; 3684 struct xfs_alloc_arg args = { 3685 .tp = ap->tp, 3686 .mp = mp, 3687 .fsbno = NULLFSBLOCK, 3688 .oinfo = XFS_RMAP_OINFO_SKIP_UPDATE, 3689 .minleft = ap->minleft, 3690 .wasdel = ap->wasdel, 3691 .resv = XFS_AG_RESV_NONE, 3692 .datatype = ap->datatype, 3693 .alignment = 1, 3694 .minalignslop = 0, 3695 }; 3696 xfs_fileoff_t orig_offset; 3697 xfs_extlen_t orig_length; 3698 int error; 3699 int stripe_align; 3700 3701 ASSERT(ap->length); 3702 orig_offset = ap->offset; 3703 orig_length = ap->length; 3704 3705 stripe_align = xfs_bmap_compute_alignments(ap, &args); 3706 3707 /* Trim the allocation back to the maximum an AG can fit. */ 3708 args.maxlen = min(ap->length, mp->m_ag_max_usable); 3709 3710 if ((ap->datatype & XFS_ALLOC_USERDATA) && 3711 xfs_inode_is_filestream(ap->ip)) 3712 error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align); 3713 else 3714 error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align); 3715 if (error) 3716 return error; 3717 3718 if (args.fsbno != NULLFSBLOCK) { 3719 xfs_bmap_process_allocated_extent(ap, &args, orig_offset, 3720 orig_length); 3721 } else { 3722 ap->blkno = NULLFSBLOCK; 3723 ap->length = 0; 3724 } 3725 return 0; 3726 } 3727 3728 /* Trim extent to fit a logical block range. 
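 *
 * Usage sketch (hypothetical values, for illustration only):
 *
 *	irec = { .br_startoff = 100, .br_startblock = 5000,
 *		 .br_blockcount = 50 };
 *	xfs_trim_extent(&irec, 120, 10);
 *
 * leaves irec describing offsets [120, 130) mapped at block 5020,
 * while an irec that falls entirely outside [bno, bno + len) comes
 * back with br_blockcount == 0.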
*/ 3729 void 3730 xfs_trim_extent( 3731 struct xfs_bmbt_irec *irec, 3732 xfs_fileoff_t bno, 3733 xfs_filblks_t len) 3734 { 3735 xfs_fileoff_t distance; 3736 xfs_fileoff_t end = bno + len; 3737 3738 if (irec->br_startoff + irec->br_blockcount <= bno || 3739 irec->br_startoff >= end) { 3740 irec->br_blockcount = 0; 3741 return; 3742 } 3743 3744 if (irec->br_startoff < bno) { 3745 distance = bno - irec->br_startoff; 3746 if (isnullstartblock(irec->br_startblock)) 3747 irec->br_startblock = DELAYSTARTBLOCK; 3748 if (irec->br_startblock != DELAYSTARTBLOCK && 3749 irec->br_startblock != HOLESTARTBLOCK) 3750 irec->br_startblock += distance; 3751 irec->br_startoff += distance; 3752 irec->br_blockcount -= distance; 3753 } 3754 3755 if (end < irec->br_startoff + irec->br_blockcount) { 3756 distance = irec->br_startoff + irec->br_blockcount - end; 3757 irec->br_blockcount -= distance; 3758 } 3759 } 3760 3761 /* 3762 * Trim the returned map to the required bounds 3763 */ 3764 STATIC void 3765 xfs_bmapi_trim_map( 3766 struct xfs_bmbt_irec *mval, 3767 struct xfs_bmbt_irec *got, 3768 xfs_fileoff_t *bno, 3769 xfs_filblks_t len, 3770 xfs_fileoff_t obno, 3771 xfs_fileoff_t end, 3772 int n, 3773 uint32_t flags) 3774 { 3775 if ((flags & XFS_BMAPI_ENTIRE) || 3776 got->br_startoff + got->br_blockcount <= obno) { 3777 *mval = *got; 3778 if (isnullstartblock(got->br_startblock)) 3779 mval->br_startblock = DELAYSTARTBLOCK; 3780 return; 3781 } 3782 3783 if (obno > *bno) 3784 *bno = obno; 3785 ASSERT((*bno >= obno) || (n == 0)); 3786 ASSERT(*bno < end); 3787 mval->br_startoff = *bno; 3788 if (isnullstartblock(got->br_startblock)) 3789 mval->br_startblock = DELAYSTARTBLOCK; 3790 else 3791 mval->br_startblock = got->br_startblock + 3792 (*bno - got->br_startoff); 3793 /* 3794 * Return the minimum of what we got and what we asked for for 3795 * the length. We can use the len variable here because it is 3796 * modified below and we could have been there before coming 3797 * here if the first part of the allocation didn't overlap what 3798 * was asked for. 
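 *
 * For instance (invented numbers): with got = [100, 150) and a
 * request that leaves *bno at 120 and end at 160, the record is
 * clamped to min(160 - 120, 150 - 120) = 30 blocks, i.e. mval covers
 * [120, 150).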
3799 */ 3800 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3801 got->br_blockcount - (*bno - got->br_startoff)); 3802 mval->br_state = got->br_state; 3803 ASSERT(mval->br_blockcount <= len); 3804 return; 3805 } 3806 3807 /* 3808 * Update and validate the extent map to return 3809 */ 3810 STATIC void 3811 xfs_bmapi_update_map( 3812 struct xfs_bmbt_irec **map, 3813 xfs_fileoff_t *bno, 3814 xfs_filblks_t *len, 3815 xfs_fileoff_t obno, 3816 xfs_fileoff_t end, 3817 int *n, 3818 uint32_t flags) 3819 { 3820 xfs_bmbt_irec_t *mval = *map; 3821 3822 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3823 ((mval->br_startoff + mval->br_blockcount) <= end)); 3824 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3825 (mval->br_startoff < obno)); 3826 3827 *bno = mval->br_startoff + mval->br_blockcount; 3828 *len = end - *bno; 3829 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3830 /* update previous map with new information */ 3831 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3832 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3833 ASSERT(mval->br_state == mval[-1].br_state); 3834 mval[-1].br_blockcount = mval->br_blockcount; 3835 mval[-1].br_state = mval->br_state; 3836 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3837 mval[-1].br_startblock != DELAYSTARTBLOCK && 3838 mval[-1].br_startblock != HOLESTARTBLOCK && 3839 mval->br_startblock == mval[-1].br_startblock + 3840 mval[-1].br_blockcount && 3841 mval[-1].br_state == mval->br_state) { 3842 ASSERT(mval->br_startoff == 3843 mval[-1].br_startoff + mval[-1].br_blockcount); 3844 mval[-1].br_blockcount += mval->br_blockcount; 3845 } else if (*n > 0 && 3846 mval->br_startblock == DELAYSTARTBLOCK && 3847 mval[-1].br_startblock == DELAYSTARTBLOCK && 3848 mval->br_startoff == 3849 mval[-1].br_startoff + mval[-1].br_blockcount) { 3850 mval[-1].br_blockcount += mval->br_blockcount; 3851 mval[-1].br_state = mval->br_state; 3852 } else if (!((*n == 0) && 3853 ((mval->br_startoff + mval->br_blockcount) <= 3854 obno))) { 3855 mval++; 3856 (*n)++; 3857 } 3858 *map = mval; 3859 } 3860 3861 /* 3862 * Map file blocks to filesystem blocks without allocation. 3863 */ 3864 int 3865 xfs_bmapi_read( 3866 struct xfs_inode *ip, 3867 xfs_fileoff_t bno, 3868 xfs_filblks_t len, 3869 struct xfs_bmbt_irec *mval, 3870 int *nmap, 3871 uint32_t flags) 3872 { 3873 struct xfs_mount *mp = ip->i_mount; 3874 int whichfork = xfs_bmapi_whichfork(flags); 3875 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 3876 struct xfs_bmbt_irec got; 3877 xfs_fileoff_t obno; 3878 xfs_fileoff_t end; 3879 struct xfs_iext_cursor icur; 3880 int error; 3881 bool eof = false; 3882 int n = 0; 3883 3884 ASSERT(*nmap >= 1); 3885 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE))); 3886 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3887 3888 if (WARN_ON_ONCE(!ifp)) 3889 return -EFSCORRUPTED; 3890 3891 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || 3892 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) 3893 return -EFSCORRUPTED; 3894 3895 if (xfs_is_shutdown(mp)) 3896 return -EIO; 3897 3898 XFS_STATS_INC(mp, xs_blk_mapr); 3899 3900 error = xfs_iread_extents(NULL, ip, whichfork); 3901 if (error) 3902 return error; 3903 3904 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) 3905 eof = true; 3906 end = bno + len; 3907 obno = bno; 3908 3909 while (bno < end && n < *nmap) { 3910 /* Reading past eof, act as though there's a hole up to end. 
*/ 3911 if (eof) 3912 got.br_startoff = end; 3913 if (got.br_startoff > bno) { 3914 /* Reading in a hole. */ 3915 mval->br_startoff = bno; 3916 mval->br_startblock = HOLESTARTBLOCK; 3917 mval->br_blockcount = 3918 XFS_FILBLKS_MIN(len, got.br_startoff - bno); 3919 mval->br_state = XFS_EXT_NORM; 3920 bno += mval->br_blockcount; 3921 len -= mval->br_blockcount; 3922 mval++; 3923 n++; 3924 continue; 3925 } 3926 3927 /* set up the extent map to return. */ 3928 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); 3929 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 3930 3931 /* If we're done, stop now. */ 3932 if (bno >= end || n >= *nmap) 3933 break; 3934 3935 /* Else go on to the next record. */ 3936 if (!xfs_iext_next_extent(ifp, &icur, &got)) 3937 eof = true; 3938 } 3939 *nmap = n; 3940 return 0; 3941 } 3942 3943 /* 3944 * Add a delayed allocation extent to an inode. Blocks are reserved from the 3945 * global pool and the extent inserted into the inode in-core extent tree. 3946 * 3947 * On entry, got refers to the first extent beyond the offset of the extent to 3948 * allocate or eof is specified if no such extent exists. On return, got refers 3949 * to the extent record that was inserted to the inode fork. 3950 * 3951 * Note that the allocated extent may have been merged with contiguous extents 3952 * during insertion into the inode fork. Thus, got does not reflect the current 3953 * state of the inode fork on return. If necessary, the caller can use lastx to 3954 * look up the updated record in the inode fork. 3955 */ 3956 int 3957 xfs_bmapi_reserve_delalloc( 3958 struct xfs_inode *ip, 3959 int whichfork, 3960 xfs_fileoff_t off, 3961 xfs_filblks_t len, 3962 xfs_filblks_t prealloc, 3963 struct xfs_bmbt_irec *got, 3964 struct xfs_iext_cursor *icur, 3965 int eof) 3966 { 3967 struct xfs_mount *mp = ip->i_mount; 3968 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 3969 xfs_extlen_t alen; 3970 xfs_extlen_t indlen; 3971 int error; 3972 xfs_fileoff_t aoff = off; 3973 3974 /* 3975 * Cap the alloc length. Keep track of prealloc so we know whether to 3976 * tag the inode before we return. 3977 */ 3978 alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN); 3979 if (!eof) 3980 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); 3981 if (prealloc && alen >= len) 3982 prealloc = alen - len; 3983 3984 /* Figure out the extent size, adjust alen */ 3985 if (whichfork == XFS_COW_FORK) { 3986 struct xfs_bmbt_irec prev; 3987 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip); 3988 3989 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev)) 3990 prev.br_startoff = NULLFILEOFF; 3991 3992 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof, 3993 1, 0, &aoff, &alen); 3994 ASSERT(!error); 3995 } 3996 3997 /* 3998 * Make a transaction-less quota reservation for delayed allocation 3999 * blocks. This number gets adjusted later. We return if we haven't 4000 * allocated blocks already inside this loop. 4001 */ 4002 error = xfs_quota_reserve_blkres(ip, alen); 4003 if (error) 4004 return error; 4005 4006 /* 4007 * Split changing sb for alen and indlen since they could be coming 4008 * from different places. 
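 *
 * To be explicit about what is being split: alen counts the data
 * blocks backing the delalloc extent, while indlen below is a
 * worst-case estimate of the bmbt blocks that may be needed when the
 * extent is eventually converted.  They are debited from fdblocks in
 * two separate calls so that each piece can be unwound on its own if
 * the later step fails, as the out_unreserve_* labels do.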
4009 */ 4010 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); 4011 ASSERT(indlen > 0); 4012 4013 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); 4014 if (error) 4015 goto out_unreserve_quota; 4016 4017 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); 4018 if (error) 4019 goto out_unreserve_blocks; 4020 4021 4022 ip->i_delayed_blks += alen; 4023 xfs_mod_delalloc(ip->i_mount, alen + indlen); 4024 4025 got->br_startoff = aoff; 4026 got->br_startblock = nullstartblock(indlen); 4027 got->br_blockcount = alen; 4028 got->br_state = XFS_EXT_NORM; 4029 4030 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got); 4031 4032 /* 4033 * Tag the inode if blocks were preallocated. Note that COW fork 4034 * preallocation can occur at the start or end of the extent, even when 4035 * prealloc == 0, so we must also check the aligned offset and length. 4036 */ 4037 if (whichfork == XFS_DATA_FORK && prealloc) 4038 xfs_inode_set_eofblocks_tag(ip); 4039 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 4040 xfs_inode_set_cowblocks_tag(ip); 4041 4042 return 0; 4043 4044 out_unreserve_blocks: 4045 xfs_mod_fdblocks(mp, alen, false); 4046 out_unreserve_quota: 4047 if (XFS_IS_QUOTA_ON(mp)) 4048 xfs_quota_unreserve_blkres(ip, alen); 4049 return error; 4050 } 4051 4052 static int 4053 xfs_bmap_alloc_userdata( 4054 struct xfs_bmalloca *bma) 4055 { 4056 struct xfs_mount *mp = bma->ip->i_mount; 4057 int whichfork = xfs_bmapi_whichfork(bma->flags); 4058 int error; 4059 4060 /* 4061 * Set the data type being allocated. For the data fork, the first data 4062 * in the file is treated differently to all other allocations. For the 4063 * attribute fork, we only need to ensure the allocated range is not on 4064 * the busy list. 4065 */ 4066 bma->datatype = XFS_ALLOC_NOBUSY; 4067 if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) { 4068 bma->datatype |= XFS_ALLOC_USERDATA; 4069 if (bma->offset == 0) 4070 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; 4071 4072 if (mp->m_dalign && bma->length >= mp->m_dalign) { 4073 error = xfs_bmap_isaeof(bma, whichfork); 4074 if (error) 4075 return error; 4076 } 4077 4078 if (XFS_IS_REALTIME_INODE(bma->ip)) 4079 return xfs_bmap_rtalloc(bma); 4080 } 4081 4082 if (unlikely(XFS_TEST_ERROR(false, mp, 4083 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT))) 4084 return xfs_bmap_exact_minlen_extent_alloc(bma); 4085 4086 return xfs_bmap_btalloc(bma); 4087 } 4088 4089 static int 4090 xfs_bmapi_allocate( 4091 struct xfs_bmalloca *bma) 4092 { 4093 struct xfs_mount *mp = bma->ip->i_mount; 4094 int whichfork = xfs_bmapi_whichfork(bma->flags); 4095 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork); 4096 int tmp_logflags = 0; 4097 int error; 4098 4099 ASSERT(bma->length > 0); 4100 4101 /* 4102 * For the wasdelay case, we could also just allocate the stuff asked 4103 * for in this bmap call but that wouldn't be as good. 
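 *
 * As a concrete illustration of the wasdel branch below: if the
 * delayed extent spans file blocks 0-63 and this call only asked to
 * map blocks 8-15, we still allocate all 64 blocks at once, using the
 * delayed extent's own offset and length, rather than carving the
 * reservation up into several small allocations.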
4104 */ 4105 if (bma->wasdel) { 4106 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4107 bma->offset = bma->got.br_startoff; 4108 if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev)) 4109 bma->prev.br_startoff = NULLFILEOFF; 4110 } else { 4111 bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN); 4112 if (!bma->eof) 4113 bma->length = XFS_FILBLKS_MIN(bma->length, 4114 bma->got.br_startoff - bma->offset); 4115 } 4116 4117 if (bma->flags & XFS_BMAPI_CONTIG) 4118 bma->minlen = bma->length; 4119 else 4120 bma->minlen = 1; 4121 4122 if (bma->flags & XFS_BMAPI_METADATA) { 4123 if (unlikely(XFS_TEST_ERROR(false, mp, 4124 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT))) 4125 error = xfs_bmap_exact_minlen_extent_alloc(bma); 4126 else 4127 error = xfs_bmap_btalloc(bma); 4128 } else { 4129 error = xfs_bmap_alloc_userdata(bma); 4130 } 4131 if (error || bma->blkno == NULLFSBLOCK) 4132 return error; 4133 4134 if (bma->flags & XFS_BMAPI_ZERO) { 4135 error = xfs_zero_extent(bma->ip, bma->blkno, bma->length); 4136 if (error) 4137 return error; 4138 } 4139 4140 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) 4141 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4142 /* 4143 * Bump the number of extents we've allocated 4144 * in this call. 4145 */ 4146 bma->nallocs++; 4147 4148 if (bma->cur) 4149 bma->cur->bc_ino.flags = 4150 bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0; 4151 4152 bma->got.br_startoff = bma->offset; 4153 bma->got.br_startblock = bma->blkno; 4154 bma->got.br_blockcount = bma->length; 4155 bma->got.br_state = XFS_EXT_NORM; 4156 4157 if (bma->flags & XFS_BMAPI_PREALLOC) 4158 bma->got.br_state = XFS_EXT_UNWRITTEN; 4159 4160 if (bma->wasdel) 4161 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4162 else 4163 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4164 whichfork, &bma->icur, &bma->cur, &bma->got, 4165 &bma->logflags, bma->flags); 4166 4167 bma->logflags |= tmp_logflags; 4168 if (error) 4169 return error; 4170 4171 /* 4172 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4173 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4174 * the neighbouring ones. 4175 */ 4176 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4177 4178 ASSERT(bma->got.br_startoff <= bma->offset); 4179 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4180 bma->offset + bma->length); 4181 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4182 bma->got.br_state == XFS_EXT_UNWRITTEN); 4183 return 0; 4184 } 4185 4186 STATIC int 4187 xfs_bmapi_convert_unwritten( 4188 struct xfs_bmalloca *bma, 4189 struct xfs_bmbt_irec *mval, 4190 xfs_filblks_t len, 4191 uint32_t flags) 4192 { 4193 int whichfork = xfs_bmapi_whichfork(flags); 4194 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork); 4195 int tmp_logflags = 0; 4196 int error; 4197 4198 /* check if we need to do unwritten->real conversion */ 4199 if (mval->br_state == XFS_EXT_UNWRITTEN && 4200 (flags & XFS_BMAPI_PREALLOC)) 4201 return 0; 4202 4203 /* check if we need to do real->unwritten conversion */ 4204 if (mval->br_state == XFS_EXT_NORM && 4205 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4206 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4207 return 0; 4208 4209 /* 4210 * Modify (by adding) the state flag, if writing. 
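 *
 * By the time we get here the checks above guarantee one of two
 * directions: an unwritten extent that is having real data written to
 * it (flip to XFS_EXT_NORM), or a written extent being explicitly
 * converted with XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT (flip to
 * XFS_EXT_UNWRITTEN).  The assignment below simply toggles the state.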
4211 */ 4212 ASSERT(mval->br_blockcount <= len); 4213 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) { 4214 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4215 bma->ip, whichfork); 4216 } 4217 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4218 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4219 4220 /* 4221 * Before insertion into the bmbt, zero the range being converted 4222 * if required. 4223 */ 4224 if (flags & XFS_BMAPI_ZERO) { 4225 error = xfs_zero_extent(bma->ip, mval->br_startblock, 4226 mval->br_blockcount); 4227 if (error) 4228 return error; 4229 } 4230 4231 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork, 4232 &bma->icur, &bma->cur, mval, &tmp_logflags); 4233 /* 4234 * Log the inode core unconditionally in the unwritten extent conversion 4235 * path because the conversion might not have done so (e.g., if the 4236 * extent count hasn't changed). We need to make sure the inode is dirty 4237 * in the transaction for the sake of fsync(), even if nothing has 4238 * changed, because fsync() will not force the log for this transaction 4239 * unless it sees the inode pinned. 4240 * 4241 * Note: If we're only converting cow fork extents, there aren't 4242 * any on-disk updates to make, so we don't need to log anything. 4243 */ 4244 if (whichfork != XFS_COW_FORK) 4245 bma->logflags |= tmp_logflags | XFS_ILOG_CORE; 4246 if (error) 4247 return error; 4248 4249 /* 4250 * Update our extent pointer, given that 4251 * xfs_bmap_add_extent_unwritten_real might have merged it into one 4252 * of the neighbouring ones. 4253 */ 4254 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4255 4256 /* 4257 * We may have combined previously unwritten space with written space, 4258 * so generate another request. 4259 */ 4260 if (mval->br_blockcount < len) 4261 return -EAGAIN; 4262 return 0; 4263 } 4264 4265 xfs_extlen_t 4266 xfs_bmapi_minleft( 4267 struct xfs_trans *tp, 4268 struct xfs_inode *ip, 4269 int fork) 4270 { 4271 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, fork); 4272 4273 if (tp && tp->t_highest_agno != NULLAGNUMBER) 4274 return 0; 4275 if (ifp->if_format != XFS_DINODE_FMT_BTREE) 4276 return 1; 4277 return be16_to_cpu(ifp->if_broot->bb_level) + 1; 4278 } 4279 4280 /* 4281 * Log whatever the flags say, even if error. Otherwise we might miss detecting 4282 * a case where the data is changed, there's an error, and it's not logged so we 4283 * don't shutdown when we should. Don't bother logging extents/btree changes if 4284 * we converted to the other format. 4285 */ 4286 static void 4287 xfs_bmapi_finish( 4288 struct xfs_bmalloca *bma, 4289 int whichfork, 4290 int error) 4291 { 4292 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork); 4293 4294 if ((bma->logflags & xfs_ilog_fext(whichfork)) && 4295 ifp->if_format != XFS_DINODE_FMT_EXTENTS) 4296 bma->logflags &= ~xfs_ilog_fext(whichfork); 4297 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) && 4298 ifp->if_format != XFS_DINODE_FMT_BTREE) 4299 bma->logflags &= ~xfs_ilog_fbroot(whichfork); 4300 4301 if (bma->logflags) 4302 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags); 4303 if (bma->cur) 4304 xfs_btree_del_cursor(bma->cur, error); 4305 } 4306 4307 /* 4308 * Map file blocks to filesystem blocks, and allocate blocks or convert the 4309 * extent state if necessary. Details behaviour is controlled by the flags 4310 * parameter. Only allocates blocks from a single allocation group, to avoid 4311 * locking problems. 
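 *
 * A minimal illustrative caller (the variable names here are
 * hypothetical; this is a sketch rather than kernel code) allocates
 * blocks for one file range and reads back the mapping it was given.
 * The caller must already hold the ILOCK and supply a transaction, as
 * the asserts below require:
 *
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *	int			error;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
 *			XFS_BMAPI_PREALLOC, resblks, &map, &nmap);
 *	if (error)
 *		return error;
 *
 * On return, nmap reports how many mappings were filled in (0 or 1
 * here), and map describes the blocks that were actually allocated.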
4312 */ 4313 int 4314 xfs_bmapi_write( 4315 struct xfs_trans *tp, /* transaction pointer */ 4316 struct xfs_inode *ip, /* incore inode */ 4317 xfs_fileoff_t bno, /* starting file offs. mapped */ 4318 xfs_filblks_t len, /* length to map in file */ 4319 uint32_t flags, /* XFS_BMAPI_... */ 4320 xfs_extlen_t total, /* total blocks needed */ 4321 struct xfs_bmbt_irec *mval, /* output: map values */ 4322 int *nmap) /* i/o: mval size/count */ 4323 { 4324 struct xfs_bmalloca bma = { 4325 .tp = tp, 4326 .ip = ip, 4327 .total = total, 4328 }; 4329 struct xfs_mount *mp = ip->i_mount; 4330 int whichfork = xfs_bmapi_whichfork(flags); 4331 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4332 xfs_fileoff_t end; /* end of mapped file region */ 4333 bool eof = false; /* after the end of extents */ 4334 int error; /* error return */ 4335 int n; /* current extent index */ 4336 xfs_fileoff_t obno; /* old block number (offset) */ 4337 4338 #ifdef DEBUG 4339 xfs_fileoff_t orig_bno; /* original block number value */ 4340 int orig_flags; /* original flags arg value */ 4341 xfs_filblks_t orig_len; /* original value of len arg */ 4342 struct xfs_bmbt_irec *orig_mval; /* original value of mval */ 4343 int orig_nmap; /* original value of *nmap */ 4344 4345 orig_bno = bno; 4346 orig_len = len; 4347 orig_flags = flags; 4348 orig_mval = mval; 4349 orig_nmap = *nmap; 4350 #endif 4351 4352 ASSERT(*nmap >= 1); 4353 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); 4354 ASSERT(tp != NULL); 4355 ASSERT(len > 0); 4356 ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL); 4357 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4358 ASSERT(!(flags & XFS_BMAPI_REMAP)); 4359 4360 /* zeroing is for currently only for data extents, not metadata */ 4361 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) != 4362 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)); 4363 /* 4364 * we can allocate unwritten extents or pre-zero allocated blocks, 4365 * but it makes no sense to do both at once. This would result in 4366 * zeroing the unwritten extent twice, but it still being an 4367 * unwritten extent.... 4368 */ 4369 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) != 4370 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)); 4371 4372 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || 4373 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { 4374 return -EFSCORRUPTED; 4375 } 4376 4377 if (xfs_is_shutdown(mp)) 4378 return -EIO; 4379 4380 XFS_STATS_INC(mp, xs_blk_mapw); 4381 4382 error = xfs_iread_extents(tp, ip, whichfork); 4383 if (error) 4384 goto error0; 4385 4386 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got)) 4387 eof = true; 4388 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4389 bma.prev.br_startoff = NULLFILEOFF; 4390 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4391 4392 n = 0; 4393 end = bno + len; 4394 obno = bno; 4395 while (bno < end && n < *nmap) { 4396 bool need_alloc = false, wasdelay = false; 4397 4398 /* in hole or beyond EOF? */ 4399 if (eof || bma.got.br_startoff > bno) { 4400 /* 4401 * CoW fork conversions should /never/ hit EOF or 4402 * holes. There should always be something for us 4403 * to work on. 4404 */ 4405 ASSERT(!((flags & XFS_BMAPI_CONVERT) && 4406 (flags & XFS_BMAPI_COWFORK))); 4407 4408 need_alloc = true; 4409 } else if (isnullstartblock(bma.got.br_startblock)) { 4410 wasdelay = true; 4411 } 4412 4413 /* 4414 * First, deal with the hole before the allocated space 4415 * that we found, if any. 
4416 */ 4417 if (need_alloc || wasdelay) { 4418 bma.eof = eof; 4419 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4420 bma.wasdel = wasdelay; 4421 bma.offset = bno; 4422 bma.flags = flags; 4423 4424 /* 4425 * There's a 32/64 bit type mismatch between the 4426 * allocation length request (which can be 64 bits in 4427 * length) and the bma length request, which is 4428 * xfs_extlen_t and therefore 32 bits. Hence we have to 4429 * check for 32-bit overflows and handle them here. 4430 */ 4431 if (len > (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN) 4432 bma.length = XFS_MAX_BMBT_EXTLEN; 4433 else 4434 bma.length = len; 4435 4436 ASSERT(len > 0); 4437 ASSERT(bma.length > 0); 4438 error = xfs_bmapi_allocate(&bma); 4439 if (error) 4440 goto error0; 4441 if (bma.blkno == NULLFSBLOCK) 4442 break; 4443 4444 /* 4445 * If this is a CoW allocation, record the data in 4446 * the refcount btree for orphan recovery. 4447 */ 4448 if (whichfork == XFS_COW_FORK) 4449 xfs_refcount_alloc_cow_extent(tp, bma.blkno, 4450 bma.length); 4451 } 4452 4453 /* Deal with the allocated space we found. */ 4454 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4455 end, n, flags); 4456 4457 /* Execute unwritten extent conversion if necessary */ 4458 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4459 if (error == -EAGAIN) 4460 continue; 4461 if (error) 4462 goto error0; 4463 4464 /* update the extent map to return */ 4465 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4466 4467 /* 4468 * If we're done, stop now. Stop when we've allocated 4469 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4470 * the transaction may get too big. 4471 */ 4472 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4473 break; 4474 4475 /* Else go on to the next record. */ 4476 bma.prev = bma.got; 4477 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got)) 4478 eof = true; 4479 } 4480 *nmap = n; 4481 4482 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4483 whichfork); 4484 if (error) 4485 goto error0; 4486 4487 ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE || 4488 ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork)); 4489 xfs_bmapi_finish(&bma, whichfork, 0); 4490 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4491 orig_nmap, *nmap); 4492 return 0; 4493 error0: 4494 xfs_bmapi_finish(&bma, whichfork, error); 4495 return error; 4496 } 4497 4498 /* 4499 * Convert an existing delalloc extent to real blocks based on file offset. This 4500 * attempts to allocate the entire delalloc extent and may require multiple 4501 * invocations to allocate the target offset if a large enough physical extent 4502 * is not available. 4503 */ 4504 int 4505 xfs_bmapi_convert_delalloc( 4506 struct xfs_inode *ip, 4507 int whichfork, 4508 xfs_off_t offset, 4509 struct iomap *iomap, 4510 unsigned int *seq) 4511 { 4512 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4513 struct xfs_mount *mp = ip->i_mount; 4514 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 4515 struct xfs_bmalloca bma = { NULL }; 4516 uint16_t flags = 0; 4517 struct xfs_trans *tp; 4518 int error; 4519 4520 if (whichfork == XFS_COW_FORK) 4521 flags |= IOMAP_F_SHARED; 4522 4523 /* 4524 * Space for the extent and indirect blocks was reserved when the 4525 * delalloc extent was created so there's no need to do so here. 
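 *
 * Hence the transaction below is allocated with a zero block count,
 * and XFS_TRANS_RESERVE lets it dip into the reserve block pool if
 * needed so that the conversion does not trip over a transient
 * ENOSPC.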
4526 */ 4527 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 4528 XFS_TRANS_RESERVE, &tp); 4529 if (error) 4530 return error; 4531 4532 xfs_ilock(ip, XFS_ILOCK_EXCL); 4533 xfs_trans_ijoin(tp, ip, 0); 4534 4535 error = xfs_iext_count_may_overflow(ip, whichfork, 4536 XFS_IEXT_ADD_NOSPLIT_CNT); 4537 if (error == -EFBIG) 4538 error = xfs_iext_count_upgrade(tp, ip, 4539 XFS_IEXT_ADD_NOSPLIT_CNT); 4540 if (error) 4541 goto out_trans_cancel; 4542 4543 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) || 4544 bma.got.br_startoff > offset_fsb) { 4545 /* 4546 * No extent found in the range we are trying to convert. This 4547 * should only happen for the COW fork, where another thread 4548 * might have moved the extent to the data fork in the meantime. 4549 */ 4550 WARN_ON_ONCE(whichfork != XFS_COW_FORK); 4551 error = -EAGAIN; 4552 goto out_trans_cancel; 4553 } 4554 4555 /* 4556 * If we find a real extent here we raced with another thread converting 4557 * the extent. Just return the real extent at this offset. 4558 */ 4559 if (!isnullstartblock(bma.got.br_startblock)) { 4560 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags, 4561 xfs_iomap_inode_sequence(ip, flags)); 4562 *seq = READ_ONCE(ifp->if_seq); 4563 goto out_trans_cancel; 4564 } 4565 4566 bma.tp = tp; 4567 bma.ip = ip; 4568 bma.wasdel = true; 4569 bma.offset = bma.got.br_startoff; 4570 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, 4571 XFS_MAX_BMBT_EXTLEN); 4572 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4573 4574 /* 4575 * When we're converting the delalloc reservations backing dirty pages 4576 * in the page cache, we must be careful about how we create the new 4577 * extents: 4578 * 4579 * New CoW fork extents are created unwritten, turned into real extents 4580 * when we're about to write the data to disk, and mapped into the data 4581 * fork after the write finishes. End of story. 4582 * 4583 * New data fork extents must be mapped in as unwritten and converted 4584 * to real extents after the write succeeds to avoid exposing stale 4585 * disk contents if we crash. 
4586 */ 4587 bma.flags = XFS_BMAPI_PREALLOC; 4588 if (whichfork == XFS_COW_FORK) 4589 bma.flags |= XFS_BMAPI_COWFORK; 4590 4591 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4592 bma.prev.br_startoff = NULLFILEOFF; 4593 4594 error = xfs_bmapi_allocate(&bma); 4595 if (error) 4596 goto out_finish; 4597 4598 error = -ENOSPC; 4599 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK)) 4600 goto out_finish; 4601 error = -EFSCORRUPTED; 4602 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock))) 4603 goto out_finish; 4604 4605 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length)); 4606 XFS_STATS_INC(mp, xs_xstrat_quick); 4607 4608 ASSERT(!isnullstartblock(bma.got.br_startblock)); 4609 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags, 4610 xfs_iomap_inode_sequence(ip, flags)); 4611 *seq = READ_ONCE(ifp->if_seq); 4612 4613 if (whichfork == XFS_COW_FORK) 4614 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length); 4615 4616 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4617 whichfork); 4618 if (error) 4619 goto out_finish; 4620 4621 xfs_bmapi_finish(&bma, whichfork, 0); 4622 error = xfs_trans_commit(tp); 4623 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4624 return error; 4625 4626 out_finish: 4627 xfs_bmapi_finish(&bma, whichfork, error); 4628 out_trans_cancel: 4629 xfs_trans_cancel(tp); 4630 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4631 return error; 4632 } 4633 4634 int 4635 xfs_bmapi_remap( 4636 struct xfs_trans *tp, 4637 struct xfs_inode *ip, 4638 xfs_fileoff_t bno, 4639 xfs_filblks_t len, 4640 xfs_fsblock_t startblock, 4641 uint32_t flags) 4642 { 4643 struct xfs_mount *mp = ip->i_mount; 4644 struct xfs_ifork *ifp; 4645 struct xfs_btree_cur *cur = NULL; 4646 struct xfs_bmbt_irec got; 4647 struct xfs_iext_cursor icur; 4648 int whichfork = xfs_bmapi_whichfork(flags); 4649 int logflags = 0, error; 4650 4651 ifp = xfs_ifork_ptr(ip, whichfork); 4652 ASSERT(len > 0); 4653 ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN); 4654 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4655 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC | 4656 XFS_BMAPI_NORMAP))); 4657 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != 4658 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); 4659 4660 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || 4661 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { 4662 return -EFSCORRUPTED; 4663 } 4664 4665 if (xfs_is_shutdown(mp)) 4666 return -EIO; 4667 4668 error = xfs_iread_extents(tp, ip, whichfork); 4669 if (error) 4670 return error; 4671 4672 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) { 4673 /* make sure we only reflink into a hole. 
*/ 4674 ASSERT(got.br_startoff > bno); 4675 ASSERT(got.br_startoff - bno >= len); 4676 } 4677 4678 ip->i_nblocks += len; 4679 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4680 4681 if (ifp->if_format == XFS_DINODE_FMT_BTREE) { 4682 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 4683 cur->bc_ino.flags = 0; 4684 } 4685 4686 got.br_startoff = bno; 4687 got.br_startblock = startblock; 4688 got.br_blockcount = len; 4689 if (flags & XFS_BMAPI_PREALLOC) 4690 got.br_state = XFS_EXT_UNWRITTEN; 4691 else 4692 got.br_state = XFS_EXT_NORM; 4693 4694 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 4695 &cur, &got, &logflags, flags); 4696 if (error) 4697 goto error0; 4698 4699 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork); 4700 4701 error0: 4702 if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS) 4703 logflags &= ~XFS_ILOG_DEXT; 4704 else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE) 4705 logflags &= ~XFS_ILOG_DBROOT; 4706 4707 if (logflags) 4708 xfs_trans_log_inode(tp, ip, logflags); 4709 if (cur) 4710 xfs_btree_del_cursor(cur, error); 4711 return error; 4712 } 4713 4714 /* 4715 * When a delalloc extent is split (e.g., due to a hole punch), the original 4716 * indlen reservation must be shared across the two new extents that are left 4717 * behind. 4718 * 4719 * Given the original reservation and the worst case indlen for the two new 4720 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4721 * reservation fairly across the two new extents. If necessary, steal available 4722 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4723 * ores == 1). The number of stolen blocks is returned. The availability and 4724 * subsequent accounting of stolen blocks is the responsibility of the caller. 4725 */ 4726 static xfs_filblks_t 4727 xfs_bmap_split_indlen( 4728 xfs_filblks_t ores, /* original res. */ 4729 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4730 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4731 xfs_filblks_t avail) /* stealable blocks */ 4732 { 4733 xfs_filblks_t len1 = *indlen1; 4734 xfs_filblks_t len2 = *indlen2; 4735 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4736 xfs_filblks_t stolen = 0; 4737 xfs_filblks_t resfactor; 4738 4739 /* 4740 * Steal as many blocks as we can to try and satisfy the worst case 4741 * indlen for both new extents. 4742 */ 4743 if (ores < nres && avail) 4744 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4745 ores += stolen; 4746 4747 /* nothing else to do if we've satisfied the new reservation */ 4748 if (ores >= nres) 4749 return stolen; 4750 4751 /* 4752 * We can't meet the total required reservation for the two extents. 4753 * Calculate the percent of the overall shortage between both extents 4754 * and apply this percentage to each of the requested indlen values. 4755 * This distributes the shortage fairly and reduces the chances that one 4756 * of the two extents is left with nothing when extents are repeatedly 4757 * split. 4758 */ 4759 resfactor = (ores * 100); 4760 do_div(resfactor, nres); 4761 len1 *= resfactor; 4762 do_div(len1, 100); 4763 len2 *= resfactor; 4764 do_div(len2, 100); 4765 ASSERT(len1 + len2 <= ores); 4766 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4767 4768 /* 4769 * Hand out the remainder to each extent. If one of the two reservations 4770 * is zero, we want to make sure that one gets a block first. The loop 4771 * below starts with len1, so hand len2 a block right off the bat if it 4772 * is zero. 
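 *
 * Worked example (illustrative numbers): with ores = 10, a worst-case
 * split of indlen1 = 8 and indlen2 = 4, and nothing available to
 * steal, nres is 12 and resfactor computes to 83.  Scaling gives
 * len1 = 6 and len2 = 3, leaving one reserved block over; the loop
 * below hands that block to len1, so the final split is 7 and 3
 * blocks, consuming exactly the original 10-block reservation.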
4773 */ 4774 ores -= (len1 + len2); 4775 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4776 if (ores && !len2 && *indlen2) { 4777 len2++; 4778 ores--; 4779 } 4780 while (ores) { 4781 if (len1 < *indlen1) { 4782 len1++; 4783 ores--; 4784 } 4785 if (!ores) 4786 break; 4787 if (len2 < *indlen2) { 4788 len2++; 4789 ores--; 4790 } 4791 } 4792 4793 *indlen1 = len1; 4794 *indlen2 = len2; 4795 4796 return stolen; 4797 } 4798 4799 int 4800 xfs_bmap_del_extent_delay( 4801 struct xfs_inode *ip, 4802 int whichfork, 4803 struct xfs_iext_cursor *icur, 4804 struct xfs_bmbt_irec *got, 4805 struct xfs_bmbt_irec *del) 4806 { 4807 struct xfs_mount *mp = ip->i_mount; 4808 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4809 struct xfs_bmbt_irec new; 4810 int64_t da_old, da_new, da_diff = 0; 4811 xfs_fileoff_t del_endoff, got_endoff; 4812 xfs_filblks_t got_indlen, new_indlen, stolen; 4813 uint32_t state = xfs_bmap_fork_to_state(whichfork); 4814 int error = 0; 4815 bool isrt; 4816 4817 XFS_STATS_INC(mp, xs_del_exlist); 4818 4819 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4820 del_endoff = del->br_startoff + del->br_blockcount; 4821 got_endoff = got->br_startoff + got->br_blockcount; 4822 da_old = startblockval(got->br_startblock); 4823 da_new = 0; 4824 4825 ASSERT(del->br_blockcount > 0); 4826 ASSERT(got->br_startoff <= del->br_startoff); 4827 ASSERT(got_endoff >= del_endoff); 4828 4829 if (isrt) { 4830 uint64_t rtexts = del->br_blockcount; 4831 4832 do_div(rtexts, mp->m_sb.sb_rextsize); 4833 xfs_mod_frextents(mp, rtexts); 4834 } 4835 4836 /* 4837 * Update the inode delalloc counter now and wait to update the 4838 * sb counters as we might have to borrow some blocks for the 4839 * indirect block accounting. 4840 */ 4841 ASSERT(!isrt); 4842 error = xfs_quota_unreserve_blkres(ip, del->br_blockcount); 4843 if (error) 4844 return error; 4845 ip->i_delayed_blks -= del->br_blockcount; 4846 4847 if (got->br_startoff == del->br_startoff) 4848 state |= BMAP_LEFT_FILLING; 4849 if (got_endoff == del_endoff) 4850 state |= BMAP_RIGHT_FILLING; 4851 4852 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4853 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4854 /* 4855 * Matches the whole extent. Delete the entry. 4856 */ 4857 xfs_iext_remove(ip, icur, state); 4858 xfs_iext_prev(ifp, icur); 4859 break; 4860 case BMAP_LEFT_FILLING: 4861 /* 4862 * Deleting the first part of the extent. 4863 */ 4864 got->br_startoff = del_endoff; 4865 got->br_blockcount -= del->br_blockcount; 4866 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4867 got->br_blockcount), da_old); 4868 got->br_startblock = nullstartblock((int)da_new); 4869 xfs_iext_update_extent(ip, state, icur, got); 4870 break; 4871 case BMAP_RIGHT_FILLING: 4872 /* 4873 * Deleting the last part of the extent. 4874 */ 4875 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4876 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4877 got->br_blockcount), da_old); 4878 got->br_startblock = nullstartblock((int)da_new); 4879 xfs_iext_update_extent(ip, state, icur, got); 4880 break; 4881 case 0: 4882 /* 4883 * Deleting the middle of the extent. 4884 * 4885 * Distribute the original indlen reservation across the two new 4886 * extents. Steal blocks from the deleted extent if necessary. 4887 * Stealing blocks simply fudges the fdblocks accounting below. 4888 * Warn if either of the new indlen reservations is zero as this 4889 * can lead to delalloc problems. 
4890 */ 4891 got->br_blockcount = del->br_startoff - got->br_startoff; 4892 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4893 4894 new.br_blockcount = got_endoff - del_endoff; 4895 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4896 4897 WARN_ON_ONCE(!got_indlen || !new_indlen); 4898 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4899 del->br_blockcount); 4900 4901 got->br_startblock = nullstartblock((int)got_indlen); 4902 4903 new.br_startoff = del_endoff; 4904 new.br_state = got->br_state; 4905 new.br_startblock = nullstartblock((int)new_indlen); 4906 4907 xfs_iext_update_extent(ip, state, icur, got); 4908 xfs_iext_next(ifp, icur); 4909 xfs_iext_insert(ip, icur, &new, state); 4910 4911 da_new = got_indlen + new_indlen - stolen; 4912 del->br_blockcount -= stolen; 4913 break; 4914 } 4915 4916 ASSERT(da_old >= da_new); 4917 da_diff = da_old - da_new; 4918 if (!isrt) 4919 da_diff += del->br_blockcount; 4920 if (da_diff) { 4921 xfs_mod_fdblocks(mp, da_diff, false); 4922 xfs_mod_delalloc(mp, -da_diff); 4923 } 4924 return error; 4925 } 4926 4927 void 4928 xfs_bmap_del_extent_cow( 4929 struct xfs_inode *ip, 4930 struct xfs_iext_cursor *icur, 4931 struct xfs_bmbt_irec *got, 4932 struct xfs_bmbt_irec *del) 4933 { 4934 struct xfs_mount *mp = ip->i_mount; 4935 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); 4936 struct xfs_bmbt_irec new; 4937 xfs_fileoff_t del_endoff, got_endoff; 4938 uint32_t state = BMAP_COWFORK; 4939 4940 XFS_STATS_INC(mp, xs_del_exlist); 4941 4942 del_endoff = del->br_startoff + del->br_blockcount; 4943 got_endoff = got->br_startoff + got->br_blockcount; 4944 4945 ASSERT(del->br_blockcount > 0); 4946 ASSERT(got->br_startoff <= del->br_startoff); 4947 ASSERT(got_endoff >= del_endoff); 4948 ASSERT(!isnullstartblock(got->br_startblock)); 4949 4950 if (got->br_startoff == del->br_startoff) 4951 state |= BMAP_LEFT_FILLING; 4952 if (got_endoff == del_endoff) 4953 state |= BMAP_RIGHT_FILLING; 4954 4955 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4956 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4957 /* 4958 * Matches the whole extent. Delete the entry. 4959 */ 4960 xfs_iext_remove(ip, icur, state); 4961 xfs_iext_prev(ifp, icur); 4962 break; 4963 case BMAP_LEFT_FILLING: 4964 /* 4965 * Deleting the first part of the extent. 4966 */ 4967 got->br_startoff = del_endoff; 4968 got->br_blockcount -= del->br_blockcount; 4969 got->br_startblock = del->br_startblock + del->br_blockcount; 4970 xfs_iext_update_extent(ip, state, icur, got); 4971 break; 4972 case BMAP_RIGHT_FILLING: 4973 /* 4974 * Deleting the last part of the extent. 4975 */ 4976 got->br_blockcount -= del->br_blockcount; 4977 xfs_iext_update_extent(ip, state, icur, got); 4978 break; 4979 case 0: 4980 /* 4981 * Deleting the middle of the extent. 4982 */ 4983 got->br_blockcount = del->br_startoff - got->br_startoff; 4984 4985 new.br_startoff = del_endoff; 4986 new.br_blockcount = got_endoff - del_endoff; 4987 new.br_state = got->br_state; 4988 new.br_startblock = del->br_startblock + del->br_blockcount; 4989 4990 xfs_iext_update_extent(ip, state, icur, got); 4991 xfs_iext_next(ifp, icur); 4992 xfs_iext_insert(ip, icur, &new, state); 4993 break; 4994 } 4995 ip->i_delayed_blks -= del->br_blockcount; 4996 } 4997 4998 /* 4999 * Called by xfs_bmapi to update file extent records and the btree 5000 * after removing space. 
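 *
 * Four cases are handled, depending on how the range being removed
 * (del) overlaps the existing record (got): if del covers all of got,
 * the record is deleted outright; if it covers only the front or only
 * the back, got is trimmed from that end; and if it lies strictly in
 * the middle, got is split into two records, which may require a
 * btree insert and therefore a block reservation.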
5001 */ 5002 STATIC int /* error */ 5003 xfs_bmap_del_extent_real( 5004 xfs_inode_t *ip, /* incore inode pointer */ 5005 xfs_trans_t *tp, /* current transaction pointer */ 5006 struct xfs_iext_cursor *icur, 5007 struct xfs_btree_cur *cur, /* if null, not a btree */ 5008 xfs_bmbt_irec_t *del, /* data to remove from extents */ 5009 int *logflagsp, /* inode logging flags */ 5010 int whichfork, /* data or attr fork */ 5011 uint32_t bflags) /* bmapi flags */ 5012 { 5013 xfs_fsblock_t del_endblock=0; /* first block past del */ 5014 xfs_fileoff_t del_endoff; /* first offset past del */ 5015 int do_fx; /* free extent at end of routine */ 5016 int error; /* error return value */ 5017 int flags = 0;/* inode logging flags */ 5018 struct xfs_bmbt_irec got; /* current extent entry */ 5019 xfs_fileoff_t got_endoff; /* first offset past got */ 5020 int i; /* temp state */ 5021 struct xfs_ifork *ifp; /* inode fork pointer */ 5022 xfs_mount_t *mp; /* mount structure */ 5023 xfs_filblks_t nblks; /* quota/sb block count */ 5024 xfs_bmbt_irec_t new; /* new record to be inserted */ 5025 /* REFERENCED */ 5026 uint qfield; /* quota field to update */ 5027 uint32_t state = xfs_bmap_fork_to_state(whichfork); 5028 struct xfs_bmbt_irec old; 5029 5030 mp = ip->i_mount; 5031 XFS_STATS_INC(mp, xs_del_exlist); 5032 5033 ifp = xfs_ifork_ptr(ip, whichfork); 5034 ASSERT(del->br_blockcount > 0); 5035 xfs_iext_get_extent(ifp, icur, &got); 5036 ASSERT(got.br_startoff <= del->br_startoff); 5037 del_endoff = del->br_startoff + del->br_blockcount; 5038 got_endoff = got.br_startoff + got.br_blockcount; 5039 ASSERT(got_endoff >= del_endoff); 5040 ASSERT(!isnullstartblock(got.br_startblock)); 5041 qfield = 0; 5042 error = 0; 5043 5044 /* 5045 * If it's the case where the directory code is running with no block 5046 * reservation, and the deleted block is in the middle of its extent, 5047 * and the resulting insert of an extent would cause transformation to 5048 * btree format, then reject it. The calling code will then swap blocks 5049 * around instead. We have to do this now, rather than waiting for the 5050 * conversion to btree format, since the transaction will be dirty then. 5051 */ 5052 if (tp->t_blk_res == 0 && 5053 ifp->if_format == XFS_DINODE_FMT_EXTENTS && 5054 ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) && 5055 del->br_startoff > got.br_startoff && del_endoff < got_endoff) 5056 return -ENOSPC; 5057 5058 flags = XFS_ILOG_CORE; 5059 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 5060 if (!(bflags & XFS_BMAPI_REMAP)) { 5061 error = xfs_rtfree_blocks(tp, del->br_startblock, 5062 del->br_blockcount); 5063 if (error) 5064 goto done; 5065 } 5066 5067 do_fx = 0; 5068 qfield = XFS_TRANS_DQ_RTBCOUNT; 5069 } else { 5070 do_fx = 1; 5071 qfield = XFS_TRANS_DQ_BCOUNT; 5072 } 5073 nblks = del->br_blockcount; 5074 5075 del_endblock = del->br_startblock + del->br_blockcount; 5076 if (cur) { 5077 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5078 if (error) 5079 goto done; 5080 if (XFS_IS_CORRUPT(mp, i != 1)) { 5081 error = -EFSCORRUPTED; 5082 goto done; 5083 } 5084 } 5085 5086 if (got.br_startoff == del->br_startoff) 5087 state |= BMAP_LEFT_FILLING; 5088 if (got_endoff == del_endoff) 5089 state |= BMAP_RIGHT_FILLING; 5090 5091 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 5092 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 5093 /* 5094 * Matches the whole extent. Delete the entry. 
5095 */ 5096 xfs_iext_remove(ip, icur, state); 5097 xfs_iext_prev(ifp, icur); 5098 ifp->if_nextents--; 5099 5100 flags |= XFS_ILOG_CORE; 5101 if (!cur) { 5102 flags |= xfs_ilog_fext(whichfork); 5103 break; 5104 } 5105 if ((error = xfs_btree_delete(cur, &i))) 5106 goto done; 5107 if (XFS_IS_CORRUPT(mp, i != 1)) { 5108 error = -EFSCORRUPTED; 5109 goto done; 5110 } 5111 break; 5112 case BMAP_LEFT_FILLING: 5113 /* 5114 * Deleting the first part of the extent. 5115 */ 5116 got.br_startoff = del_endoff; 5117 got.br_startblock = del_endblock; 5118 got.br_blockcount -= del->br_blockcount; 5119 xfs_iext_update_extent(ip, state, icur, &got); 5120 if (!cur) { 5121 flags |= xfs_ilog_fext(whichfork); 5122 break; 5123 } 5124 error = xfs_bmbt_update(cur, &got); 5125 if (error) 5126 goto done; 5127 break; 5128 case BMAP_RIGHT_FILLING: 5129 /* 5130 * Deleting the last part of the extent. 5131 */ 5132 got.br_blockcount -= del->br_blockcount; 5133 xfs_iext_update_extent(ip, state, icur, &got); 5134 if (!cur) { 5135 flags |= xfs_ilog_fext(whichfork); 5136 break; 5137 } 5138 error = xfs_bmbt_update(cur, &got); 5139 if (error) 5140 goto done; 5141 break; 5142 case 0: 5143 /* 5144 * Deleting the middle of the extent. 5145 */ 5146 5147 old = got; 5148 5149 got.br_blockcount = del->br_startoff - got.br_startoff; 5150 xfs_iext_update_extent(ip, state, icur, &got); 5151 5152 new.br_startoff = del_endoff; 5153 new.br_blockcount = got_endoff - del_endoff; 5154 new.br_state = got.br_state; 5155 new.br_startblock = del_endblock; 5156 5157 flags |= XFS_ILOG_CORE; 5158 if (cur) { 5159 error = xfs_bmbt_update(cur, &got); 5160 if (error) 5161 goto done; 5162 error = xfs_btree_increment(cur, 0, &i); 5163 if (error) 5164 goto done; 5165 cur->bc_rec.b = new; 5166 error = xfs_btree_insert(cur, &i); 5167 if (error && error != -ENOSPC) 5168 goto done; 5169 /* 5170 * If get no-space back from btree insert, it tried a 5171 * split, and we have a zero block reservation. Fix up 5172 * our state and return the error. 5173 */ 5174 if (error == -ENOSPC) { 5175 /* 5176 * Reset the cursor, don't trust it after any 5177 * insert operation. 5178 */ 5179 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5180 if (error) 5181 goto done; 5182 if (XFS_IS_CORRUPT(mp, i != 1)) { 5183 error = -EFSCORRUPTED; 5184 goto done; 5185 } 5186 /* 5187 * Update the btree record back 5188 * to the original value. 5189 */ 5190 error = xfs_bmbt_update(cur, &old); 5191 if (error) 5192 goto done; 5193 /* 5194 * Reset the extent record back 5195 * to the original value. 5196 */ 5197 xfs_iext_update_extent(ip, state, icur, &old); 5198 flags = 0; 5199 error = -ENOSPC; 5200 goto done; 5201 } 5202 if (XFS_IS_CORRUPT(mp, i != 1)) { 5203 error = -EFSCORRUPTED; 5204 goto done; 5205 } 5206 } else 5207 flags |= xfs_ilog_fext(whichfork); 5208 5209 ifp->if_nextents++; 5210 xfs_iext_next(ifp, icur); 5211 xfs_iext_insert(ip, icur, &new, state); 5212 break; 5213 } 5214 5215 /* remove reverse mapping */ 5216 xfs_rmap_unmap_extent(tp, ip, whichfork, del); 5217 5218 /* 5219 * If we need to, add to list of extents to delete. 
5220 */ 5221 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { 5222 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5223 xfs_refcount_decrease_extent(tp, del); 5224 } else { 5225 error = __xfs_free_extent_later(tp, del->br_startblock, 5226 del->br_blockcount, NULL, 5227 XFS_AG_RESV_NONE, 5228 ((bflags & XFS_BMAPI_NODISCARD) || 5229 del->br_state == XFS_EXT_UNWRITTEN)); 5230 if (error) 5231 goto done; 5232 } 5233 } 5234 5235 /* 5236 * Adjust inode # blocks in the file. 5237 */ 5238 if (nblks) 5239 ip->i_nblocks -= nblks; 5240 /* 5241 * Adjust quota data. 5242 */ 5243 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5244 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5245 5246 done: 5247 *logflagsp = flags; 5248 return error; 5249 } 5250 5251 /* 5252 * Unmap (remove) blocks from a file. 5253 * If nexts is nonzero then the number of extents to remove is limited to 5254 * that value. If not all extents in the block range can be removed then 5255 * *done is set. 5256 */ 5257 int /* error */ 5258 __xfs_bunmapi( 5259 struct xfs_trans *tp, /* transaction pointer */ 5260 struct xfs_inode *ip, /* incore inode */ 5261 xfs_fileoff_t start, /* first file offset deleted */ 5262 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5263 uint32_t flags, /* misc flags */ 5264 xfs_extnum_t nexts) /* number of extents max */ 5265 { 5266 struct xfs_btree_cur *cur; /* bmap btree cursor */ 5267 struct xfs_bmbt_irec del; /* extent being deleted */ 5268 int error; /* error return value */ 5269 xfs_extnum_t extno; /* extent number in list */ 5270 struct xfs_bmbt_irec got; /* current extent record */ 5271 struct xfs_ifork *ifp; /* inode fork pointer */ 5272 int isrt; /* freeing in rt area */ 5273 int logflags; /* transaction logging flags */ 5274 xfs_extlen_t mod; /* rt extent offset */ 5275 struct xfs_mount *mp = ip->i_mount; 5276 int tmp_logflags; /* partial logging flags */ 5277 int wasdel; /* was a delayed alloc extent */ 5278 int whichfork; /* data or attribute fork */ 5279 xfs_fsblock_t sum; 5280 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5281 xfs_fileoff_t end; 5282 struct xfs_iext_cursor icur; 5283 bool done = false; 5284 5285 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); 5286 5287 whichfork = xfs_bmapi_whichfork(flags); 5288 ASSERT(whichfork != XFS_COW_FORK); 5289 ifp = xfs_ifork_ptr(ip, whichfork); 5290 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) 5291 return -EFSCORRUPTED; 5292 if (xfs_is_shutdown(mp)) 5293 return -EIO; 5294 5295 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5296 ASSERT(len > 0); 5297 ASSERT(nexts >= 0); 5298 5299 error = xfs_iread_extents(tp, ip, whichfork); 5300 if (error) 5301 return error; 5302 5303 if (xfs_iext_count(ifp) == 0) { 5304 *rlen = 0; 5305 return 0; 5306 } 5307 XFS_STATS_INC(mp, xs_blk_unmap); 5308 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5309 end = start + len; 5310 5311 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) { 5312 *rlen = 0; 5313 return 0; 5314 } 5315 end--; 5316 5317 logflags = 0; 5318 if (ifp->if_format == XFS_DINODE_FMT_BTREE) { 5319 ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE); 5320 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5321 cur->bc_ino.flags = 0; 5322 } else 5323 cur = NULL; 5324 5325 if (isrt) { 5326 /* 5327 * Synchronize by locking the bitmap inode. 
5328 */ 5329 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5330 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5331 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5332 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5333 } 5334 5335 extno = 0; 5336 while (end != (xfs_fileoff_t)-1 && end >= start && 5337 (nexts == 0 || extno < nexts)) { 5338 /* 5339 * Is the found extent after a hole in which end lives? 5340 * Just back up to the previous extent, if so. 5341 */ 5342 if (got.br_startoff > end && 5343 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5344 done = true; 5345 break; 5346 } 5347 /* 5348 * Is the last block of this extent before the range 5349 * we're supposed to delete? If so, we're done. 5350 */ 5351 end = XFS_FILEOFF_MIN(end, 5352 got.br_startoff + got.br_blockcount - 1); 5353 if (end < start) 5354 break; 5355 /* 5356 * Then deal with the (possibly delayed) allocated space 5357 * we found. 5358 */ 5359 del = got; 5360 wasdel = isnullstartblock(del.br_startblock); 5361 5362 if (got.br_startoff < start) { 5363 del.br_startoff = start; 5364 del.br_blockcount -= start - got.br_startoff; 5365 if (!wasdel) 5366 del.br_startblock += start - got.br_startoff; 5367 } 5368 if (del.br_startoff + del.br_blockcount > end + 1) 5369 del.br_blockcount = end + 1 - del.br_startoff; 5370 5371 if (!isrt) 5372 goto delete; 5373 5374 sum = del.br_startblock + del.br_blockcount; 5375 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod); 5376 if (mod) { 5377 /* 5378 * Realtime extent not lined up at the end. 5379 * The extent could have been split into written 5380 * and unwritten pieces, or we could just be 5381 * unmapping part of it. But we can't really 5382 * get rid of part of a realtime extent. 5383 */ 5384 if (del.br_state == XFS_EXT_UNWRITTEN) { 5385 /* 5386 * This piece is unwritten, or we're not 5387 * using unwritten extents. Skip over it. 5388 */ 5389 ASSERT(end >= mod); 5390 end -= mod > del.br_blockcount ? 5391 del.br_blockcount : mod; 5392 if (end < got.br_startoff && 5393 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5394 done = true; 5395 break; 5396 } 5397 continue; 5398 } 5399 /* 5400 * It's written, turn it unwritten. 5401 * This is better than zeroing it. 5402 */ 5403 ASSERT(del.br_state == XFS_EXT_NORM); 5404 ASSERT(tp->t_blk_res > 0); 5405 /* 5406 * If this spans a realtime extent boundary, 5407 * chop it back to the start of the one we end at. 5408 */ 5409 if (del.br_blockcount > mod) { 5410 del.br_startoff += del.br_blockcount - mod; 5411 del.br_startblock += del.br_blockcount - mod; 5412 del.br_blockcount = mod; 5413 } 5414 del.br_state = XFS_EXT_UNWRITTEN; 5415 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5416 whichfork, &icur, &cur, &del, 5417 &logflags); 5418 if (error) 5419 goto error0; 5420 goto nodelete; 5421 } 5422 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); 5423 if (mod) { 5424 xfs_extlen_t off = mp->m_sb.sb_rextsize - mod; 5425 5426 /* 5427 * Realtime extent is lined up at the end but not 5428 * at the front. We'll get rid of full extents if 5429 * we can. 5430 */ 5431 if (del.br_blockcount > off) { 5432 del.br_blockcount -= off; 5433 del.br_startoff += off; 5434 del.br_startblock += off; 5435 } else if (del.br_startoff == start && 5436 (del.br_state == XFS_EXT_UNWRITTEN || 5437 tp->t_blk_res == 0)) { 5438 /* 5439 * Can't make it unwritten. There isn't 5440 * a full extent here so just skip it. 
5441 */ 5442 ASSERT(end >= del.br_blockcount); 5443 end -= del.br_blockcount; 5444 if (got.br_startoff > end && 5445 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5446 done = true; 5447 break; 5448 } 5449 continue; 5450 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5451 struct xfs_bmbt_irec prev; 5452 xfs_fileoff_t unwrite_start; 5453 5454 /* 5455 * This one is already unwritten. 5456 * It must have a written left neighbor. 5457 * Unwrite the killed part of that one and 5458 * try again. 5459 */ 5460 if (!xfs_iext_prev_extent(ifp, &icur, &prev)) 5461 ASSERT(0); 5462 ASSERT(prev.br_state == XFS_EXT_NORM); 5463 ASSERT(!isnullstartblock(prev.br_startblock)); 5464 ASSERT(del.br_startblock == 5465 prev.br_startblock + prev.br_blockcount); 5466 unwrite_start = max3(start, 5467 del.br_startoff - mod, 5468 prev.br_startoff); 5469 mod = unwrite_start - prev.br_startoff; 5470 prev.br_startoff = unwrite_start; 5471 prev.br_startblock += mod; 5472 prev.br_blockcount -= mod; 5473 prev.br_state = XFS_EXT_UNWRITTEN; 5474 error = xfs_bmap_add_extent_unwritten_real(tp, 5475 ip, whichfork, &icur, &cur, 5476 &prev, &logflags); 5477 if (error) 5478 goto error0; 5479 goto nodelete; 5480 } else { 5481 ASSERT(del.br_state == XFS_EXT_NORM); 5482 del.br_state = XFS_EXT_UNWRITTEN; 5483 error = xfs_bmap_add_extent_unwritten_real(tp, 5484 ip, whichfork, &icur, &cur, 5485 &del, &logflags); 5486 if (error) 5487 goto error0; 5488 goto nodelete; 5489 } 5490 } 5491 5492 delete: 5493 if (wasdel) { 5494 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5495 &got, &del); 5496 } else { 5497 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur, 5498 &del, &tmp_logflags, whichfork, 5499 flags); 5500 logflags |= tmp_logflags; 5501 } 5502 5503 if (error) 5504 goto error0; 5505 5506 end = del.br_startoff - 1; 5507 nodelete: 5508 /* 5509 * If not done go on to the next (previous) record. 5510 */ 5511 if (end != (xfs_fileoff_t)-1 && end >= start) { 5512 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5513 (got.br_startoff > end && 5514 !xfs_iext_prev_extent(ifp, &icur, &got))) { 5515 done = true; 5516 break; 5517 } 5518 extno++; 5519 } 5520 } 5521 if (done || end == (xfs_fileoff_t)-1 || end < start) 5522 *rlen = 0; 5523 else 5524 *rlen = end - start + 1; 5525 5526 /* 5527 * Convert to a btree if necessary. 5528 */ 5529 if (xfs_bmap_needs_btree(ip, whichfork)) { 5530 ASSERT(cur == NULL); 5531 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5532 &tmp_logflags, whichfork); 5533 logflags |= tmp_logflags; 5534 } else { 5535 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, 5536 whichfork); 5537 } 5538 5539 error0: 5540 /* 5541 * Log everything. Do this after conversion, there's no point in 5542 * logging the extent records if we've converted to btree format. 5543 */ 5544 if ((logflags & xfs_ilog_fext(whichfork)) && 5545 ifp->if_format != XFS_DINODE_FMT_EXTENTS) 5546 logflags &= ~xfs_ilog_fext(whichfork); 5547 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5548 ifp->if_format != XFS_DINODE_FMT_BTREE) 5549 logflags &= ~xfs_ilog_fbroot(whichfork); 5550 /* 5551 * Log inode even in the error case, if the transaction 5552 * is dirty we'll need to shut down the filesystem. 5553 */ 5554 if (logflags) 5555 xfs_trans_log_inode(tp, ip, logflags); 5556 if (cur) { 5557 if (!error) 5558 cur->bc_ino.allocated = 0; 5559 xfs_btree_del_cursor(cur, error); 5560 } 5561 return error; 5562 } 5563 5564 /* Unmap a range of a file. 
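 *
 * A minimal illustrative caller (hypothetical variable names; rolling
 * or committing the transaction between iterations is omitted from
 * this sketch) removes at most two extents per call and keeps going
 * until the whole range has been unmapped:
 *
 *	int	done = 0;
 *	int	error = 0;
 *
 *	while (!done) {
 *		error = xfs_bunmapi(tp, ip, start_fsb, count_fsb, 0, 2,
 *				&done);
 *		if (error)
 *			break;
 *	}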
*/ 5565 int 5566 xfs_bunmapi( 5567 xfs_trans_t *tp, 5568 struct xfs_inode *ip, 5569 xfs_fileoff_t bno, 5570 xfs_filblks_t len, 5571 uint32_t flags, 5572 xfs_extnum_t nexts, 5573 int *done) 5574 { 5575 int error; 5576 5577 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts); 5578 *done = (len == 0); 5579 return error; 5580 } 5581 5582 /* 5583 * Determine whether an extent shift can be accomplished by a merge with the 5584 * extent that precedes the target hole of the shift. 5585 */ 5586 STATIC bool 5587 xfs_bmse_can_merge( 5588 struct xfs_bmbt_irec *left, /* preceding extent */ 5589 struct xfs_bmbt_irec *got, /* current extent to shift */ 5590 xfs_fileoff_t shift) /* shift fsb */ 5591 { 5592 xfs_fileoff_t startoff; 5593 5594 startoff = got->br_startoff - shift; 5595 5596 /* 5597 * The extent, once shifted, must be adjacent in-file and on-disk with 5598 * the preceding extent. 5599 */ 5600 if ((left->br_startoff + left->br_blockcount != startoff) || 5601 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5602 (left->br_state != got->br_state) || 5603 (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN)) 5604 return false; 5605 5606 return true; 5607 } 5608 5609 /* 5610 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5611 * hole in the file. If an extent shift would result in the extent being fully 5612 * adjacent to the extent that currently precedes the hole, we can merge with 5613 * the preceding extent rather than do the shift. 5614 * 5615 * This function assumes the caller has verified a shift-by-merge is possible 5616 * with the provided extents via xfs_bmse_can_merge(). 5617 */ 5618 STATIC int 5619 xfs_bmse_merge( 5620 struct xfs_trans *tp, 5621 struct xfs_inode *ip, 5622 int whichfork, 5623 xfs_fileoff_t shift, /* shift fsb */ 5624 struct xfs_iext_cursor *icur, 5625 struct xfs_bmbt_irec *got, /* extent to shift */ 5626 struct xfs_bmbt_irec *left, /* preceding extent */ 5627 struct xfs_btree_cur *cur, 5628 int *logflags) /* output */ 5629 { 5630 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 5631 struct xfs_bmbt_irec new; 5632 xfs_filblks_t blockcount; 5633 int error, i; 5634 struct xfs_mount *mp = ip->i_mount; 5635 5636 blockcount = left->br_blockcount + got->br_blockcount; 5637 5638 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5639 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5640 ASSERT(xfs_bmse_can_merge(left, got, shift)); 5641 5642 new = *left; 5643 new.br_blockcount = blockcount; 5644 5645 /* 5646 * Update the on-disk extent count, the btree if necessary and log the 5647 * inode. 

/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		shift,		/* shift fsb */
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,		/* extent to shift */
	struct xfs_bmbt_irec	*left,		/* preceding extent */
	struct xfs_btree_cur	*cur,
	int			*logflags)	/* output */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	xfs_filblks_t		blockcount;
	int			error, i;
	struct xfs_mount	*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}

static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}
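
/*
 * The helpers above are driven by the collapse and insert loops below.  A
 * collapse removes a hole by walking extents upward in file offset order and
 * shifting each one left by offset_shift_fsb, merging with the preceding
 * extent whenever xfs_bmse_can_merge() allows it, roughly:
 *
 *	before:  |AAAA|....hole....|BBBB|CCCC|
 *	after:   |AAAA|BBBB|CCCC|
 *
 * Callers (e.g. the collapse-range fallocate path) typically shift one
 * extent per transaction, using *next_fsb and *done to resume the walk.
 */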

int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
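
/*
 * Example of the bound check above: the on-disk bmbt record stores
 * br_startoff in a BMBT_STARTOFF_BITLEN-bit field, so BMBT_STARTOFF_MASK is
 * the largest representable file offset.  If the last extent starts at
 * offset S and the requested right shift is N, then ((S + N) &
 * BMBT_STARTOFF_MASK) < S means the shifted offset would wrap past that
 * limit, and the caller gets -EINVAL before any extents are moved.
 */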

int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way. We should
		 * never find mergeable extents in this scenario. Check anyway
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
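
/*
 * Right shifts walk the extent list from the end of the file back towards
 * stop_fsb.  An insert-range style caller therefore first splits the extent
 * straddling the insertion offset (see xfs_bmap_split_extent() below) and
 * then loops on the shift, rolling the transaction between iterations,
 * until *done is set.  Sketched with placeholder names:
 *
 *	next_fsb = NULLFSBLOCK;
 *	while (!done) {
 *		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
 *				&done, stop_fsb);
 *		if (error)
 *			break;
 *	}
 *
 * shift_fsb and stop_fsb stand in for the caller's own values.
 */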

/*
 * Split an extent into two extents at the block given by @split_fsb, so that
 * @split_fsb becomes the first block of the new (second) extent.  If
 * @split_fsb lies in a hole or at the first block of an extent, there is
 * nothing to split and we return 0.
 */
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new;		/* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		gotblkcnt;	/* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_bmap_update_get_group(tp->t_mountp, bi);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}
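
/*
 * Illustrative use of the deferred mapping hooks (placeholder names, not a
 * helper defined here): a remapping caller queues the work while it holds
 * the transaction and lets the defer machinery replay it through
 * xfs_bmap_finish_one():
 *
 *	xfs_bmap_unmap_extent(tp, ip, &old_irec);
 *	xfs_bmap_map_extent(tp, ip, &new_irec);
 *	error = xfs_defer_finish(&tp);
 *
 * Only real data fork extents are queued; holes and delalloc extents are
 * filtered out by xfs_bmap_is_update_needed().
 */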

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}

/*
 * Process one of the deferred bmap operations. We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_bmap_intent		*bi)
{
	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
	int				error = 0;

	ASSERT(tp->t_highest_agno == NULLAGNUMBER);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_owner->i_ino, bi->bi_whichfork,
			bmap->br_startoff, bmap->br_blockcount,
			bmap->br_state);

	if (WARN_ON_ONCE(bi->bi_whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
				bmap->br_blockcount, bmap->br_startblock, 0);
		bmap->br_blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
				&bmap->br_blockcount, XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		return __this_address;

	if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
		if (!xfs_verify_rtext(mp, irec->br_startblock,
				      irec->br_blockcount))
			return __this_address;
	} else {
		if (!xfs_verify_fsbext(mp, irec->br_startblock,
				       irec->br_blockcount))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}

int __init
xfs_bmap_intent_init_cache(void)
{
	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
			sizeof(struct xfs_bmap_intent),
			0, 0, NULL);

	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_bmap_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_bmap_intent_cache);
	xfs_bmap_intent_cache = NULL;
}
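
/*
 * The intent cache is created once when the xfs module loads and destroyed
 * when it unloads; a minimal sketch of the pairing (hook placement is the
 * caller's, named here only for illustration):
 *
 *	error = xfs_bmap_intent_init_cache();
 *	if (error)
 *		return error;
 *	...
 *	xfs_bmap_intent_destroy_cache();
 *
 * kmem_cache_create() returns NULL on failure, which init maps to -ENOMEM.
 */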