1 /* 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_bit.h" 21 #include "xfs_log.h" 22 #include "xfs_inum.h" 23 #include "xfs_trans.h" 24 #include "xfs_sb.h" 25 #include "xfs_ag.h" 26 #include "xfs_alloc.h" 27 #include "xfs_quota.h" 28 #include "xfs_mount.h" 29 #include "xfs_bmap_btree.h" 30 #include "xfs_alloc_btree.h" 31 #include "xfs_ialloc_btree.h" 32 #include "xfs_dinode.h" 33 #include "xfs_inode.h" 34 #include "xfs_btree.h" 35 #include "xfs_bmap.h" 36 #include "xfs_rtalloc.h" 37 #include "xfs_error.h" 38 #include "xfs_itable.h" 39 #include "xfs_rw.h" 40 #include "xfs_attr.h" 41 #include "xfs_buf_item.h" 42 #include "xfs_trans_space.h" 43 #include "xfs_utils.h" 44 #include "xfs_iomap.h" 45 #include "xfs_trace.h" 46 47 48 #define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ 49 << mp->m_writeio_log) 50 #define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP 51 52 STATIC int 53 xfs_iomap_eof_align_last_fsb( 54 xfs_mount_t *mp, 55 xfs_inode_t *ip, 56 xfs_extlen_t extsize, 57 xfs_fileoff_t *last_fsb) 58 { 59 xfs_fileoff_t new_last_fsb = 0; 60 xfs_extlen_t align; 61 int eof, error; 62 63 if (XFS_IS_REALTIME_INODE(ip)) 64 ; 65 /* 66 * If mounted with the "-o swalloc" option, roundup the allocation 67 * request to a stripe width boundary if the file size is >= 68 * stripe width and we are allocating past the allocation eof. 69 */ 70 else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) && 71 (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_swidth))) 72 new_last_fsb = roundup_64(*last_fsb, mp->m_swidth); 73 /* 74 * Roundup the allocation request to a stripe unit (m_dalign) boundary 75 * if the file size is >= stripe unit size, and we are allocating past 76 * the allocation eof. 77 */ 78 else if (mp->m_dalign && (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_dalign))) 79 new_last_fsb = roundup_64(*last_fsb, mp->m_dalign); 80 81 /* 82 * Always round up the allocation request to an extent boundary 83 * (when file on a real-time subvolume or has di_extsize hint). 84 */ 85 if (extsize) { 86 if (new_last_fsb) 87 align = roundup_64(new_last_fsb, extsize); 88 else 89 align = extsize; 90 new_last_fsb = roundup_64(*last_fsb, align); 91 } 92 93 if (new_last_fsb) { 94 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); 95 if (error) 96 return error; 97 if (eof) 98 *last_fsb = new_last_fsb; 99 } 100 return 0; 101 } 102 103 STATIC int 104 xfs_cmn_err_fsblock_zero( 105 xfs_inode_t *ip, 106 xfs_bmbt_irec_t *imap) 107 { 108 xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount, 109 "Access to block zero in inode %llu " 110 "start_block: %llx start_off: %llx " 111 "blkcnt: %llx extent-state: %x\n", 112 (unsigned long long)ip->i_ino, 113 (unsigned long long)imap->br_startblock, 114 (unsigned long long)imap->br_startoff, 115 (unsigned long long)imap->br_blockcount, 116 imap->br_state); 117 return EFSCORRUPTED; 118 } 119 120 int 121 xfs_iomap_write_direct( 122 xfs_inode_t *ip, 123 xfs_off_t offset, 124 size_t count, 125 xfs_bmbt_irec_t *imap, 126 int nmaps) 127 { 128 xfs_mount_t *mp = ip->i_mount; 129 xfs_fileoff_t offset_fsb; 130 xfs_fileoff_t last_fsb; 131 xfs_filblks_t count_fsb, resaligned; 132 xfs_fsblock_t firstfsb; 133 xfs_extlen_t extsz, temp; 134 int nimaps; 135 int bmapi_flag; 136 int quota_flag; 137 int rt; 138 xfs_trans_t *tp; 139 xfs_bmap_free_t free_list; 140 uint qblocks, resblks, resrtextents; 141 int committed; 142 int error; 143 144 /* 145 * Make sure that the dquots are there. This doesn't hold 146 * the ilock across a disk read. 147 */ 148 error = xfs_qm_dqattach_locked(ip, 0); 149 if (error) 150 return XFS_ERROR(error); 151 152 rt = XFS_IS_REALTIME_INODE(ip); 153 extsz = xfs_get_extsz_hint(ip); 154 155 offset_fsb = XFS_B_TO_FSBT(mp, offset); 156 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 157 if ((offset + count) > ip->i_size) { 158 error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); 159 if (error) 160 goto error_out; 161 } else { 162 if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) 163 last_fsb = MIN(last_fsb, (xfs_fileoff_t) 164 imap->br_blockcount + 165 imap->br_startoff); 166 } 167 count_fsb = last_fsb - offset_fsb; 168 ASSERT(count_fsb > 0); 169 170 resaligned = count_fsb; 171 if (unlikely(extsz)) { 172 if ((temp = do_mod(offset_fsb, extsz))) 173 resaligned += temp; 174 if ((temp = do_mod(resaligned, extsz))) 175 resaligned += extsz - temp; 176 } 177 178 if (unlikely(rt)) { 179 resrtextents = qblocks = resaligned; 180 resrtextents /= mp->m_sb.sb_rextsize; 181 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 182 quota_flag = XFS_QMOPT_RES_RTBLKS; 183 } else { 184 resrtextents = 0; 185 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); 186 quota_flag = XFS_QMOPT_RES_REGBLKS; 187 } 188 189 /* 190 * Allocate and setup the transaction 191 */ 192 xfs_iunlock(ip, XFS_ILOCK_EXCL); 193 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); 194 error = xfs_trans_reserve(tp, resblks, 195 XFS_WRITE_LOG_RES(mp), resrtextents, 196 XFS_TRANS_PERM_LOG_RES, 197 XFS_WRITE_LOG_COUNT); 198 /* 199 * Check for running out of space, note: need lock to return 200 */ 201 if (error) 202 xfs_trans_cancel(tp, 0); 203 xfs_ilock(ip, XFS_ILOCK_EXCL); 204 if (error) 205 goto error_out; 206 207 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); 208 if (error) 209 goto error1; 210 211 xfs_trans_ijoin(tp, ip); 212 213 bmapi_flag = XFS_BMAPI_WRITE; 214 if (offset < ip->i_size || extsz) 215 bmapi_flag |= XFS_BMAPI_PREALLOC; 216 217 /* 218 * Issue the xfs_bmapi() call to allocate the blocks. 219 * 220 * From this point onwards we overwrite the imap pointer that the 221 * caller gave to us. 222 */ 223 xfs_bmap_init(&free_list, &firstfsb); 224 nimaps = 1; 225 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, 226 &firstfsb, 0, imap, &nimaps, &free_list); 227 if (error) 228 goto error0; 229 230 /* 231 * Complete the transaction 232 */ 233 error = xfs_bmap_finish(&tp, &free_list, &committed); 234 if (error) 235 goto error0; 236 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 237 if (error) 238 goto error_out; 239 240 /* 241 * Copy any maps to caller's array and return any error. 242 */ 243 if (nimaps == 0) { 244 error = ENOSPC; 245 goto error_out; 246 } 247 248 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) { 249 error = xfs_cmn_err_fsblock_zero(ip, imap); 250 goto error_out; 251 } 252 253 return 0; 254 255 error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ 256 xfs_bmap_cancel(&free_list); 257 xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); 258 259 error1: /* Just cancel transaction */ 260 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 261 262 error_out: 263 return XFS_ERROR(error); 264 } 265 266 /* 267 * If the caller is doing a write at the end of the file, then extend the 268 * allocation out to the file system's write iosize. We clean up any extra 269 * space left over when the file is closed in xfs_inactive(). 270 * 271 * If we find we already have delalloc preallocation beyond EOF, don't do more 272 * preallocation as it it not needed. 273 */ 274 STATIC int 275 xfs_iomap_eof_want_preallocate( 276 xfs_mount_t *mp, 277 xfs_inode_t *ip, 278 xfs_off_t offset, 279 size_t count, 280 xfs_bmbt_irec_t *imap, 281 int nimaps, 282 int *prealloc) 283 { 284 xfs_fileoff_t start_fsb; 285 xfs_filblks_t count_fsb; 286 xfs_fsblock_t firstblock; 287 int n, error, imaps; 288 int found_delalloc = 0; 289 290 *prealloc = 0; 291 if ((offset + count) <= ip->i_size) 292 return 0; 293 294 /* 295 * If there are any real blocks past eof, then don't 296 * do any speculative allocation. 297 */ 298 start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1))); 299 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 300 while (count_fsb > 0) { 301 imaps = nimaps; 302 firstblock = NULLFSBLOCK; 303 error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0, 304 &firstblock, 0, imap, &imaps, NULL); 305 if (error) 306 return error; 307 for (n = 0; n < imaps; n++) { 308 if ((imap[n].br_startblock != HOLESTARTBLOCK) && 309 (imap[n].br_startblock != DELAYSTARTBLOCK)) 310 return 0; 311 start_fsb += imap[n].br_blockcount; 312 count_fsb -= imap[n].br_blockcount; 313 314 if (imap[n].br_startblock == DELAYSTARTBLOCK) 315 found_delalloc = 1; 316 } 317 } 318 if (!found_delalloc) 319 *prealloc = 1; 320 return 0; 321 } 322 323 /* 324 * If we don't have a user specified preallocation size, dynamically increase 325 * the preallocation size as the size of the file grows. Cap the maximum size 326 * at a single extent or less if the filesystem is near full. The closer the 327 * filesystem is to full, the smaller the maximum prealocation. 328 */ 329 STATIC xfs_fsblock_t 330 xfs_iomap_prealloc_size( 331 struct xfs_mount *mp, 332 struct xfs_inode *ip) 333 { 334 xfs_fsblock_t alloc_blocks = 0; 335 336 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { 337 int shift = 0; 338 int64_t freesp; 339 340 /* 341 * rounddown_pow_of_two() returns an undefined result 342 * if we pass in alloc_blocks = 0. Hence the "+ 1" to 343 * ensure we always pass in a non-zero value. 344 */ 345 alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1; 346 alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN, 347 rounddown_pow_of_two(alloc_blocks)); 348 349 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); 350 freesp = mp->m_sb.sb_fdblocks; 351 if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) { 352 shift = 2; 353 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT]) 354 shift++; 355 if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT]) 356 shift++; 357 if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT]) 358 shift++; 359 if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT]) 360 shift++; 361 } 362 if (shift) 363 alloc_blocks >>= shift; 364 } 365 366 if (alloc_blocks < mp->m_writeio_blocks) 367 alloc_blocks = mp->m_writeio_blocks; 368 369 return alloc_blocks; 370 } 371 372 int 373 xfs_iomap_write_delay( 374 xfs_inode_t *ip, 375 xfs_off_t offset, 376 size_t count, 377 xfs_bmbt_irec_t *ret_imap) 378 { 379 xfs_mount_t *mp = ip->i_mount; 380 xfs_fileoff_t offset_fsb; 381 xfs_fileoff_t last_fsb; 382 xfs_off_t aligned_offset; 383 xfs_fileoff_t ioalign; 384 xfs_fsblock_t firstblock; 385 xfs_extlen_t extsz; 386 int nimaps; 387 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; 388 int prealloc, flushed = 0; 389 int error; 390 391 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 392 393 /* 394 * Make sure that the dquots are there. This doesn't hold 395 * the ilock across a disk read. 396 */ 397 error = xfs_qm_dqattach_locked(ip, 0); 398 if (error) 399 return XFS_ERROR(error); 400 401 extsz = xfs_get_extsz_hint(ip); 402 offset_fsb = XFS_B_TO_FSBT(mp, offset); 403 404 405 error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, 406 imap, XFS_WRITE_IMAPS, &prealloc); 407 if (error) 408 return error; 409 410 retry: 411 if (prealloc) { 412 xfs_fsblock_t alloc_blocks = xfs_iomap_prealloc_size(mp, ip); 413 414 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); 415 ioalign = XFS_B_TO_FSBT(mp, aligned_offset); 416 last_fsb = ioalign + alloc_blocks; 417 } else { 418 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 419 } 420 421 if (prealloc || extsz) { 422 error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); 423 if (error) 424 return error; 425 } 426 427 nimaps = XFS_WRITE_IMAPS; 428 firstblock = NULLFSBLOCK; 429 error = xfs_bmapi(NULL, ip, offset_fsb, 430 (xfs_filblks_t)(last_fsb - offset_fsb), 431 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | 432 XFS_BMAPI_ENTIRE, &firstblock, 1, imap, 433 &nimaps, NULL); 434 switch (error) { 435 case 0: 436 case ENOSPC: 437 case EDQUOT: 438 break; 439 default: 440 return XFS_ERROR(error); 441 } 442 443 /* 444 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. For 445 * ENOSPC, * flush all other inodes with delalloc blocks to free up 446 * some of the excess reserved metadata space. For both cases, retry 447 * without EOF preallocation. 448 */ 449 if (nimaps == 0) { 450 trace_xfs_delalloc_enospc(ip, offset, count); 451 if (flushed) 452 return XFS_ERROR(error ? error : ENOSPC); 453 454 if (error == ENOSPC) { 455 xfs_iunlock(ip, XFS_ILOCK_EXCL); 456 xfs_flush_inodes(ip); 457 xfs_ilock(ip, XFS_ILOCK_EXCL); 458 } 459 460 flushed = 1; 461 error = 0; 462 prealloc = 0; 463 goto retry; 464 } 465 466 if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) 467 return xfs_cmn_err_fsblock_zero(ip, &imap[0]); 468 469 *ret_imap = imap[0]; 470 return 0; 471 } 472 473 /* 474 * Pass in a delayed allocate extent, convert it to real extents; 475 * return to the caller the extent we create which maps on top of 476 * the originating callers request. 477 * 478 * Called without a lock on the inode. 479 * 480 * We no longer bother to look at the incoming map - all we have to 481 * guarantee is that whatever we allocate fills the required range. 482 */ 483 int 484 xfs_iomap_write_allocate( 485 xfs_inode_t *ip, 486 xfs_off_t offset, 487 size_t count, 488 xfs_bmbt_irec_t *imap) 489 { 490 xfs_mount_t *mp = ip->i_mount; 491 xfs_fileoff_t offset_fsb, last_block; 492 xfs_fileoff_t end_fsb, map_start_fsb; 493 xfs_fsblock_t first_block; 494 xfs_bmap_free_t free_list; 495 xfs_filblks_t count_fsb; 496 xfs_trans_t *tp; 497 int nimaps, committed; 498 int error = 0; 499 int nres; 500 501 /* 502 * Make sure that the dquots are there. 503 */ 504 error = xfs_qm_dqattach(ip, 0); 505 if (error) 506 return XFS_ERROR(error); 507 508 offset_fsb = XFS_B_TO_FSBT(mp, offset); 509 count_fsb = imap->br_blockcount; 510 map_start_fsb = imap->br_startoff; 511 512 XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); 513 514 while (count_fsb != 0) { 515 /* 516 * Set up a transaction with which to allocate the 517 * backing store for the file. Do allocations in a 518 * loop until we get some space in the range we are 519 * interested in. The other space that might be allocated 520 * is in the delayed allocation extent on which we sit 521 * but before our buffer starts. 522 */ 523 524 nimaps = 0; 525 while (nimaps == 0) { 526 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); 527 tp->t_flags |= XFS_TRANS_RESERVE; 528 nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); 529 error = xfs_trans_reserve(tp, nres, 530 XFS_WRITE_LOG_RES(mp), 531 0, XFS_TRANS_PERM_LOG_RES, 532 XFS_WRITE_LOG_COUNT); 533 if (error) { 534 xfs_trans_cancel(tp, 0); 535 return XFS_ERROR(error); 536 } 537 xfs_ilock(ip, XFS_ILOCK_EXCL); 538 xfs_trans_ijoin(tp, ip); 539 540 xfs_bmap_init(&free_list, &first_block); 541 542 /* 543 * it is possible that the extents have changed since 544 * we did the read call as we dropped the ilock for a 545 * while. We have to be careful about truncates or hole 546 * punchs here - we are not allowed to allocate 547 * non-delalloc blocks here. 548 * 549 * The only protection against truncation is the pages 550 * for the range we are being asked to convert are 551 * locked and hence a truncate will block on them 552 * first. 553 * 554 * As a result, if we go beyond the range we really 555 * need and hit an delalloc extent boundary followed by 556 * a hole while we have excess blocks in the map, we 557 * will fill the hole incorrectly and overrun the 558 * transaction reservation. 559 * 560 * Using a single map prevents this as we are forced to 561 * check each map we look for overlap with the desired 562 * range and abort as soon as we find it. Also, given 563 * that we only return a single map, having one beyond 564 * what we can return is probably a bit silly. 565 * 566 * We also need to check that we don't go beyond EOF; 567 * this is a truncate optimisation as a truncate sets 568 * the new file size before block on the pages we 569 * currently have locked under writeback. Because they 570 * are about to be tossed, we don't need to write them 571 * back.... 572 */ 573 nimaps = 1; 574 end_fsb = XFS_B_TO_FSB(mp, ip->i_size); 575 error = xfs_bmap_last_offset(NULL, ip, &last_block, 576 XFS_DATA_FORK); 577 if (error) 578 goto trans_cancel; 579 580 last_block = XFS_FILEOFF_MAX(last_block, end_fsb); 581 if ((map_start_fsb + count_fsb) > last_block) { 582 count_fsb = last_block - map_start_fsb; 583 if (count_fsb == 0) { 584 error = EAGAIN; 585 goto trans_cancel; 586 } 587 } 588 589 /* 590 * Go get the actual blocks. 591 * 592 * From this point onwards we overwrite the imap 593 * pointer that the caller gave to us. 594 */ 595 error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, 596 XFS_BMAPI_WRITE, &first_block, 1, 597 imap, &nimaps, &free_list); 598 if (error) 599 goto trans_cancel; 600 601 error = xfs_bmap_finish(&tp, &free_list, &committed); 602 if (error) 603 goto trans_cancel; 604 605 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 606 if (error) 607 goto error0; 608 609 xfs_iunlock(ip, XFS_ILOCK_EXCL); 610 } 611 612 /* 613 * See if we were able to allocate an extent that 614 * covers at least part of the callers request 615 */ 616 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) 617 return xfs_cmn_err_fsblock_zero(ip, imap); 618 619 if ((offset_fsb >= imap->br_startoff) && 620 (offset_fsb < (imap->br_startoff + 621 imap->br_blockcount))) { 622 XFS_STATS_INC(xs_xstrat_quick); 623 return 0; 624 } 625 626 /* 627 * So far we have not mapped the requested part of the 628 * file, just surrounding data, try again. 629 */ 630 count_fsb -= imap->br_blockcount; 631 map_start_fsb = imap->br_startoff + imap->br_blockcount; 632 } 633 634 trans_cancel: 635 xfs_bmap_cancel(&free_list); 636 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 637 error0: 638 xfs_iunlock(ip, XFS_ILOCK_EXCL); 639 return XFS_ERROR(error); 640 } 641 642 int 643 xfs_iomap_write_unwritten( 644 xfs_inode_t *ip, 645 xfs_off_t offset, 646 size_t count) 647 { 648 xfs_mount_t *mp = ip->i_mount; 649 xfs_fileoff_t offset_fsb; 650 xfs_filblks_t count_fsb; 651 xfs_filblks_t numblks_fsb; 652 xfs_fsblock_t firstfsb; 653 int nimaps; 654 xfs_trans_t *tp; 655 xfs_bmbt_irec_t imap; 656 xfs_bmap_free_t free_list; 657 uint resblks; 658 int committed; 659 int error; 660 661 trace_xfs_unwritten_convert(ip, offset, count); 662 663 offset_fsb = XFS_B_TO_FSBT(mp, offset); 664 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 665 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); 666 667 /* 668 * Reserve enough blocks in this transaction for two complete extent 669 * btree splits. We may be converting the middle part of an unwritten 670 * extent and in this case we will insert two new extents in the btree 671 * each of which could cause a full split. 672 * 673 * This reservation amount will be used in the first call to 674 * xfs_bmbt_split() to select an AG with enough space to satisfy the 675 * rest of the operation. 676 */ 677 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; 678 679 do { 680 /* 681 * set up a transaction to convert the range of extents 682 * from unwritten to real. Do allocations in a loop until 683 * we have covered the range passed in. 684 * 685 * Note that we open code the transaction allocation here 686 * to pass KM_NOFS--we can't risk to recursing back into 687 * the filesystem here as we might be asked to write out 688 * the same inode that we complete here and might deadlock 689 * on the iolock. 690 */ 691 xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); 692 tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS); 693 tp->t_flags |= XFS_TRANS_RESERVE; 694 error = xfs_trans_reserve(tp, resblks, 695 XFS_WRITE_LOG_RES(mp), 0, 696 XFS_TRANS_PERM_LOG_RES, 697 XFS_WRITE_LOG_COUNT); 698 if (error) { 699 xfs_trans_cancel(tp, 0); 700 return XFS_ERROR(error); 701 } 702 703 xfs_ilock(ip, XFS_ILOCK_EXCL); 704 xfs_trans_ijoin(tp, ip); 705 706 /* 707 * Modify the unwritten extent state of the buffer. 708 */ 709 xfs_bmap_init(&free_list, &firstfsb); 710 nimaps = 1; 711 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, 712 XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, 713 1, &imap, &nimaps, &free_list); 714 if (error) 715 goto error_on_bmapi_transaction; 716 717 error = xfs_bmap_finish(&(tp), &(free_list), &committed); 718 if (error) 719 goto error_on_bmapi_transaction; 720 721 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 722 xfs_iunlock(ip, XFS_ILOCK_EXCL); 723 if (error) 724 return XFS_ERROR(error); 725 726 if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) 727 return xfs_cmn_err_fsblock_zero(ip, &imap); 728 729 if ((numblks_fsb = imap.br_blockcount) == 0) { 730 /* 731 * The numblks_fsb value should always get 732 * smaller, otherwise the loop is stuck. 733 */ 734 ASSERT(imap.br_blockcount); 735 break; 736 } 737 offset_fsb += numblks_fsb; 738 count_fsb -= numblks_fsb; 739 } while (count_fsb > 0); 740 741 return 0; 742 743 error_on_bmapi_transaction: 744 xfs_bmap_cancel(&free_list); 745 xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); 746 xfs_iunlock(ip, XFS_ILOCK_EXCL); 747 return XFS_ERROR(error); 748 } 749