/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_fileoff_t	new_last_fsb = 0;
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
			new_last_fsb = roundup_64(*last_fsb, align);
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
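	 *
	 * A worked example with illustrative numbers: with *last_fsb = 103,
	 * extsize = 16 and no stripe alignment applied above, align becomes
	 * 16 and new_last_fsb is roundup_64(103, 16) = 112, so the request
	 * ends on an extent size boundary.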
	 */
	if (extsize) {
		if (new_last_fsb)
			align = roundup_64(new_last_fsb, extsize);
		else
			align = extsize;
		new_last_fsb = roundup_64(*last_fsb, align);
	}

	if (new_last_fsb) {
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x\n",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		bmapi_flag;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t	free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return XFS_ERROR(error);
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and set up the transaction.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, resblks,
			XFS_WRITE_LOG_RES(mp), resrtextents,
			XFS_TRANS_PERM_LOG_RES,
			XFS_WRITE_LOG_COUNT);
	/*
	 * Check for running out of space, note: need lock to return
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return XFS_ERROR(error);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	bmapi_flag = 0;
	if (offset < XFS_ISIZE(ip) || extsz)
		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
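	 *
	 * On success, imap describes the first real extent allocated over
	 * the requested range; it need not cover the entire request, so
	 * callers must not assume a single call maps everything they asked
	 * for.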
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flag,
				&firstfsb, 0, imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = XFS_ERROR(ENOSPC);
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize.  We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t	start_fsb;
	xfs_filblks_t	count_fsb;
	xfs_fsblock_t	firstblock;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		firstblock = NULLFSBLOCK;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}

/*
 * Determine the initial size of the preallocation.  We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write
 * or an extending write when determining the preallocation size.  Hence we
 * need to look up the extent that ends at the current write offset and use
 * the result to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for
 * the preallocation size.  If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis.  This
 * ensures that for large files the preallocation size always extends to
 * MAXEXTLEN rather than falling short due to things like stripe unit/width
 * alignment of real extents.
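 *
 * As a worked example with illustrative numbers: on a 4k block size
 * filesystem, a 1MB data extent ending at the write offset is 256 blocks,
 * well under MAXEXTLEN / 2, so 256 blocks seeds the preallocation; once the
 * preceding extent grows beyond MAXEXTLEN / 2 the file offset itself is used
 * as the basis instead.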
 */
STATIC int
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t	start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF. i.e. if the size
	 * is 4096 bytes, we only have one block at FSB 0, but
	 * XFS_B_TO_FSB(4096) will return FSB 1.  Hence if there are blocks
	 * in the file, we want to point to the block prior to the EOF block
	 * and not the hole that maps directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount;
	return XFS_B_TO_FSB(mp, offset);
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (alloc_blocks > 0) {
		int shift = 0;
		int64_t freesp;

		alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
					rounddown_pow_of_two(alloc_blocks));

		xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
		freesp = mp->m_sb.sb_fdblocks;

		/*
		 * Each low space threshold crossed shrinks the preallocation
		 * further: shift starts at 2 below the 5% free space
		 * threshold and gains one for each of the 4%, 3%, 2% and 1%
		 * thresholds, cutting the initial size by a factor of 4 up
		 * to 64.
		 */
		if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
			shift = 2;
			if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
				shift++;
			if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
				shift++;
			if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
				shift++;
			if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
				shift++;
		}
		if (shift)
			alloc_blocks >>= shift;

		/*
		 * If we are still trying to allocate more space than is
		 * available, squash the prealloc hard. This can happen if we
		 * have a large file on a small filesystem and the above
		 * lowspace thresholds are smaller than MAXEXTLEN.
		 */
		while (alloc_blocks >= freesp)
			alloc_blocks >>= 4;
	}

	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	return alloc_blocks;
}

int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there.  This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case ENOSPC:
	case EDQUOT:
		break;
	default:
		return XFS_ERROR(error);
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT.
	 * Retry without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return XFS_ERROR(error ? error : ENOSPC);
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.
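		 * Each successful pass converts at most one mapping, so
		 * several passes may be needed to cover the whole delalloc
		 * extent.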
		 * The other space that might be allocated is in the
		 * delayed allocation extent on which we sit but before
		 * our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while.  We have to be careful about truncates or
			 * hole punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is that the
			 * pages for the range we are being asked to convert
			 * are locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced
			 * to check each map we look at for overlap with the
			 * desired range and abort as soon as we find it.
			 * Also, given that we only return a single map,
			 * having one beyond what we can return is probably a
			 * bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback.  Because
			 * they are about to be tossed, we don't need to
			 * write them back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(NULL, ip, &last_block,
						     XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb,
						XFS_BMAPI_STACK_SWITCH,
						&first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request.
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data; try again.
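		 * This happens when the delalloc extent started before the
		 * caller's buffer: the first allocation may convert only
		 * that leading part, so advance past what was allocated and
		 * loop.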
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real.  Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS - we can't risk recursing back into
		 * the filesystem here, as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, resblks,
					  XFS_WRITE_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_WRITE_LOG_COUNT);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb,
					1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
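		 *
		 * For example, with a 4k block size, converting the single
		 * block backing a 2000 byte write at offset 0 must log an
		 * i_size of 2000, not the 4096 bytes the converted extent
		 * rounds up to.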
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}