/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_dinode.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
					 << mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_fileoff_t	new_last_fsb = 0;
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
			new_last_fsb = roundup_64(*last_fsb, align);
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (new_last_fsb)
			align = roundup_64(new_last_fsb, extsize);
		else
			align = extsize;
		new_last_fsb = roundup_64(*last_fsb, align);
	}

	if (new_last_fsb) {
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return EFSCORRUPTED;
}
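/*
 * Illustrative example (editorial sketch with assumed numbers, not from
 * the original code): with a 16-block extent size hint and no stripe
 * alignment, xfs_iomap_eof_align_last_fsb() turns a request ending at
 * FSB 70 into one ending at roundup_64(70, 16) == 80, and the rounded-up
 * endpoint is only used if it still sits beyond the last allocated block.
 */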
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		bmapi_flag;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t	free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return XFS_ERROR(error);
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and set up the transaction.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				  resblks, resrtextents);
	/*
	 * Check for running out of space; we have not taken the ilock yet,
	 * so we can just cancel and return here.
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return XFS_ERROR(error);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	bmapi_flag = 0;
	if (offset < XFS_ISIZE(ip) || extsz)
		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flag,
				&firstfsb, 0, imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = XFS_ERROR(ENOSPC);
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}
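/*
 * Illustrative example (editorial sketch with assumed numbers): the
 * helpers below implement speculative EOF preallocation for buffered
 * writes.  Assuming 4096-byte blocks and mp->m_writeio_blocks == 16, a
 * write extending a 1 MB file qualifies, while a first write to a 4k
 * file on a dynamic-preallocation mount does not, because the file is
 * still smaller than the 64k minimum prealloc size checked below.
 */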
/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize.  We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t   start_fsb;
	xfs_filblks_t   count_fsb;
	xfs_fsblock_t	firstblock;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If the file is smaller than the minimum prealloc and we are using
	 * dynamic preallocation, don't do any preallocation at all as it is
	 * likely this is the only write to the file that is going to be done.
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		firstblock = NULLFSBLOCK;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}
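/*
 * Illustrative example (editorial sketch with assumed numbers): if the
 * data extent ending at the write offset is 48 blocks long, the initial
 * preallocation basis computed below is 48 << 1 == 96 blocks.  Once
 * that extent exceeds half of MAXEXTLEN, the write offset itself (in
 * blocks) becomes the basis, so large files keep ramping up to the
 * full MAXEXTLEN.
 */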
/*
 * Determine the initial size of the preallocation. We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write or
 * an extending write when determining the preallocation size.  Hence we need
 * to look up the extent that ends at the current write offset and use the
 * result to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for the
 * preallocation size. If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis. This
 * ensures that for large files the preallocation size always extends to
 * MAXEXTLEN rather than falling short due to things like stripe unit/width
 * alignment of real extents.
 */
STATIC xfs_fsblock_t
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t   start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/* If the file is small, then use the minimum prealloc */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF. i.e. if the size is
	 * 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
	 * will return FSB 1. Hence if there are blocks in the file, we want to
	 * point to the block prior to the EOF block and not the hole that maps
	 * directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount << 1;
	return XFS_B_TO_FSB(mp, offset);
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}
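/*
 * Illustrative example (editorial sketch with assumed numbers): with
 * q_prealloc_hi_wmark at 100000 blocks and 97000 blocks already
 * reserved, freesp is 3000.  If that falls below the 5% and 3%
 * low-space marks but stays above 1%, the shift computed below is 4,
 * cutting the preallocation to freesp >> 4 whenever that is more
 * aggressive than the throttle already in effect.
 */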
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* over hi wmark, squash the prealloc completely */
	if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows. Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	freesp = mp->m_sb.sb_fdblocks;
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size and provide a shift
	 * value to throttle with.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift);

	/*
	 * The final prealloc size is set to the minimum of free space
	 * available in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined
	 * by the global low free space values and per-quota low free space
	 * values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}
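/*
 * Illustrative example (editorial sketch with assumed numbers): with an
 * initial basis of 8192 blocks, no quota throttling, and free space
 * between the 4% and 5% thresholds, the shift is 2 and the result is
 * rounddown_pow_of_two(8192 >> 2) == 2048 blocks, floored at
 * mp->m_writeio_blocks and capped at MAXEXTLEN.
 */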
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
					       imap, XFS_WRITE_IMAPS,
					       &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case ENOSPC:
	case EDQUOT:
		break;
	default:
		return XFS_ERROR(error);
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return XFS_ERROR(error ? error : ENOSPC);
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}
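/*
 * Illustrative example (editorial sketch, assuming mp->m_writeio_log ==
 * 16): XFS_WRITEIO_ALIGN() above masks a byte offset down to a 64k
 * boundary, so a delayed write ending at byte 0x12345 computes
 * aligned_offset == 0x10000 before alloc_blocks of preallocation is
 * added on top of it.
 */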
/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
						  nres, 0);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we get for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(NULL, ip, &last_block,
						     XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb,
						XFS_BMAPI_STACK_SWITCH,
						&first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request.
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}
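/*
 * Illustrative example (editorial sketch): converting a 1 MB unwritten
 * range below may take several passes through the loop; each
 * transaction converts imap.br_blockcount blocks, then advances
 * offset_fsb and shrinks count_fsb until the whole range passed in has
 * been covered.
 */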
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS - we can't risk recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb,
					1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}