/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
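
/*
 * Illustrative example (values assumed, not taken from a real mount):
 * with mp->m_writeio_log == 16 (64k write chunks), a byte offset of
 * 0x12345 shifts down to 0x1 and back up to 0x10000, i.e. the macro
 * rounds an offset down to the previous write-I/O-size boundary.
 */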

void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
}

xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}
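
/*
 * Worked example for xfs_eof_alignment() (illustrative numbers): on a
 * non-realtime inode with a stripe unit of m_dalign == 16 blocks, no
 * swalloc, a file already at least 16 blocks large and an extent size
 * hint of 10 blocks, align starts at 16 and is then rounded up to
 * roundup_64(16, 10) == 20 blocks.
 */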

STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	struct xfs_defer_ops dfops;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this
		 * can call xfs_iread_extents() and we only have the ilock
		 * shared. This should be safe because the lock was held
		 * around a bmapi call in the caller and we only need it to
		 * access the in-core list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);
	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to
	 * do this outside the transaction context, but if we commit and then
	 * crash we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten
	 * extent callback for DAX. This also means that we need to be able to
	 * dip into the reserve block pool for bmbt block allocation if there
	 * is no space left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_defer_init(&dfops, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
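
/*
 * Locking sketch for xfs_iomap_write_direct() above (derived from its
 * use in xfs_file_iomap_begin() further down, not a separate API): the
 * caller enters with the ilock held shared, typically after an
 * xfs_bmapi_read() that filled *imap, and the function drops the ilock
 * on every return path, so the caller must not unlock again:
 *
 *	error = xfs_bmapi_read(ip, offset_fsb, ..., imap, &nimaps, 0);
 *	...
 *	error = xfs_iomap_write_direct(ip, offset, count, imap, nimaps);
 *	// ilock is no longer held here, success or failure
 */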

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}
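
/*
 * Illustrative throttle arithmetic for the function above: each
 * threshold crossed adds 2 to the shift, so a dquot whose remaining
 * room below the hi watermark falls under its 5%/3%/1% low-space
 * marks throttles the preallocation by >> 2, >> 4 or >> 6, i.e. down
 * to 1/4, 1/16 or 1/64 of the computed size.
 */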

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			count,
	xfs_extnum_t		idx)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec	prev;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc:
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_get_extent(ifp, idx - 1, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation. We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size. If the size of the extent is greater
	 * than half the maximum extent length, then use the current offset as
	 * the basis. This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space
	 * available in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined
	 * by the global low free space values and per-quota low free space
	 * values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}
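
/*
 * Worked example for the function above (assumed numbers): a previous
 * data extent of 8 blocks doubles to a 16 block preallocation; if the
 * low-space checks then yield shift == 5, 16 >> 5 truncates to 0, so
 * rounddown_pow_of_two() is skipped and the result is floored at
 * mp->m_writeio_blocks by the check_writeio path.
 */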

static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb;
	int			error = 0, eof = 0;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	xfs_fsblock_t		prealloc_blocks = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
	if (!eof && got.br_startoff <= offset_fsb) {
		if (xfs_is_reflink_inode(ip)) {
			bool		shared;

			end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
					maxbytes_fsb);
			xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
			error = xfs_reflink_reserve_cow(ip, &got, &shared);
			if (error)
				goto out_unlock;
		}

		trace_xfs_iomap_found(ip, offset, count, 0, &got);
		goto done;
	}

	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
	 * to keep the chunks of work done here somewhat symmetric with the
	 * work writeback does. This is a completely arbitrary number pulled
	 * out of thin air as a best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32 bits wide until
	 * the lower level functions are updated.
	 */
	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	if (eof) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
				prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap->flags = IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
	if (isnullstartblock(got.br_startblock))
		got.br_startblock = DELAYSTARTBLOCK;

	if (!got.br_startblock) {
		error = xfs_alert_fsblock_zero(ip, &got);
		if (error)
			goto out_unlock;
	}

	xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
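
/*
 * Note on the IOMAP_F_NEW flag set above: it is what lets
 * xfs_file_iomap_end_delalloc() further down distinguish delalloc
 * blocks reserved by this write from pre-existing ones, so that only
 * freshly reserved blocks are punched out after a short or failed
 * write.
 */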

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	int		whichfork,
	xfs_off_t	offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	struct xfs_defer_ops	dfops;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		flags = XFS_BMAPI_DELALLOC;
	int		nres;

	if (whichfork == XFS_COW_FORK)
		flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			/*
			 * We have already reserved space for the extent and
			 * any indirect blocks when creating the delalloc
			 * extent, there is no need to reserve space in this
			 * transaction again.
			 */
			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_defer_init(&dfops, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map for overlap with the desired range
			 * and abort as soon as we find it. Also, given that we
			 * only return a single map, having one beyond what we
			 * can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, flags, &first_block,
						nres, imap, &nimaps,
						&dfops);
			if (error)
				goto trans_cancel;

			error = xfs_defer_finish(&tp, &dfops, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
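
/*
 * Allocation-loop sketch for the function above (illustrative): if the
 * caller asks for a block in the middle of a large delalloc extent,
 * the first xfs_bmapi_write() call may only convert the leading part
 * of that extent.  The outer loop then advances map_start_fsb past the
 * newly allocated range and retries until the returned map finally
 * covers offset_fsb.
 */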

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	struct xfs_defer_ops dfops;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that
		 * we complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_defer_init(&dfops, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
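
/*
 * Conversion sketch for the function above (illustrative): converting
 * the middle of an unwritten extent splits one record into up to three
 * (unwritten, written, unwritten), inserting two new records, which is
 * why the reservation allows for two full btree splits per pass.
 */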

static inline bool imap_needs_alloc(struct inode *inode,
		struct xfs_bmbt_irec *imap, int nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
{
	/*
	 * COW writes will allocate delalloc space, so we need to make sure
	 * to take the lock exclusively here.
	 */
	if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return true;
	if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE))
		return true;
	return false;
}
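
/*
 * Summary of the helper above (restating its logic, nothing more): the
 * ilock is taken exclusive for any write or zeroing on a reflink inode
 * and for direct I/O writes; everything else, e.g. plain reads and
 * IOMAP_REPORT lookups, gets by with the shared data-map lock.
 */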

static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false, trimmed = false;
	unsigned		lockmode;
	struct block_device	*bdev;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
				iomap);
	}

	if (need_excl_ilock(ip, flags)) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
	} else {
		lockmode = xfs_ilock_data_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (flags & IOMAP_REPORT) {
		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
				&trimmed);
		if (error)
			goto out_unlock;
	}

	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
		if (flags & IOMAP_DIRECT) {
			/* may drop and re-acquire the ilock */
			error = xfs_reflink_allocate_cow(ip, &imap, &shared,
					&lockmode);
			if (error)
				goto out_unlock;
		} else {
			error = xfs_reflink_reserve_cow(ip, &imap, &shared);
			if (error)
				goto out_unlock;
		}

		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat
		 * symmetric with the work writeback does. This is a completely
		 * arbitrary number pulled out of thin air as a best guess for
		 * initial testing.
		 *
		 * Note that the value needs to be less than 32 bits wide until
		 * the lower level functions are updated.
		 */
		length = min_t(loff_t, length, 1024 * PAGE_SIZE);
		/*
		 * xfs_iomap_write_direct() expects the shared lock. It
		 * is unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);
		error = xfs_iomap_write_direct(ip, offset, length, &imap,
				nimaps);
		if (error)
			return error;

		iomap->flags = IOMAP_F_NEW;
		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
	} else {
		ASSERT(nimaps);

		xfs_iunlock(ip, lockmode);
		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
	}

	xfs_bmbt_to_iomap(ip, iomap, &imap);

	/* optionally associate a dax device with the iomap bdev */
	bdev = iomap->bdev;
	if (blk_queue_dax(bdev->bd_queue))
		iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	else
		iomap->dax_dev = NULL;

	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return 0;
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}
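
/*
 * Note on the DAX wiring above: the dax_device looked up via
 * dax_get_by_host() carries a reference that is dropped again with
 * put_dax() in xfs_file_iomap_end() below, so begin/end calls must
 * stay paired.
 */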

static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	struct iomap		*iomap)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	/*
	 * Behave as if the write failed if drop writes is enabled. Set the
	 * NEW flag to force delalloc cleanup.
	 */
	if (xfs_mp_drop_writes(mp)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block
	 * in the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	put_dax(iomap->dax_dev);
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written, iomap);
	return 0;
}

const struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};
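
/*
 * These ops are what the rest of XFS hands to the generic iomap
 * helpers; e.g. the buffered and direct write paths in xfs_file.c pass
 * &xfs_iomap_ops into the iomap infrastructure, which then calls
 * ->iomap_begin and ->iomap_end around each mapping it works on.
 */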

static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (!error) {
		ASSERT(nimaps);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	}

	return error;
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};
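
/*
 * xfs_xattr_iomap_ops only implements ->iomap_begin: it is a read-only
 * mapping of the attribute fork, used for reporting (e.g. FIEMAP
 * requests with FIEMAP_FLAG_XATTR), so no ->iomap_end cleanup is
 * needed.
 */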