// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)

static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	u16			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		return xfs_alert_fsblock_zero(ip, imap);

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
	iomap->flags = flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	return 0;
}

static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
}

static inline xfs_fileoff_t
xfs_iomap_end_fsb(
	struct xfs_mount	*mp,
	loff_t			offset,
	loff_t			count)
{
	ASSERT(offset <= mp->m_super->s_maxbytes);
	return min(XFS_B_TO_FSB(mp, offset + count),
		   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}
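
/*
 * Worked example (hypothetical numbers): with 4k blocks, a stripe unit of
 * m_dalign = 16 blocks and a stripe width of m_swidth = 64 blocks,
 * xfs_eof_alignment() below returns 64 on a "-o swalloc" mount and 16
 * otherwise -- but only once the file is at least one alignment unit in
 * size (256k/64k respectively); smaller files get no EOF alignment at all.
 */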
static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	return align;
}

/*
 * Check if last_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}
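
/*
 * Illustrative sizing (hypothetical numbers): for a direct write covering
 * 100 blocks with no extent size hint, resaligned is 100 and the regular
 * (non-realtime) case below reserves XFS_DIOSTRAT_SPACE_RES(mp, 100)
 * blocks -- roughly the 100 data blocks themselves plus worst-case bmap
 * btree blocks -- and charges the same amount against the inode's quotas.
 */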
int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	int			quota_flag;
	uint			qblocks, resblks;
	unsigned int		resrtextents = 0;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	uint			tflags = 0;

	ASSERT(count_fsb > 0);

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then
	 * crash we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation.  Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten
	 * extent callback for DAX.  This also means that we need to be able to
	 * dip into the reserve block pool for bmbt block allocation if there
	 * is no space left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
				imap, &nimaps);
	if (error)
		goto out_res_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_res_cancel:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		alloc_blocks)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	int64_t			freesp;
	int			shift = 0;

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}
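
/*
 * Worked example (hypothetical numbers): if a dquot's remaining room below
 * its preallocation high watermark falls under the 3% low-space threshold
 * but stays above 1%, xfs_quota_calc_throttle() above computes shift = 4,
 * so a subsequent "alloc_blocks >>= shift" cuts the speculative
 * preallocation to 1/16th of its unthrottled size.
 */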
/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size.  Note that we don't care if the previous extents
	 * are written or not.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > MAXEXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis.  This ensures that
	 * for large files the preallocation size always extends to MAXEXTLEN
	 * rather than falling short due to things like stripe unit/width
	 * alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling.  To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}
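
	/*
	 * Worked example (hypothetical numbers): with global free space
	 * between the 2% and 1% thresholds, the chain above yields
	 * shift = 5, i.e. the preallocation is later divided by 32; the
	 * fully throttled case (below 1%) gives shift = 6.
	 */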
	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard.  This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}
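
/*
 * Worked example (hypothetical numbers): on an otherwise idle filesystem,
 * a file whose last contiguous run of extents is 1000 blocks long gets
 * alloc_blocks = 2000 from the doubling above, which
 * rounddown_pow_of_two() then trims to a 1024 block speculative
 * preallocation; throttling only shrinks that further as free space or
 * quota headroom runs out.
 */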
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real.  Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that
		 * we complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
				XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
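
/*
 * Illustrative note: each pass through the conversion loop above converts
 * at most one extent (nimaps == 1), so a range spanning e.g. three
 * unwritten extents commits three separate transactions, advancing
 * offset_fsb by imap.br_blockcount each iteration until count_fsb is
 * exhausted.
 */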
static inline bool
imap_needs_alloc(
	struct inode		*inode,
	unsigned		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	/* don't allocate blocks when just zeroing */
	if (flags & IOMAP_ZERO)
		return false;
	if (!nimaps ||
	    imap->br_startblock == HOLESTARTBLOCK ||
	    imap->br_startblock == DELAYSTARTBLOCK)
		return true;
	/* we convert unwritten extents before copying the data for DAX */
	if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
		return true;
	return false;
}

static inline bool
imap_needs_cow(
	struct xfs_inode	*ip,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	if (!xfs_is_cow_inode(ip))
		return false;

	/* when zeroing we don't have to COW holes or unwritten extents */
	if (flags & IOMAP_ZERO) {
		if (!nimaps ||
		    imap->br_startblock == HOLESTARTBLOCK ||
		    imap->br_state == XFS_EXT_UNWRITTEN)
			return false;
	}

	return true;
}

static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	unsigned		mode = XFS_ILOCK_SHARED;
	bool			is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip) && is_write)
		mode = XFS_ILOCK_EXCL;

	/*
	 * Extents not yet cached require exclusive access, don't block.  This
	 * is an opencoded xfs_ilock_data_map_shared() call but with
	 * non-blocking behaviour.
	 */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

relock:
	if (flags & IOMAP_NOWAIT) {
		if (!xfs_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, mode);
	}

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so if we got ILOCK_SHARED for a write but we're now a
	 * reflink inode we have to switch to ILOCK_EXCL and relock.
	 */
	if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
		xfs_iunlock(ip, mode);
		mode = XFS_ILOCK_EXCL;
		goto relock;
	}

	*lockmode = mode;
	return 0;
}
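
/*
 * Overview of the direct write path below (illustrative summary): read the
 * existing mapping first; divert through the COW machinery if the inode
 * may share blocks; and only when the range is a hole or delalloc fall
 * through to allocate real blocks via xfs_iomap_write_direct() before
 * handing the mapping back to the iomap layer.
 */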
static int
xfs_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap, cmap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	unsigned		lockmode;

	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Writes that span EOF might trigger an IO size update on completion,
	 * so consider them to be dirty for the purposes of O_DSYNC even if
	 * there are no other metadata changes pending and none have been made
	 * here.
	 */
	if (offset + length > i_size_read(inode))
		iomap_flags |= IOMAP_F_DIRTY;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (imap_needs_cow(ip, flags, &imap, nimaps)) {
		error = -EAGAIN;
		if (flags & IOMAP_NOWAIT)
			goto out_unlock;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode, flags & IOMAP_DIRECT);
		if (error)
			goto out_unlock;
		if (shared)
			goto out_found_cow;
		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if (imap_needs_alloc(inode, flags, &imap, nimaps))
		goto allocate_blocks;

	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);

allocate_blocks:
	error = -EAGAIN;
	if (flags & IOMAP_NOWAIT)
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done here somewhat symmetric with the work writeback does.
	 * This is a completely arbitrary number pulled out of thin air as a
	 * best guess for initial testing.
	 *
	 * Note that the values need to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);
	end_fsb = xfs_iomap_end_fsb(mp, offset, length);

	if (offset + length > XFS_ISIZE(ip))
		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
	xfs_iunlock(ip, lockmode);

	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
			&imap);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);

out_found_cow:
	xfs_iunlock(ip, lockmode);
	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	if (imap.br_startblock != HOLESTARTBLOCK) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
		if (error)
			return error;
	}
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_direct_write_iomap_ops = {
	.iomap_begin		= xfs_direct_write_iomap_begin,
};
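
/*
 * Illustrative note: the buffered path below normally only makes a delayed
 * allocation reservation here; real blocks are not picked until writeback.
 * That is also why inodes with extent size hints are redirected to the
 * direct path above, which allocates immediately.
 */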
static int
xfs_buffered_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, count);
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			allocfork = XFS_DATA_FORK;
	int			error = 0;

	/* we can't use delayed allocations when using extent size hints */
	if (xfs_get_extsz_hint(ip))
		return xfs_direct_write_iomap_begin(inode, offset, count,
				flags, iomap, srcmap);

	ASSERT(!XFS_IS_REALTIME_INODE(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing a hole. */
	if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first, it implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second, the lookup in the extent list is generally
	 * faster than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			goto found_cow;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.  This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_bmap_trim_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		allocfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat
		 * symmetric with the work writeback does.  This is a
		 * completely arbitrary number pulled out of thin air.
		 *
		 * Note that the values need to be less than 32-bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = xfs_iomap_end_fsb(mp, offset, count);

		if (xfs_is_always_cow_inode(ip))
			allocfork = XFS_COW_FORK;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_unlock;

	if (eof && offset + count > XFS_ISIZE(ip)) {
		/*
		 * Determine the initial size of the preallocation.
		 * We clean up any extra preallocation when the file is closed.
		 */
		if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
			prealloc_blocks = mp->m_allocsize_blocks;
		else
			prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
						offset, count, &icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
				    prealloc_blocks;

			align = xfs_eof_alignment(ip);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb,
				XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}
retry:
	error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks,
			allocfork == XFS_DATA_FORK ? &imap : &cmap,
			allocfork == XFS_DATA_FORK ? &icur : &ccur,
			allocfork == XFS_DATA_FORK ? eof : cow_eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	if (allocfork == XFS_COW_FORK) {
		trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
		goto found_cow;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);

found_imap:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);

found_cow:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (imap.br_startoff <= offset_fsb) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
		if (error)
			return error;
	} else {
		xfs_trim_extent(&cmap, offset_fsb,
				imap.br_startoff - offset_fsb);
	}
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
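
/*
 * Worked example (hypothetical numbers): if ->iomap_begin reserved 8 new
 * delalloc blocks but the copy-in faulted after filling only 3 of them,
 * ->iomap_end below rounds the written bytes up to the fourth block and
 * punches the remaining 5 blocks of the reservation back out.
 */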
static int
xfs_buffered_write_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	if (iomap->type != IOMAP_DELALLOC)
		return 0;

	/*
	 * Behave as if the write failed if the drop-writes error tag is
	 * enabled.  Set the NEW flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write.  If
	 * nothing was written, round offset down to point at the first block
	 * in the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls.  If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						      end_fsb - start_fsb);
		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

const struct iomap_ops xfs_buffered_write_iomap_ops = {
	.iomap_begin		= xfs_buffered_write_iomap_begin,
	.iomap_end		= xfs_buffered_write_iomap_end,
};

static int
xfs_read_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	unsigned		lockmode;

	ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (!error && (flags & IOMAP_REPORT))
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
}

const struct iomap_ops xfs_read_iomap_ops = {
	.iomap_begin		= xfs_read_iomap_begin,
};
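
/*
 * Illustrative note (assumption about the caller): these seek ops are
 * meant for SEEK_HOLE/SEEK_DATA style lookups, so unlike the read path a
 * COW fork extent over a data fork hole must be treated as potential
 * data -- dirty pages may be queued against it -- which is why it is
 * reported as IOMAP_UNWRITTEN below to force a page cache probe.
 */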
static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = xfs_iomap_end_fsb(mp, offset, length);
	}

	/*
	 * If a COW fork extent covers the hole, report it - capped to the next
	 * data fork extent:
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
		 * by this extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	xfs_trim_extent(&imap, offset_fsb, end_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};

static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_afp->if_nextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};