--- xfs_iomap.c	(740fd671e04f8a977018eb9cfe440b4817850f0d)
+++ xfs_iomap.c	(952da06375c8f3aa58474fff718d9ae8442531b9)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  * Copyright (c) 2016-2018 Christoph Hellwig.
  * All Rights Reserved.
  */
 #include "xfs.h"
 #include "xfs_fs.h"

--- 174 unchanged lines hidden ---

 	return end_fsb;
 }

 int
 xfs_iomap_write_direct(
 	struct xfs_inode	*ip,
 	xfs_fileoff_t		offset_fsb,
 	xfs_fileoff_t		count_fsb,
+	unsigned int		flags,
 	struct xfs_bmbt_irec	*imap)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_trans	*tp;
 	xfs_filblks_t		resaligned;
 	int			nimaps;
 	unsigned int		dblocks, rblocks;
 	bool			force = false;

--- 25 unchanged lines hidden ---

 	 * recovery of the allocation. Hence we must zero before commit.
 	 *
 	 * Further, if we are mapping unwritten extents here, we need to zero
 	 * and convert them to written so that we don't need an unwritten extent
 	 * callback for DAX. This also means that we need to be able to dip into
 	 * the reserve block pool for bmbt block allocation if there is no space
 	 * left but we need to do unwritten extent conversion.
 	 */
-	if (IS_DAX(VFS_I(ip))) {
+	if (flags & IOMAP_DAX) {
 		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
 		if (imap->br_state == XFS_EXT_UNWRITTEN) {
 			force = true;
 			nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
 		}
 	}

--- 374 unchanged lines hidden ---

 	/* don't allocate blocks when just zeroing */
 	if (flags & IOMAP_ZERO)
 		return false;
 	if (!nimaps ||
 	    imap->br_startblock == HOLESTARTBLOCK ||
 	    imap->br_startblock == DELAYSTARTBLOCK)
 		return true;
 	/* we convert unwritten extents before copying the data for DAX */
-	if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
+	if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
 		return true;
 	return false;
 }

 static inline bool
 imap_needs_cow(
 	struct xfs_inode	*ip,
 	unsigned int		flags,

--- 189 unchanged lines hidden ---

 	if (offset + length > XFS_ISIZE(ip))
 		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
 	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
 		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
 	xfs_iunlock(ip, lockmode);

 	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
-			&imap);
+			flags, &imap);
 	if (error)
 		return error;

 	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
 	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
 			iomap_flags | IOMAP_F_NEW);

 out_found_cow:

--- 511 unchanged lines hidden ---
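
The hunks above thread the iomap layer's flags argument into xfs_iomap_write_direct() and test IOMAP_DAX from those flags instead of calling IS_DAX() on the inode. The sketch below illustrates only that pattern; it is a standalone, hypothetical example, not kernel code: the IOMAP_* values, ext_state enum, struct irec and needs_alloc() helper are made-up stand-ins that mirror the shape of the DAX check in the diff, where behaviour is decided from the flags the caller passes down rather than from inode state.

	#include <stdbool.h>
	#include <stdio.h>

	/* made-up stand-ins for the iomap flag bits referenced in the diff */
	#define IOMAP_ZERO	(1u << 0)
	#define IOMAP_DAX	(1u << 1)

	/* simplified extent state, standing in for XFS_EXT_UNWRITTEN vs written */
	enum ext_state { EXT_WRITTEN, EXT_UNWRITTEN };

	struct irec {
		enum ext_state	state;	/* written or unwritten extent */
	};

	/*
	 * Hypothetical helper mirroring the DAX check above: the decision is
	 * taken from the passed-in flags, not from the inode.
	 */
	static bool needs_alloc(unsigned int flags, const struct irec *imap)
	{
		if (flags & IOMAP_ZERO)
			return false;	/* never allocate when just zeroing */
		if ((flags & IOMAP_DAX) && imap->state == EXT_UNWRITTEN)
			return true;	/* DAX converts unwritten extents up front */
		return false;
	}

	int main(void)
	{
		struct irec imap = { .state = EXT_UNWRITTEN };

		printf("%d\n", needs_alloc(IOMAP_DAX, &imap));	/* prints 1 */
		printf("%d\n", needs_alloc(IOMAP_ZERO, &imap));	/* prints 0 */
		return 0;
	}

One benefit of this style of change is that the same code path serves callers with different semantics: the caller's intent travels in the flags word, so no inode state needs to be consulted deep in the allocation path.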