// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_iunlink_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

struct kmem_cache *xfs_inode_cache;

/*
 * Used in xfs_itruncate_extents(). This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
	struct xfs_inode *);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two. If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code. They are used in places that wish to lock the
 * inode solely for reading the extents. The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards
 * the bringing in of the extents from disk for a file in b-tree format. If
 * the inode is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in. Locking it exclusively all the time would
 * limit our parallelism unnecessarily, though. What we do instead is check
 * to see if the extents have been read in yet, and only lock the inode
 * exclusively if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
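/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * the returned lock mode must be handed back to xfs_iunlock(), since the
 * wrapper may have taken either the shared or the exclusive ilock:
 *
 *	uint	lock_mode = xfs_ilock_data_map_shared(ip);
 *
 *	... walk the data fork extent list of ip ...
 *	xfs_iunlock(ip, lock_mode);
 */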
/*
 * You can't set both SHARED and EXCL for the same lock,
 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
 * to set in lock_flags.
 */
static inline void
xfs_lock_flags_assert(
	uint		lock_flags)
{
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: invalidate_lock and the i_lock. This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> invalidate_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_lock in get_user_pages() to map the user pages into the kernel address
 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
 * fault because page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
 * both taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				XFS_MMAPLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				XFS_MMAPLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
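/*
 * Hypothetical example of the documented ordering (not a caller in this
 * file): an extent manipulation path that must serialise against both
 * syscall and mmap based IO takes the i_rwsem and the invalidate_lock
 * together in a single call:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... invalidate the page cache and modify extents race-free ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */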
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep. It returns 1 if it gets
 * the requested locks and 0 otherwise. If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked. See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}
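/*
 * Illustrative trylock pattern (hypothetical caller): on failure nothing is
 * held, so a non-blocking path can simply back off and retry or fall back
 * to the sleeping variant:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return -EAGAIN;
 */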
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked. See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks. The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
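/*
 * Illustrative demotion sketch (hypothetical caller): take the lock
 * exclusively while deciding whether modification is needed, then demote
 * to shared once only concurrent writers need to be excluded:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... possibly modify the inode ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... continue under the shared lock ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */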
#if defined(DEBUG) || defined(XFS_WARN)
static inline bool
__xfs_rwsem_islocked(
	struct rw_semaphore	*rwsem,
	bool			shared)
{
	if (!debug_locks)
		return rwsem_is_locked(rwsem);

	if (!shared)
		return lockdep_is_held_type(rwsem, 0);

	/*
	 * We are checking that the lock is held at least in shared
	 * mode but don't care that it might be held exclusively
	 * (i.e. shared | excl). Hence we check if the lock is held
	 * in any mode rather than an explicit shared mode.
	 */
	return lockdep_is_held_type(rwsem, -1);
}

bool
xfs_isilocked(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
				(lock_flags & XFS_MMAPLOCK_SHARED));
	}

	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
				(lock_flags & XFS_IOLOCK_SHARED));
	}

	ASSERT(0);
	return false;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline uint
xfs_lock_inumorder(
	uint			lock_mode,
	uint			subclass)
{
	uint			class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
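/*
 * Worked example (illustrative, not from a caller in this file): locking the
 * third inode in a set (subclass 2) with XFS_ILOCK_EXCL yields
 *
 *	xfs_lock_inumorder(XFS_ILOCK_EXCL, 2)
 *		== XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT)
 *
 * so lockdep sees a distinct subclass for each inode in the set rather than
 * reporting a false-positive recursive acquisition of the same lock class.
 */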
/*
 * The following routine will lock n inodes in exclusive mode. We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0;
	uint			i;
	int			j;
	bool			try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder. These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

again:
	try_lock = false;
	i = 0;
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL. If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock = true;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again. xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one. Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		goto again;
	}
}

/*
 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
 * mmaplock must be double-locked separately since we use i_rwsem and
 * invalidate_lock for that. We now support taking one lock EXCL and the
 * other SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		swap(ip0, ip1);
		swap(ip0_mode, ip1_mode);
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}
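/*
 * Illustrative usage (hypothetical caller such as an exchange or rename
 * style operation): callers need not order the inodes themselves, as the
 * function swaps the pair into i_ino order before locking:
 *
 *	xfs_lock_two_inodes(ip_src, XFS_ILOCK_EXCL, ip_tgt, XFS_ILOCK_EXCL);
 *	... modify both inodes ...
 *	xfs_iunlock(ip_src, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip_tgt, XFS_ILOCK_EXCL);
 */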
uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	uint			flags = 0;

	if (ip->i_diflags & XFS_DIFLAG_ANY) {
		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (ip->i_diflags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (ip->i_diflags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (xfs_inode_has_attr_fork(ip))
		flags |= FS_XFLAG_HASATTR;
	return flags;
}

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	struct xfs_inode	*dp,
	const struct xfs_name	*name,
	struct xfs_inode	**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (xfs_is_shutdown(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	xfs_failaddr_t		failaddr;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_extsize = pip->i_extsize;
		}
		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_has_realtime(ip->i_mount))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_extsize = pip->i_extsize;
		}
	}
	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_diflags |= di_flags;

	/*
	 * Inode verifiers on older kernels only check that the extent size
	 * hint is an integer multiple of the rt extent size on realtime files.
	 * They did not check the hint alignment on a directory with both
	 * rtinherit and extszinherit flags set. If the misaligned hint is
	 * propagated from a directory into a new realtime file, new file
	 * allocations will fail due to math errors in the rt allocator and/or
	 * trip the verifiers. Validate the hint settings in the new file so
	 * that we don't let broken hints propagate.
	 */
	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
			VFS_I(ip)->i_mode, ip->i_diflags);
	if (failaddr) {
		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
				   XFS_DIFLAG_EXTSZINHERIT);
		ip->i_extsize = 0;
	}
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	xfs_failaddr_t		failaddr;

	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = pip->i_cowextsize;
	}
	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
		ip->i_diflags2 |= XFS_DIFLAG2_DAX;

	/* Don't let invalid cowextsize hints propagate. */
	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
	if (failaddr) {
		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = 0;
	}
}

/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
int
xfs_init_new_inode(
	struct mnt_idmap	*idmap,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
		inode_fsuid_set(inode, idmap);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(idmap, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
	    !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_disk_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_nblocks == 0);

	tv = inode_set_ctime_current(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;

	ip->i_extsize = 0;
	ip->i_diflags = 0;

	if (xfs_has_v3inodes(mp)) {
		inode_set_iversion(inode, 1);
		ip->i_cowextsize = 0;
		ip->i_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		fallthrough;
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * If we need to create attributes immediately after allocating the
	 * inode, initialise an empty attribute fork right now. We use the
	 * default fork offset for attributes here as we don't know exactly what
	 * size or how many attributes we might be adding. We can do this
	 * safely here because we know the data fork is completely empty and
	 * this saves us from needing to run a separate transaction to set the
	 * fork offset in the immediate future.
	 */
	if (init_xattrs && xfs_has_attr(mp)) {
		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
		xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Decrement the link count on an inode & log the change. If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
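/*
 * Illustrative pairing (hypothetical transaction, not taken from this file):
 * both helpers expect the inode to be joined to a dirty transaction, e.g.
 * when a directory entry is removed:
 *
 *	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
 *	if (!error)
 *		error = xfs_droplink(tp, ip);
 *
 * where xfs_droplink() moves the inode to the AGI unlinked list if the link
 * count hits zero.
 */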
int
xfs_create(
	struct mnt_idmap	*idmap,
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	bool			init_xattrs,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	trace_xfs_create(dp, name);

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
			mapped_fsgid(idmap, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case. If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction. We do not do it
	 * earlier because xfs_dialloc might commit the previous transaction
	 * (and release all the locks). An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode. This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	struct mnt_idmap	*idmap,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
			mapped_fsgid(idmap, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
				0, 0, prid, false, &ip);
	if (error)
		goto out_trans_cancel;

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode. This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error, nospace_error = 0;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
			&tp, &nospace_error);
	if (error)
		goto std_return;

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
115899b6436bSZhi Yong Wu */ 115999b6436bSZhi Yong Wu xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp); 116099b6436bSZhi Yong Wu 116199b6436bSZhi Yong Wu error = xfs_iunlink(tp, ip); 116299b6436bSZhi Yong Wu if (error) 11634906e215SChristoph Hellwig goto out_trans_cancel; 116499b6436bSZhi Yong Wu 116570393313SChristoph Hellwig error = xfs_trans_commit(tp); 116699b6436bSZhi Yong Wu if (error) 116799b6436bSZhi Yong Wu goto out_release_inode; 116899b6436bSZhi Yong Wu 116999b6436bSZhi Yong Wu xfs_qm_dqrele(udqp); 117099b6436bSZhi Yong Wu xfs_qm_dqrele(gdqp); 117199b6436bSZhi Yong Wu xfs_qm_dqrele(pdqp); 117299b6436bSZhi Yong Wu 1173330033d6SBrian Foster *ipp = ip; 117499b6436bSZhi Yong Wu return 0; 117599b6436bSZhi Yong Wu 117699b6436bSZhi Yong Wu out_trans_cancel: 11774906e215SChristoph Hellwig xfs_trans_cancel(tp); 117899b6436bSZhi Yong Wu out_release_inode: 117999b6436bSZhi Yong Wu /* 118058c90473SDave Chinner * Wait until after the current transaction is aborted to finish the 118158c90473SDave Chinner * setup of the inode and release the inode. This prevents recursive 118258c90473SDave Chinner * transactions and deadlocks from xfs_inactive. 118399b6436bSZhi Yong Wu */ 118458c90473SDave Chinner if (ip) { 118558c90473SDave Chinner xfs_finish_inode_setup(ip); 118644a8736bSDarrick J. Wong xfs_irele(ip); 118758c90473SDave Chinner } 1188f2f7b9ffSDarrick J. Wong out_release_dquots: 118999b6436bSZhi Yong Wu xfs_qm_dqrele(udqp); 119099b6436bSZhi Yong Wu xfs_qm_dqrele(gdqp); 119199b6436bSZhi Yong Wu xfs_qm_dqrele(pdqp); 119299b6436bSZhi Yong Wu 119399b6436bSZhi Yong Wu return error; 119499b6436bSZhi Yong Wu } 119599b6436bSZhi Yong Wu 119699b6436bSZhi Yong Wu int 1197c24b5dfaSDave Chinner xfs_link( 1198c24b5dfaSDave Chinner xfs_inode_t *tdp, 1199c24b5dfaSDave Chinner xfs_inode_t *sip, 1200c24b5dfaSDave Chinner struct xfs_name *target_name) 1201c24b5dfaSDave Chinner { 1202c24b5dfaSDave Chinner xfs_mount_t *mp = tdp->i_mount; 1203c24b5dfaSDave Chinner xfs_trans_t *tp; 1204871b9316SDarrick J. Wong int error, nospace_error = 0; 1205c24b5dfaSDave Chinner int resblks; 1206c24b5dfaSDave Chinner 1207c24b5dfaSDave Chinner trace_xfs_link(tdp, target_name); 1208c24b5dfaSDave Chinner 1209c19b3b05SDave Chinner ASSERT(!S_ISDIR(VFS_I(sip)->i_mode)); 1210c24b5dfaSDave Chinner 121175c8c50fSDave Chinner if (xfs_is_shutdown(mp)) 12122451337dSDave Chinner return -EIO; 1213c24b5dfaSDave Chinner 1214c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(sip); 1215c24b5dfaSDave Chinner if (error) 1216c24b5dfaSDave Chinner goto std_return; 1217c24b5dfaSDave Chinner 1218c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(tdp); 1219c24b5dfaSDave Chinner if (error) 1220c24b5dfaSDave Chinner goto std_return; 1221c24b5dfaSDave Chinner 1222c24b5dfaSDave Chinner resblks = XFS_LINK_SPACE_RES(mp, target_name->len); 1223871b9316SDarrick J. Wong error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks, 1224871b9316SDarrick J. Wong &tp, &nospace_error); 12254906e215SChristoph Hellwig if (error) 1226253f4911SChristoph Hellwig goto std_return; 1227c24b5dfaSDave Chinner 1228c24b5dfaSDave Chinner /* 1229c24b5dfaSDave Chinner * If we are using project inheritance, we only allow hard link 1230c24b5dfaSDave Chinner * creation in our tree when the project IDs are the same; else 1231c24b5dfaSDave Chinner * the tree quota mechanism could be circumvented. 
1232c24b5dfaSDave Chinner */ 1233db07349dSChristoph Hellwig if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) && 1234ceaf603cSChristoph Hellwig tdp->i_projid != sip->i_projid)) { 12352451337dSDave Chinner error = -EXDEV; 1236c24b5dfaSDave Chinner goto error_return; 1237c24b5dfaSDave Chinner } 1238c24b5dfaSDave Chinner 123994f3cad5SEric Sandeen if (!resblks) { 124094f3cad5SEric Sandeen error = xfs_dir_canenter(tp, tdp, target_name); 1241c24b5dfaSDave Chinner if (error) 1242c24b5dfaSDave Chinner goto error_return; 124394f3cad5SEric Sandeen } 1244c24b5dfaSDave Chinner 124554d7b5c1SDave Chinner /* 124654d7b5c1SDave Chinner * Handle initial link state of O_TMPFILE inode 124754d7b5c1SDave Chinner */ 124854d7b5c1SDave Chinner if (VFS_I(sip)->i_nlink == 0) { 1249f40aadb2SDave Chinner struct xfs_perag *pag; 1250f40aadb2SDave Chinner 1251f40aadb2SDave Chinner pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino)); 1252f40aadb2SDave Chinner error = xfs_iunlink_remove(tp, pag, sip); 1253f40aadb2SDave Chinner xfs_perag_put(pag); 1254ab297431SZhi Yong Wu if (error) 12554906e215SChristoph Hellwig goto error_return; 1256ab297431SZhi Yong Wu } 1257ab297431SZhi Yong Wu 1258c24b5dfaSDave Chinner error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino, 1259381eee69SBrian Foster resblks); 1260c24b5dfaSDave Chinner if (error) 12614906e215SChristoph Hellwig goto error_return; 1262c24b5dfaSDave Chinner xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1263c24b5dfaSDave Chinner xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); 1264c24b5dfaSDave Chinner 126591083269SEric Sandeen xfs_bumplink(tp, sip); 1266c24b5dfaSDave Chinner 1267c24b5dfaSDave Chinner /* 1268c24b5dfaSDave Chinner * If this is a synchronous mount, make sure that the 1269c24b5dfaSDave Chinner * link transaction goes to disk before returning to 1270c24b5dfaSDave Chinner * the user. 1271c24b5dfaSDave Chinner */ 12720560f31aSDave Chinner if (xfs_has_wsync(mp) || xfs_has_dirsync(mp)) 1273c24b5dfaSDave Chinner xfs_trans_set_sync(tp); 1274c24b5dfaSDave Chinner 127570393313SChristoph Hellwig return xfs_trans_commit(tp); 1276c24b5dfaSDave Chinner 1277c24b5dfaSDave Chinner error_return: 12784906e215SChristoph Hellwig xfs_trans_cancel(tp); 1279c24b5dfaSDave Chinner std_return: 1280871b9316SDarrick J. Wong if (error == -ENOSPC && nospace_error) 1281871b9316SDarrick J. Wong error = nospace_error; 1282c24b5dfaSDave Chinner return error; 1283c24b5dfaSDave Chinner } 1284c24b5dfaSDave Chinner 1285363e59baSDarrick J. Wong /* Clear the reflink flag and the cowblocks tag if possible. */ 1286363e59baSDarrick J. Wong static void 1287363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags( 1288363e59baSDarrick J. Wong struct xfs_inode *ip) 1289363e59baSDarrick J. Wong { 1290363e59baSDarrick J. Wong struct xfs_ifork *dfork; 1291363e59baSDarrick J. Wong struct xfs_ifork *cfork; 1292363e59baSDarrick J. Wong 1293363e59baSDarrick J. Wong if (!xfs_is_reflink_inode(ip)) 1294363e59baSDarrick J. Wong return; 1295732436efSDarrick J. Wong dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK); 1296732436efSDarrick J. Wong cfork = xfs_ifork_ptr(ip, XFS_COW_FORK); 1297363e59baSDarrick J. Wong if (dfork->if_bytes == 0 && cfork->if_bytes == 0) 12983e09ab8fSChristoph Hellwig ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK; 1299363e59baSDarrick J. Wong if (cfork->if_bytes == 0) 1300363e59baSDarrick J. Wong xfs_inode_clear_cowblocks_tag(ip); 1301363e59baSDarrick J. Wong } 1302363e59baSDarrick J. 
Wong 13031da177e4SLinus Torvalds /* 13048f04c47aSChristoph Hellwig * Free up the underlying blocks past new_size. The new size must be smaller 13058f04c47aSChristoph Hellwig * than the current size. This routine can be used both for the attribute and 13068f04c47aSChristoph Hellwig * data fork, and does not modify the inode size, which is left to the caller. 13071da177e4SLinus Torvalds * 1308f6485057SDavid Chinner * The transaction passed to this routine must have made a permanent log 1309f6485057SDavid Chinner * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the 1310f6485057SDavid Chinner * given transaction and start new ones, so make sure everything involved in 1311f6485057SDavid Chinner * the transaction is tidy before calling here. Some transaction (possibly a 1312f6485057SDavid Chinner * new one) will be returned to the caller to be committed. The incoming 1313f6485057SDavid Chinner * transaction must already include the inode, and both inode locks must be 1314f6485057SDavid Chinner * held exclusively. The inode must also be "held" within the transaction. On 1315f6485057SDavid Chinner * return the inode will be "held" within the returned transaction. This 1316f6485057SDavid Chinner * routine does NOT require any disk space to be reserved for it within the transaction. 13171da177e4SLinus Torvalds * 1318f6485057SDavid Chinner * If we get an error, we must return with the inode locked and linked into the 1319f6485057SDavid Chinner * current transaction. This keeps things simple for the higher level code, 1320f6485057SDavid Chinner * because it always knows that the inode is locked and held in the transaction 1321f6485057SDavid Chinner * that returns to it whether errors occur or not. We don't mark the inode 1322f6485057SDavid Chinner * dirty on error so that transactions can be easily aborted if possible. 
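 *
 * A minimal caller sketch (modelled on xfs_inactive_truncate() later in
 * this file; error handling elided):
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *
 * Because the routine may roll the transaction, the caller must commit
 * whatever comes back in *tpp, not the transaction it originally passed in.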
13231da177e4SLinus Torvalds */ 13241da177e4SLinus Torvalds int 13254e529339SBrian Foster xfs_itruncate_extents_flags( 13268f04c47aSChristoph Hellwig struct xfs_trans **tpp, 13278f04c47aSChristoph Hellwig struct xfs_inode *ip, 13288f04c47aSChristoph Hellwig int whichfork, 132913b86fc3SBrian Foster xfs_fsize_t new_size, 13304e529339SBrian Foster int flags) 13311da177e4SLinus Torvalds { 13328f04c47aSChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 13338f04c47aSChristoph Hellwig struct xfs_trans *tp = *tpp; 13341da177e4SLinus Torvalds xfs_fileoff_t first_unmap_block; 13358f04c47aSChristoph Hellwig xfs_filblks_t unmap_len; 13368f04c47aSChristoph Hellwig int error = 0; 13371da177e4SLinus Torvalds 13380b56185bSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 13390b56185bSChristoph Hellwig ASSERT(!atomic_read(&VFS_I(ip)->i_count) || 13400b56185bSChristoph Hellwig xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 1341ce7ae151SChristoph Hellwig ASSERT(new_size <= XFS_ISIZE(ip)); 13428f04c47aSChristoph Hellwig ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 13431da177e4SLinus Torvalds ASSERT(ip->i_itemp != NULL); 1344898621d5SChristoph Hellwig ASSERT(ip->i_itemp->ili_lock_flags == 0); 13451da177e4SLinus Torvalds ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); 13461da177e4SLinus Torvalds 1347673e8e59SChristoph Hellwig trace_xfs_itruncate_extents_start(ip, new_size); 1348673e8e59SChristoph Hellwig 13494e529339SBrian Foster flags |= xfs_bmapi_aflag(whichfork); 135013b86fc3SBrian Foster 13511da177e4SLinus Torvalds /* 13521da177e4SLinus Torvalds * Since it is possible for space to become allocated beyond 13531da177e4SLinus Torvalds * the end of the file (in a crash where the space is allocated 13541da177e4SLinus Torvalds * but the inode size is not yet updated), simply remove any 13551da177e4SLinus Torvalds * blocks which show up between the new EOF and the maximum 13564bbb04abSDarrick J. Wong * possible file size. 13574bbb04abSDarrick J. Wong * 13584bbb04abSDarrick J. Wong * We have to free all the blocks to the bmbt maximum offset, even if 13594bbb04abSDarrick J. Wong * the page cache can't scale that far. 13601da177e4SLinus Torvalds */ 13618f04c47aSChristoph Hellwig first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); 136233005fd0SDarrick J. Wong if (!xfs_verify_fileoff(mp, first_unmap_block)) { 13634bbb04abSDarrick J. Wong WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF); 13648f04c47aSChristoph Hellwig return 0; 13654bbb04abSDarrick J. Wong } 13668f04c47aSChristoph Hellwig 13674bbb04abSDarrick J. Wong unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1; 13684bbb04abSDarrick J. Wong while (unmap_len > 0) { 1369692b6cddSDave Chinner ASSERT(tp->t_highest_agno == NULLAGNUMBER); 13704bbb04abSDarrick J. Wong error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len, 13714bbb04abSDarrick J. Wong flags, XFS_ITRUNC_MAX_EXTENTS); 13728f04c47aSChristoph Hellwig if (error) 1373d5a2e289SBrian Foster goto out; 13741da177e4SLinus Torvalds 13756dd379c7SBrian Foster /* free the just unmapped extents */ 13769e28a242SBrian Foster error = xfs_defer_finish(&tp); 13778f04c47aSChristoph Hellwig if (error) 13789b1f4e98SBrian Foster goto out; 13791da177e4SLinus Torvalds } 13808f04c47aSChristoph Hellwig 13814919d42aSDarrick J. Wong if (whichfork == XFS_DATA_FORK) { 1382aa8968f2SDarrick J. Wong /* Remove all pending CoW reservations. */ 13834919d42aSDarrick J. Wong error = xfs_reflink_cancel_cow_blocks(ip, &tp, 13844bbb04abSDarrick J. Wong first_unmap_block, XFS_MAX_FILEOFF, true); 1385aa8968f2SDarrick J. 
Wong if (error) 1386aa8968f2SDarrick J. Wong goto out; 1387aa8968f2SDarrick J. Wong 1388363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags(ip); 13894919d42aSDarrick J. Wong } 1390aa8968f2SDarrick J. Wong 1391673e8e59SChristoph Hellwig /* 1392673e8e59SChristoph Hellwig * Always re-log the inode so that our permanent transaction can keep 1393673e8e59SChristoph Hellwig * on rolling it forward in the log. 1394673e8e59SChristoph Hellwig */ 1395673e8e59SChristoph Hellwig xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1396673e8e59SChristoph Hellwig 1397673e8e59SChristoph Hellwig trace_xfs_itruncate_extents_end(ip, new_size); 1398673e8e59SChristoph Hellwig 13998f04c47aSChristoph Hellwig out: 14008f04c47aSChristoph Hellwig *tpp = tp; 14018f04c47aSChristoph Hellwig return error; 14028f04c47aSChristoph Hellwig } 14038f04c47aSChristoph Hellwig 1404c24b5dfaSDave Chinner int 1405c24b5dfaSDave Chinner xfs_release( 1406c24b5dfaSDave Chinner xfs_inode_t *ip) 1407c24b5dfaSDave Chinner { 1408c24b5dfaSDave Chinner xfs_mount_t *mp = ip->i_mount; 14097d88329eSDarrick J. Wong int error = 0; 1410c24b5dfaSDave Chinner 1411c19b3b05SDave Chinner if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0)) 1412c24b5dfaSDave Chinner return 0; 1413c24b5dfaSDave Chinner 1414c24b5dfaSDave Chinner /* If this is a read-only mount, don't do this (would generate I/O) */ 14152e973b2cSDave Chinner if (xfs_is_readonly(mp)) 1416c24b5dfaSDave Chinner return 0; 1417c24b5dfaSDave Chinner 141875c8c50fSDave Chinner if (!xfs_is_shutdown(mp)) { 1419c24b5dfaSDave Chinner int truncated; 1420c24b5dfaSDave Chinner 1421c24b5dfaSDave Chinner /* 1422c24b5dfaSDave Chinner * If we previously truncated this file and removed old data 1423c24b5dfaSDave Chinner * in the process, we want to initiate "early" writeout on 1424c24b5dfaSDave Chinner * the last close. This is an attempt to combat the notorious 1425c24b5dfaSDave Chinner * NULL files problem which is particularly noticeable from a 1426c24b5dfaSDave Chinner * truncate down, buffered (re-)write (delalloc), followed by 1427c24b5dfaSDave Chinner * a crash. What we are effectively doing here is 1428c24b5dfaSDave Chinner * significantly reducing the time window where we'd otherwise 1429c24b5dfaSDave Chinner * be exposed to that problem. 1430c24b5dfaSDave Chinner */ 1431c24b5dfaSDave Chinner truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); 1432c24b5dfaSDave Chinner if (truncated) { 1433c24b5dfaSDave Chinner xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); 1434eac152b4SDave Chinner if (ip->i_delayed_blks > 0) { 14352451337dSDave Chinner error = filemap_flush(VFS_I(ip)->i_mapping); 1436c24b5dfaSDave Chinner if (error) 1437c24b5dfaSDave Chinner return error; 1438c24b5dfaSDave Chinner } 1439c24b5dfaSDave Chinner } 1440c24b5dfaSDave Chinner } 1441c24b5dfaSDave Chinner 144254d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink == 0) 1443c24b5dfaSDave Chinner return 0; 1444c24b5dfaSDave Chinner 14457d88329eSDarrick J. Wong /* 14467d88329eSDarrick J. Wong * If we can't get the iolock just skip truncating the blocks past EOF 14477d88329eSDarrick J. Wong * because we could deadlock with the mmap_lock otherwise. We'll get 14487d88329eSDarrick J. Wong * another chance to drop them once the last reference to the inode is 14497d88329eSDarrick J. Wong * dropped, so we'll never leak blocks permanently. 14507d88329eSDarrick J. Wong */ 14517d88329eSDarrick J. Wong if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) 14527d88329eSDarrick J. Wong return 0; 1453c24b5dfaSDave Chinner 14547d88329eSDarrick J. 
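	/*
	 * Last close of a linked file: see if there are post-EOF speculative
	 * preallocations worth trimming. Note that @force is false here,
	 * unlike the eviction paths later in this file which must always
	 * free post-EOF blocks to keep free space accounting correct.
	 */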
Wong if (xfs_can_free_eofblocks(ip, false)) { 1455c24b5dfaSDave Chinner /* 1456a36b9261SBrian Foster * If the inode is being opened, written and closed 1457a36b9261SBrian Foster * frequently and we have delayed allocation blocks outstanding 1458a36b9261SBrian Foster * (e.g. streaming writes from the NFS server), truncating the 1459a36b9261SBrian Foster * blocks past EOF will cause fragmentation to occur. 1460a36b9261SBrian Foster * 1461a36b9261SBrian Foster * In this case don't do the truncation, but we have to be 1462a36b9261SBrian Foster * careful how we detect this case. Blocks beyond EOF show up as 1463a36b9261SBrian Foster * i_delayed_blks even when the inode is clean, so we need to 1464a36b9261SBrian Foster * truncate them away first before checking for a dirty release. 1465a36b9261SBrian Foster * Hence on the first dirty close we will still remove the 1466a36b9261SBrian Foster * speculative allocation, but after that we will leave it in 1467a36b9261SBrian Foster * place. 1468a36b9261SBrian Foster */ 1469a36b9261SBrian Foster if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE)) 14707d88329eSDarrick J. Wong goto out_unlock; 14717d88329eSDarrick J. Wong 1472a36b9261SBrian Foster error = xfs_free_eofblocks(ip); 1473a36b9261SBrian Foster if (error) 14747d88329eSDarrick J. Wong goto out_unlock; 1475c24b5dfaSDave Chinner 1476c24b5dfaSDave Chinner /* delalloc blocks after truncation means it really is dirty */ 1477c24b5dfaSDave Chinner if (ip->i_delayed_blks) 1478c24b5dfaSDave Chinner xfs_iflags_set(ip, XFS_IDIRTY_RELEASE); 1479c24b5dfaSDave Chinner } 14807d88329eSDarrick J. Wong 14817d88329eSDarrick J. Wong out_unlock: 14827d88329eSDarrick J. Wong xfs_iunlock(ip, XFS_IOLOCK_EXCL); 14837d88329eSDarrick J. Wong return error; 1484c24b5dfaSDave Chinner } 1485c24b5dfaSDave Chinner 1486c24b5dfaSDave Chinner /* 1487f7be2d7fSBrian Foster * xfs_inactive_truncate 1488f7be2d7fSBrian Foster * 1489f7be2d7fSBrian Foster * Called to perform a truncate when an inode becomes unlinked. 1490f7be2d7fSBrian Foster */ 1491f7be2d7fSBrian Foster STATIC int 1492f7be2d7fSBrian Foster xfs_inactive_truncate( 1493f7be2d7fSBrian Foster struct xfs_inode *ip) 1494f7be2d7fSBrian Foster { 1495f7be2d7fSBrian Foster struct xfs_mount *mp = ip->i_mount; 1496f7be2d7fSBrian Foster struct xfs_trans *tp; 1497f7be2d7fSBrian Foster int error; 1498f7be2d7fSBrian Foster 1499253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); 1500f7be2d7fSBrian Foster if (error) { 150175c8c50fSDave Chinner ASSERT(xfs_is_shutdown(mp)); 1502f7be2d7fSBrian Foster return error; 1503f7be2d7fSBrian Foster } 1504f7be2d7fSBrian Foster xfs_ilock(ip, XFS_ILOCK_EXCL); 1505f7be2d7fSBrian Foster xfs_trans_ijoin(tp, ip, 0); 1506f7be2d7fSBrian Foster 1507f7be2d7fSBrian Foster /* 1508f7be2d7fSBrian Foster * Log the inode size first to prevent stale data exposure in the event 1509f7be2d7fSBrian Foster * of a system crash before the truncate completes. See the related 151069bca807SJan Kara * comment in xfs_vn_setattr_size() for details. 
1511f7be2d7fSBrian Foster */ 151213d2c10bSChristoph Hellwig ip->i_disk_size = 0; 1513f7be2d7fSBrian Foster xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1514f7be2d7fSBrian Foster 1515f7be2d7fSBrian Foster error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); 1516f7be2d7fSBrian Foster if (error) 1517f7be2d7fSBrian Foster goto error_trans_cancel; 1518f7be2d7fSBrian Foster 1519daf83964SChristoph Hellwig ASSERT(ip->i_df.if_nextents == 0); 1520f7be2d7fSBrian Foster 152170393313SChristoph Hellwig error = xfs_trans_commit(tp); 1522f7be2d7fSBrian Foster if (error) 1523f7be2d7fSBrian Foster goto error_unlock; 1524f7be2d7fSBrian Foster 1525f7be2d7fSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 1526f7be2d7fSBrian Foster return 0; 1527f7be2d7fSBrian Foster 1528f7be2d7fSBrian Foster error_trans_cancel: 15294906e215SChristoph Hellwig xfs_trans_cancel(tp); 1530f7be2d7fSBrian Foster error_unlock: 1531f7be2d7fSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 1532f7be2d7fSBrian Foster return error; 1533f7be2d7fSBrian Foster } 1534f7be2d7fSBrian Foster 1535f7be2d7fSBrian Foster /* 153688877d2bSBrian Foster * xfs_inactive_ifree() 153788877d2bSBrian Foster * 153888877d2bSBrian Foster * Perform the inode free when an inode is unlinked. 153988877d2bSBrian Foster */ 154088877d2bSBrian Foster STATIC int 154188877d2bSBrian Foster xfs_inactive_ifree( 154288877d2bSBrian Foster struct xfs_inode *ip) 154388877d2bSBrian Foster { 154488877d2bSBrian Foster struct xfs_mount *mp = ip->i_mount; 154588877d2bSBrian Foster struct xfs_trans *tp; 154688877d2bSBrian Foster int error; 154788877d2bSBrian Foster 15489d43b180SBrian Foster /* 154976d771b4SChristoph Hellwig * We try to use a per-AG reservation for any block needed by the finobt 155076d771b4SChristoph Hellwig * tree, but as the finobt feature predates the per-AG reservation 155176d771b4SChristoph Hellwig * support a degraded file system might not have enough space for the 155276d771b4SChristoph Hellwig * reservation at mount time. In that case try to dip into the reserved 155376d771b4SChristoph Hellwig * pool and pray. 15549d43b180SBrian Foster * 15559d43b180SBrian Foster * Send a warning if the reservation does happen to fail, as the inode 15569d43b180SBrian Foster * now remains allocated and sits on the unlinked list until the fs is 15579d43b180SBrian Foster * repaired. 15589d43b180SBrian Foster */ 1559e1f6ca11SDarrick J. Wong if (unlikely(mp->m_finobt_nores)) { 1560253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 156176d771b4SChristoph Hellwig XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, 156276d771b4SChristoph Hellwig &tp); 156376d771b4SChristoph Hellwig } else { 156476d771b4SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp); 156576d771b4SChristoph Hellwig } 156688877d2bSBrian Foster if (error) { 15672451337dSDave Chinner if (error == -ENOSPC) { 15689d43b180SBrian Foster xfs_warn_ratelimited(mp, 15699d43b180SBrian Foster "Failed to remove inode(s) from unlinked list. " 15709d43b180SBrian Foster "Please free space, unmount and run xfs_repair."); 15719d43b180SBrian Foster } else { 157275c8c50fSDave Chinner ASSERT(xfs_is_shutdown(mp)); 15739d43b180SBrian Foster } 157488877d2bSBrian Foster return error; 157588877d2bSBrian Foster } 157688877d2bSBrian Foster 157796355d5aSDave Chinner /* 157896355d5aSDave Chinner * We do not hold the inode locked across the entire rolling transaction 157996355d5aSDave Chinner * here. 
We only need to hold it for the first transaction that 158096355d5aSDave Chinner * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the 158196355d5aSDave Chinner * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode 158296355d5aSDave Chinner * here breaks the relationship between cluster buffer invalidation and 158396355d5aSDave Chinner * stale inode invalidation on cluster buffer item journal commit 158496355d5aSDave Chinner * completion, and can result in leaving dirty stale inodes hanging 158596355d5aSDave Chinner * around in memory. 158696355d5aSDave Chinner * 158796355d5aSDave Chinner * We have no need for serialising this inode operation against other 158896355d5aSDave Chinner * operations - we freed the inode and hence reallocation is required 158996355d5aSDave Chinner * and that will serialise on reallocating the space the deferops need 159096355d5aSDave Chinner * to free. Hence we can unlock the inode on the first commit of 159196355d5aSDave Chinner * the transaction rather than roll it right through the deferops. This 159296355d5aSDave Chinner * avoids relogging the XFS_ISTALE inode. 159396355d5aSDave Chinner * 159496355d5aSDave Chinner * We check that xfs_ifree() hasn't grown an internal transaction roll 159596355d5aSDave Chinner * by asserting that the inode is still locked when it returns. 159696355d5aSDave Chinner */ 159788877d2bSBrian Foster xfs_ilock(ip, XFS_ILOCK_EXCL); 159896355d5aSDave Chinner xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 159988877d2bSBrian Foster 16000e0417f3SBrian Foster error = xfs_ifree(tp, ip); 160196355d5aSDave Chinner ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 160288877d2bSBrian Foster if (error) { 160388877d2bSBrian Foster /* 160488877d2bSBrian Foster * If we fail to free the inode, shut down. The cancel 160588877d2bSBrian Foster * might do that, we need to make sure. Otherwise the 160688877d2bSBrian Foster * inode might be lost for a long time or forever. 160788877d2bSBrian Foster */ 160875c8c50fSDave Chinner if (!xfs_is_shutdown(mp)) { 160988877d2bSBrian Foster xfs_notice(mp, "%s: xfs_ifree returned error %d", 161088877d2bSBrian Foster __func__, error); 161188877d2bSBrian Foster xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); 161288877d2bSBrian Foster } 16134906e215SChristoph Hellwig xfs_trans_cancel(tp); 161488877d2bSBrian Foster return error; 161588877d2bSBrian Foster } 161688877d2bSBrian Foster 161788877d2bSBrian Foster /* 161888877d2bSBrian Foster * Credit the quota account(s). The inode is gone. 161988877d2bSBrian Foster */ 162088877d2bSBrian Foster xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1); 162188877d2bSBrian Foster 1622d4d12c02SDave Chinner return xfs_trans_commit(tp); 162388877d2bSBrian Foster } 162488877d2bSBrian Foster 162588877d2bSBrian Foster /* 162662af7d54SDarrick J. Wong * Returns true if we need to update the on-disk metadata before we can free 162762af7d54SDarrick J. Wong * the memory used by this inode. Updates include freeing post-eof 162862af7d54SDarrick J. Wong * preallocations; freeing COW staging extents; and marking the inode free in 162962af7d54SDarrick J. Wong * the inobt if it is on the unlinked list. 163062af7d54SDarrick J. Wong */ 163162af7d54SDarrick J. Wong bool 163262af7d54SDarrick J. Wong xfs_inode_needs_inactive( 163362af7d54SDarrick J. Wong struct xfs_inode *ip) 163462af7d54SDarrick J. Wong { 163562af7d54SDarrick J. Wong struct xfs_mount *mp = ip->i_mount; 1636732436efSDarrick J. Wong struct xfs_ifork *cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); 163762af7d54SDarrick J. 
Wong 163862af7d54SDarrick J. Wong /* 163962af7d54SDarrick J. Wong * If the inode is already free, then there can be nothing 164062af7d54SDarrick J. Wong * to clean up here. 164162af7d54SDarrick J. Wong */ 164262af7d54SDarrick J. Wong if (VFS_I(ip)->i_mode == 0) 164362af7d54SDarrick J. Wong return false; 164462af7d54SDarrick J. Wong 164576e58901SDarrick J. Wong /* 164676e58901SDarrick J. Wong * If this is a read-only mount, don't do this (would generate I/O) 164776e58901SDarrick J. Wong * unless we're in log recovery and cleaning the iunlinked list. 164876e58901SDarrick J. Wong */ 164976e58901SDarrick J. Wong if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log)) 165062af7d54SDarrick J. Wong return false; 165162af7d54SDarrick J. Wong 165262af7d54SDarrick J. Wong /* If the log isn't running, push inodes straight to reclaim. */ 165375c8c50fSDave Chinner if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp)) 165462af7d54SDarrick J. Wong return false; 165562af7d54SDarrick J. Wong 165662af7d54SDarrick J. Wong /* Metadata inodes require explicit resource cleanup. */ 165762af7d54SDarrick J. Wong if (xfs_is_metadata_inode(ip)) 165862af7d54SDarrick J. Wong return false; 165962af7d54SDarrick J. Wong 166062af7d54SDarrick J. Wong /* Want to clean out the cow blocks if there are any. */ 166162af7d54SDarrick J. Wong if (cow_ifp && cow_ifp->if_bytes > 0) 166262af7d54SDarrick J. Wong return true; 166362af7d54SDarrick J. Wong 166462af7d54SDarrick J. Wong /* Unlinked files must be freed. */ 166562af7d54SDarrick J. Wong if (VFS_I(ip)->i_nlink == 0) 166662af7d54SDarrick J. Wong return true; 166762af7d54SDarrick J. Wong 166862af7d54SDarrick J. Wong /* 166962af7d54SDarrick J. Wong * This file isn't being freed, so check if there are post-eof blocks 167062af7d54SDarrick J. Wong * to free. @force is true because we are evicting an inode from the 167162af7d54SDarrick J. Wong * cache. Post-eof blocks must be freed, lest we end up with broken 167262af7d54SDarrick J. Wong * free space accounting. 167362af7d54SDarrick J. Wong * 167462af7d54SDarrick J. Wong * Note: don't bother with iolock here since lockdep complains about 167562af7d54SDarrick J. Wong * acquiring it in reclaim context. We have the only reference to the 167662af7d54SDarrick J. Wong * inode at this point anyways. 167762af7d54SDarrick J. Wong */ 167862af7d54SDarrick J. Wong return xfs_can_free_eofblocks(ip, true); 167962af7d54SDarrick J. Wong } 168062af7d54SDarrick J. Wong 168162af7d54SDarrick J. Wong /* 1682c24b5dfaSDave Chinner * xfs_inactive 1683c24b5dfaSDave Chinner * 1684c24b5dfaSDave Chinner * This is called when the vnode reference count for the vnode 1685c24b5dfaSDave Chinner * goes to zero. If the file has been unlinked, then it must 1686c24b5dfaSDave Chinner * now be truncated. Also, we clear all of the read-ahead state 1687c24b5dfaSDave Chinner * kept for the inode here since the file is now closed. 1688c24b5dfaSDave Chinner */ 1689d4d12c02SDave Chinner int 1690c24b5dfaSDave Chinner xfs_inactive( 1691c24b5dfaSDave Chinner xfs_inode_t *ip) 1692c24b5dfaSDave Chinner { 16933d3c8b52SJie Liu struct xfs_mount *mp; 1694d4d12c02SDave Chinner int error = 0; 1695c24b5dfaSDave Chinner int truncate = 0; 1696c24b5dfaSDave Chinner 1697c24b5dfaSDave Chinner /* 1698c24b5dfaSDave Chinner * If the inode is already free, then there can be nothing 1699c24b5dfaSDave Chinner * to clean up here. 1700c24b5dfaSDave Chinner */ 1701c19b3b05SDave Chinner if (VFS_I(ip)->i_mode == 0) { 1702c24b5dfaSDave Chinner ASSERT(ip->i_df.if_broot_bytes == 0); 17033ea06d73SDarrick J. 
Wong goto out; 1704c24b5dfaSDave Chinner } 1705c24b5dfaSDave Chinner 1706c24b5dfaSDave Chinner mp = ip->i_mount; 170717c12bcdSDarrick J. Wong ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY)); 1708c24b5dfaSDave Chinner 170976e58901SDarrick J. Wong /* 171076e58901SDarrick J. Wong * If this is a read-only mount, don't do this (would generate I/O) 171176e58901SDarrick J. Wong * unless we're in log recovery and cleaning the iunlinked list. 171276e58901SDarrick J. Wong */ 171376e58901SDarrick J. Wong if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log)) 17143ea06d73SDarrick J. Wong goto out; 1715c24b5dfaSDave Chinner 1716383e32b0SDarrick J. Wong /* Metadata inodes require explicit resource cleanup. */ 1717383e32b0SDarrick J. Wong if (xfs_is_metadata_inode(ip)) 17183ea06d73SDarrick J. Wong goto out; 1719383e32b0SDarrick J. Wong 17206231848cSDarrick J. Wong /* Try to clean out the cow blocks if there are any. */ 172151d62690SChristoph Hellwig if (xfs_inode_has_cow_data(ip)) 17226231848cSDarrick J. Wong xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); 17236231848cSDarrick J. Wong 172454d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink != 0) { 1725c24b5dfaSDave Chinner /* 1726c24b5dfaSDave Chinner * force is true because we are evicting an inode from the 1727c24b5dfaSDave Chinner * cache. Post-eof blocks must be freed, lest we end up with 1728c24b5dfaSDave Chinner * broken free space accounting. 17293b4683c2SBrian Foster * 17303b4683c2SBrian Foster * Note: don't bother with iolock here since lockdep complains 17313b4683c2SBrian Foster * about acquiring it in reclaim context. We have the only 17323b4683c2SBrian Foster * reference to the inode at this point anyways. 1733c24b5dfaSDave Chinner */ 17343b4683c2SBrian Foster if (xfs_can_free_eofblocks(ip, true)) 1735d4d12c02SDave Chinner error = xfs_free_eofblocks(ip); 173674564fb4SBrian Foster 17373ea06d73SDarrick J. Wong goto out; 1738c24b5dfaSDave Chinner } 1739c24b5dfaSDave Chinner 1740c19b3b05SDave Chinner if (S_ISREG(VFS_I(ip)->i_mode) && 174113d2c10bSChristoph Hellwig (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 || 1742daf83964SChristoph Hellwig ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0)) 1743c24b5dfaSDave Chinner truncate = 1; 1744c24b5dfaSDave Chinner 1745*49813a21SDarrick J. Wong if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) { 1746*49813a21SDarrick J. Wong xfs_qm_dqdetach(ip); 1747*49813a21SDarrick J. Wong } else { 1748c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(ip); 1749c24b5dfaSDave Chinner if (error) 17503ea06d73SDarrick J. Wong goto out; 1751*49813a21SDarrick J. Wong } 1752c24b5dfaSDave Chinner 1753c19b3b05SDave Chinner if (S_ISLNK(VFS_I(ip)->i_mode)) 175436b21ddeSBrian Foster error = xfs_inactive_symlink(ip); 1755f7be2d7fSBrian Foster else if (truncate) 1756f7be2d7fSBrian Foster error = xfs_inactive_truncate(ip); 175736b21ddeSBrian Foster if (error) 17583ea06d73SDarrick J. Wong goto out; 1759c24b5dfaSDave Chinner 1760c24b5dfaSDave Chinner /* 1761c24b5dfaSDave Chinner * If there are attributes associated with the file then blow them away 1762c24b5dfaSDave Chinner * now. The code calls a routine that recursively deconstructs the 17636dfe5a04SDave Chinner * attribute fork. It also blows away the in-core attribute fork. 1764c24b5dfaSDave Chinner */ 1765932b42c6SDarrick J. Wong if (xfs_inode_has_attr_fork(ip)) { 1766c24b5dfaSDave Chinner error = xfs_attr_inactive(ip); 1767c24b5dfaSDave Chinner if (error) 17683ea06d73SDarrick J. 
Wong goto out; 1769c24b5dfaSDave Chinner } 1770c24b5dfaSDave Chinner 17717821ea30SChristoph Hellwig ASSERT(ip->i_forkoff == 0); 1772c24b5dfaSDave Chinner 1773c24b5dfaSDave Chinner /* 1774c24b5dfaSDave Chinner * Free the inode. 1775c24b5dfaSDave Chinner */ 1776d4d12c02SDave Chinner error = xfs_inactive_ifree(ip); 1777c24b5dfaSDave Chinner 17783ea06d73SDarrick J. Wong out: 1779c24b5dfaSDave Chinner /* 17803ea06d73SDarrick J. Wong * We're done making metadata updates for this inode, so we can release 17813ea06d73SDarrick J. Wong * the attached dquots. 1782c24b5dfaSDave Chinner */ 1783c24b5dfaSDave Chinner xfs_qm_dqdetach(ip); 1784d4d12c02SDave Chinner return error; 1785c24b5dfaSDave Chinner } 1786c24b5dfaSDave Chinner 17871da177e4SLinus Torvalds /* 17889b247179SDarrick J. Wong * In-Core Unlinked List Lookups 17899b247179SDarrick J. Wong * ============================= 17909b247179SDarrick J. Wong * 17919b247179SDarrick J. Wong * Every inode is supposed to be reachable from some other piece of metadata 17929b247179SDarrick J. Wong * with the exception of the root directory. Inodes with a connection to a 17939b247179SDarrick J. Wong * file descriptor but not linked from anywhere in the on-disk directory tree 17949b247179SDarrick J. Wong * are collectively known as unlinked inodes, though the filesystem itself 17959b247179SDarrick J. Wong * maintains links to these inodes so that on-disk metadata are consistent. 17969b247179SDarrick J. Wong * 17979b247179SDarrick J. Wong * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI 17989b247179SDarrick J. Wong * header contains a number of buckets that point to an inode, and each inode 17999b247179SDarrick J. Wong * record has a pointer to the next inode in the hash chain. This 18009b247179SDarrick J. Wong * singly-linked list causes scaling problems in the iunlink remove function 18019b247179SDarrick J. Wong * because we must walk that list to find the inode that points to the inode 18029b247179SDarrick J. Wong * being removed from the unlinked hash bucket list. 18039b247179SDarrick J. Wong * 18042fd26cc0SDave Chinner * Hence we keep an in-memory double linked list to link each inode on an 18052fd26cc0SDave Chinner * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer 18062fd26cc0SDave Chinner * based lists would require having 64 list heads in the perag, one for each 18072fd26cc0SDave Chinner * list. This is expensive in terms of memory (think millions of AGs) and cache 18082fd26cc0SDave Chinner * misses on lookups. Instead, use the fact that inodes on the unlinked list 18092fd26cc0SDave Chinner * must be referenced at the VFS level to keep them on the list and hence we 18102fd26cc0SDave Chinner * have an existence guarantee for inodes on the unlinked list. 18119b247179SDarrick J. Wong * 18122fd26cc0SDave Chinner * Given we have an existence guarantee, we can use lockless inode cache lookups 18132fd26cc0SDave Chinner * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode 18142fd26cc0SDave Chinner * for the double linked unlinked list, and we don't need any extra locking to 18152fd26cc0SDave Chinner * keep the list safe as all manipulations are done under the AGI buffer lock. 18162fd26cc0SDave Chinner * Keeping the list up to date does not require memory allocation, just finding 18172fd26cc0SDave Chinner * the XFS inode and updating the next/prev unlinked list aginos. 18189b247179SDarrick J. Wong */ 18199b247179SDarrick J. Wong 18209b247179SDarrick J. 
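/*
 * Illustrative sketch of the list geometry (not code): with bucket b
 * holding inodes A and B, the pointers look like:
 *
 *	agi_unlinked[b] -> A -> B -> NULLAGINO	(on-disk next pointers)
 *	  NULLAGINO <- A <- B			(in-memory prev pointers)
 *
 * Pushing a new inode N onto the head gives agi_unlinked[b] -> N -> A -> B,
 * with A's i_prev_unlinked updated to point back at N; removal stitches the
 * victim's prev and next neighbours back together.
 */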
Wong /* 1821a83d5a8bSDave Chinner * Find an inode on the unlinked list. This does not take references to the 1822a83d5a8bSDave Chinner * inode as we have existence guarantees by holding the AGI buffer lock and that 1823a83d5a8bSDave Chinner * only unlinked, referenced inodes can be on the unlinked inode list. If we 1824a83d5a8bSDave Chinner * don't find the inode in cache, then let the caller handle the situation. 18259b247179SDarrick J. Wong */ 1826a83d5a8bSDave Chinner static struct xfs_inode * 1827a83d5a8bSDave Chinner xfs_iunlink_lookup( 18289b247179SDarrick J. Wong struct xfs_perag *pag, 18299b247179SDarrick J. Wong xfs_agino_t agino) 18309b247179SDarrick J. Wong { 1831a83d5a8bSDave Chinner struct xfs_inode *ip; 18329b247179SDarrick J. Wong 1833a83d5a8bSDave Chinner rcu_read_lock(); 1834a83d5a8bSDave Chinner ip = radix_tree_lookup(&pag->pag_ici_root, agino); 183568b957f6SDarrick J. Wong if (!ip) { 183668b957f6SDarrick J. Wong /* Caller can handle inode not being in memory. */ 183768b957f6SDarrick J. Wong rcu_read_unlock(); 183868b957f6SDarrick J. Wong return NULL; 183968b957f6SDarrick J. Wong } 18409b247179SDarrick J. Wong 18419b247179SDarrick J. Wong /* 184268b957f6SDarrick J. Wong * Inode in RCU freeing limbo should not happen. Warn about this and 184368b957f6SDarrick J. Wong * let the caller handle the failure. 18449b247179SDarrick J. Wong */ 184568b957f6SDarrick J. Wong if (WARN_ON_ONCE(!ip->i_ino)) { 1846a83d5a8bSDave Chinner rcu_read_unlock(); 1847a83d5a8bSDave Chinner return NULL; 1848a83d5a8bSDave Chinner } 1849a83d5a8bSDave Chinner ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM)); 1850a83d5a8bSDave Chinner rcu_read_unlock(); 1851a83d5a8bSDave Chinner return ip; 1852a83d5a8bSDave Chinner } 1853a83d5a8bSDave Chinner 185468b957f6SDarrick J. Wong /* 185568b957f6SDarrick J. Wong * Update the prev pointer of the next agino. Returns -ENOLINK if the inode 185668b957f6SDarrick J. Wong * is not in cache. 185768b957f6SDarrick J. Wong */ 18589b247179SDarrick J. Wong static int 18592fd26cc0SDave Chinner xfs_iunlink_update_backref( 18609b247179SDarrick J. Wong struct xfs_perag *pag, 18619b247179SDarrick J. Wong xfs_agino_t prev_agino, 18622fd26cc0SDave Chinner xfs_agino_t next_agino) 18639b247179SDarrick J. Wong { 18642fd26cc0SDave Chinner struct xfs_inode *ip; 18659b247179SDarrick J. Wong 18662fd26cc0SDave Chinner /* No update necessary if we are at the end of the list. */ 18672fd26cc0SDave Chinner if (next_agino == NULLAGINO) 18689b247179SDarrick J. Wong return 0; 18699b247179SDarrick J. Wong 18702fd26cc0SDave Chinner ip = xfs_iunlink_lookup(pag, next_agino); 18712fd26cc0SDave Chinner if (!ip) 187268b957f6SDarrick J. Wong return -ENOLINK; 187368b957f6SDarrick J. Wong 18742fd26cc0SDave Chinner ip->i_prev_unlinked = prev_agino; 18759b247179SDarrick J. Wong return 0; 18769b247179SDarrick J. Wong } 18779b247179SDarrick J. Wong 18789b247179SDarrick J. Wong /* 18799a4a5118SDarrick J. Wong * Point the AGI unlinked bucket at an inode and log the results. The caller 18809a4a5118SDarrick J. Wong * is responsible for validating the old value. 18819a4a5118SDarrick J. Wong */ 18829a4a5118SDarrick J. Wong STATIC int 18839a4a5118SDarrick J. Wong xfs_iunlink_update_bucket( 18849a4a5118SDarrick J. Wong struct xfs_trans *tp, 1885f40aadb2SDave Chinner struct xfs_perag *pag, 18869a4a5118SDarrick J. Wong struct xfs_buf *agibp, 18879a4a5118SDarrick J. Wong unsigned int bucket_index, 18889a4a5118SDarrick J. Wong xfs_agino_t new_agino) 18899a4a5118SDarrick J. 
Wong { 1890370c782bSChristoph Hellwig struct xfs_agi *agi = agibp->b_addr; 18919a4a5118SDarrick J. Wong xfs_agino_t old_value; 18929a4a5118SDarrick J. Wong int offset; 18939a4a5118SDarrick J. Wong 18942d6ca832SDave Chinner ASSERT(xfs_verify_agino_or_null(pag, new_agino)); 18959a4a5118SDarrick J. Wong 18969a4a5118SDarrick J. Wong old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]); 1897f40aadb2SDave Chinner trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index, 18989a4a5118SDarrick J. Wong old_value, new_agino); 18999a4a5118SDarrick J. Wong 19009a4a5118SDarrick J. Wong /* 19019a4a5118SDarrick J. Wong * We should never find the head of the list already set to the value 19029a4a5118SDarrick J. Wong * passed in because either we're adding or removing ourselves from the 19039a4a5118SDarrick J. Wong * head of the list. 19049a4a5118SDarrick J. Wong */ 1905a5155b87SDarrick J. Wong if (old_value == new_agino) { 19068d57c216SDarrick J. Wong xfs_buf_mark_corrupt(agibp); 19079a4a5118SDarrick J. Wong return -EFSCORRUPTED; 1908a5155b87SDarrick J. Wong } 19099a4a5118SDarrick J. Wong 19109a4a5118SDarrick J. Wong agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino); 19119a4a5118SDarrick J. Wong offset = offsetof(struct xfs_agi, agi_unlinked) + 19129a4a5118SDarrick J. Wong (sizeof(xfs_agino_t) * bucket_index); 19139a4a5118SDarrick J. Wong xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1); 19149a4a5118SDarrick J. Wong return 0; 19159a4a5118SDarrick J. Wong } 19169a4a5118SDarrick J. Wong 191768b957f6SDarrick J. Wong /* 191868b957f6SDarrick J. Wong * Load the inode @next_agino into the cache and set its prev_unlinked pointer 191968b957f6SDarrick J. Wong * to @prev_agino. Caller must hold the AGI to synchronize with other changes 192068b957f6SDarrick J. Wong * to the unlinked list. 192168b957f6SDarrick J. Wong */ 192268b957f6SDarrick J. Wong STATIC int 192368b957f6SDarrick J. Wong xfs_iunlink_reload_next( 192468b957f6SDarrick J. Wong struct xfs_trans *tp, 192568b957f6SDarrick J. Wong struct xfs_buf *agibp, 192668b957f6SDarrick J. Wong xfs_agino_t prev_agino, 192768b957f6SDarrick J. Wong xfs_agino_t next_agino) 192868b957f6SDarrick J. Wong { 192968b957f6SDarrick J. Wong struct xfs_perag *pag = agibp->b_pag; 193068b957f6SDarrick J. Wong struct xfs_mount *mp = pag->pag_mount; 193168b957f6SDarrick J. Wong struct xfs_inode *next_ip = NULL; 193268b957f6SDarrick J. Wong xfs_ino_t ino; 193368b957f6SDarrick J. Wong int error; 193468b957f6SDarrick J. Wong 193568b957f6SDarrick J. Wong ASSERT(next_agino != NULLAGINO); 193668b957f6SDarrick J. Wong 193768b957f6SDarrick J. Wong #ifdef DEBUG 193868b957f6SDarrick J. Wong rcu_read_lock(); 193968b957f6SDarrick J. Wong next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino); 194068b957f6SDarrick J. Wong ASSERT(next_ip == NULL); 194168b957f6SDarrick J. Wong rcu_read_unlock(); 194268b957f6SDarrick J. Wong #endif 194368b957f6SDarrick J. Wong 194468b957f6SDarrick J. Wong xfs_info_ratelimited(mp, 194568b957f6SDarrick J. Wong "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating recovery.", 194668b957f6SDarrick J. Wong next_agino, pag->pag_agno); 194768b957f6SDarrick J. Wong 194868b957f6SDarrick J. Wong /* 194968b957f6SDarrick J. Wong * Use an untrusted lookup just to be cautious in case the AGI has been 195068b957f6SDarrick J. Wong * corrupted and now points at a free inode. That shouldn't happen, 195168b957f6SDarrick J. Wong * but we'd rather shut down now since we're already running in a weird 195268b957f6SDarrick J. 
Wong * situation. 195368b957f6SDarrick J. Wong */ 195468b957f6SDarrick J. Wong ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino); 195568b957f6SDarrick J. Wong error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip); 195668b957f6SDarrick J. Wong if (error) 195768b957f6SDarrick J. Wong return error; 195868b957f6SDarrick J. Wong 195968b957f6SDarrick J. Wong /* If this is not an unlinked inode, something is very wrong. */ 196068b957f6SDarrick J. Wong if (VFS_I(next_ip)->i_nlink != 0) { 196168b957f6SDarrick J. Wong error = -EFSCORRUPTED; 196268b957f6SDarrick J. Wong goto rele; 196368b957f6SDarrick J. Wong } 196468b957f6SDarrick J. Wong 196568b957f6SDarrick J. Wong next_ip->i_prev_unlinked = prev_agino; 196668b957f6SDarrick J. Wong trace_xfs_iunlink_reload_next(next_ip); 196768b957f6SDarrick J. Wong rele: 196868b957f6SDarrick J. Wong ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE)); 1969*49813a21SDarrick J. Wong if (xfs_is_quotacheck_running(mp) && next_ip) 1970*49813a21SDarrick J. Wong xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED); 197168b957f6SDarrick J. Wong xfs_irele(next_ip); 197268b957f6SDarrick J. Wong return error; 197368b957f6SDarrick J. Wong } 197468b957f6SDarrick J. Wong 1975a4454cd6SDave Chinner static int 1976a4454cd6SDave Chinner xfs_iunlink_insert_inode( 1977f2fc16a3SDarrick J. Wong struct xfs_trans *tp, 1978f40aadb2SDave Chinner struct xfs_perag *pag, 1979a4454cd6SDave Chinner struct xfs_buf *agibp, 1980a4454cd6SDave Chinner struct xfs_inode *ip) 1981f2fc16a3SDarrick J. Wong { 1982f2fc16a3SDarrick J. Wong struct xfs_mount *mp = tp->t_mountp; 1983a4454cd6SDave Chinner struct xfs_agi *agi = agibp->b_addr; 1984a4454cd6SDave Chinner xfs_agino_t next_agino; 1985a4454cd6SDave Chinner xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1986a4454cd6SDave Chinner short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1987f2fc16a3SDarrick J. Wong int error; 1988f2fc16a3SDarrick J. Wong 1989a4454cd6SDave Chinner /* 1990a4454cd6SDave Chinner * Get the index into the agi hash table for the list this inode will 1991a4454cd6SDave Chinner * go on. Make sure the pointer isn't garbage and that this inode 1992a4454cd6SDave Chinner * isn't already on the list. 1993a4454cd6SDave Chinner */ 1994a4454cd6SDave Chinner next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 1995a4454cd6SDave Chinner if (next_agino == agino || 1996a4454cd6SDave Chinner !xfs_verify_agino_or_null(pag, next_agino)) { 1997a4454cd6SDave Chinner xfs_buf_mark_corrupt(agibp); 1998a4454cd6SDave Chinner return -EFSCORRUPTED; 1999f2fc16a3SDarrick J. Wong } 2000f2fc16a3SDarrick J. Wong 2001f2fc16a3SDarrick J. Wong /* 20022fd26cc0SDave Chinner * Update the prev pointer in the next inode to point back to this 20032fd26cc0SDave Chinner * inode. 2004f2fc16a3SDarrick J. Wong */ 20052fd26cc0SDave Chinner error = xfs_iunlink_update_backref(pag, agino, next_agino); 200668b957f6SDarrick J. Wong if (error == -ENOLINK) 200768b957f6SDarrick J. Wong error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino); 20082fd26cc0SDave Chinner if (error) 20092fd26cc0SDave Chinner return error; 20102fd26cc0SDave Chinner 2011a5155b87SDarrick J. Wong if (next_agino != NULLAGINO) { 2012a4454cd6SDave Chinner /* 2013a4454cd6SDave Chinner * There is already another inode in the bucket, so point this 2014a4454cd6SDave Chinner * inode to the current head of the list. 
2015a4454cd6SDave Chinner */ 2016062efdb0SDave Chinner error = xfs_iunlink_log_inode(tp, ip, pag, next_agino); 2017a4454cd6SDave Chinner if (error) 2018a4454cd6SDave Chinner return error; 20194fcc94d6SDave Chinner ip->i_next_unlinked = next_agino; 2020f2fc16a3SDarrick J. Wong } 2021f2fc16a3SDarrick J. Wong 2022a4454cd6SDave Chinner /* Point the head of the list to point to this inode. */ 2023f12b9668SDarrick J. Wong ip->i_prev_unlinked = NULLAGINO; 2024a4454cd6SDave Chinner return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino); 2025f2fc16a3SDarrick J. Wong } 2026f2fc16a3SDarrick J. Wong 20279a4a5118SDarrick J. Wong /* 2028c4a6bf7fSDarrick J. Wong * This is called when the inode's link count has gone to 0 or we are creating 2029c4a6bf7fSDarrick J. Wong * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0. 203054d7b5c1SDave Chinner * 203154d7b5c1SDave Chinner * We place the on-disk inode on a list in the AGI. It will be pulled from this 203254d7b5c1SDave Chinner * list when the inode is freed. 20331da177e4SLinus Torvalds */ 203454d7b5c1SDave Chinner STATIC int 20351da177e4SLinus Torvalds xfs_iunlink( 203654d7b5c1SDave Chinner struct xfs_trans *tp, 203754d7b5c1SDave Chinner struct xfs_inode *ip) 20381da177e4SLinus Torvalds { 20395837f625SDarrick J. Wong struct xfs_mount *mp = tp->t_mountp; 2040f40aadb2SDave Chinner struct xfs_perag *pag; 20415837f625SDarrick J. Wong struct xfs_buf *agibp; 20421da177e4SLinus Torvalds int error; 20431da177e4SLinus Torvalds 2044c4a6bf7fSDarrick J. Wong ASSERT(VFS_I(ip)->i_nlink == 0); 2045c19b3b05SDave Chinner ASSERT(VFS_I(ip)->i_mode != 0); 20464664c66cSDarrick J. Wong trace_xfs_iunlink(ip); 20471da177e4SLinus Torvalds 2048f40aadb2SDave Chinner pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 2049f40aadb2SDave Chinner 20505837f625SDarrick J. Wong /* Get the agi buffer first. It ensures lock ordering on the list. */ 205161021debSDave Chinner error = xfs_read_agi(pag, tp, &agibp); 2052859d7182SVlad Apostolov if (error) 2053f40aadb2SDave Chinner goto out; 20545e1be0fbSChristoph Hellwig 2055a4454cd6SDave Chinner error = xfs_iunlink_insert_inode(tp, pag, agibp, ip); 2056f40aadb2SDave Chinner out: 2057f40aadb2SDave Chinner xfs_perag_put(pag); 2058f40aadb2SDave Chinner return error; 20591da177e4SLinus Torvalds } 20601da177e4SLinus Torvalds 2061a4454cd6SDave Chinner static int 2062a4454cd6SDave Chinner xfs_iunlink_remove_inode( 206323ffa52cSDarrick J. Wong struct xfs_trans *tp, 2064f40aadb2SDave Chinner struct xfs_perag *pag, 2065a4454cd6SDave Chinner struct xfs_buf *agibp, 20665837f625SDarrick J. Wong struct xfs_inode *ip) 20671da177e4SLinus Torvalds { 20685837f625SDarrick J. Wong struct xfs_mount *mp = tp->t_mountp; 2069a4454cd6SDave Chinner struct xfs_agi *agi = agibp->b_addr; 20705837f625SDarrick J. Wong xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 2071b1d2a068SDarrick J. Wong xfs_agino_t head_agino; 20725837f625SDarrick J. Wong short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 20731da177e4SLinus Torvalds int error; 20741da177e4SLinus Torvalds 20754664c66cSDarrick J. Wong trace_xfs_iunlink_remove(ip); 20764664c66cSDarrick J. Wong 20771da177e4SLinus Torvalds /* 207886bfd375SDarrick J. Wong * Get the index into the agi hash table for the list this inode will 207986bfd375SDarrick J. Wong * go on. Make sure the head pointer isn't garbage. 20801da177e4SLinus Torvalds */ 2081b1d2a068SDarrick J. 
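/*
 * Worked example of the bucket hash computed above, assuming the 64
 * buckets described in the list design comments: agino 74565 selects
 * bucket 74565 % 64 == 5.
 */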
Wong head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 20822d6ca832SDave Chinner if (!xfs_verify_agino(pag, head_agino)) { 2083d2e73665SDarrick J. Wong XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 2084d2e73665SDarrick J. Wong agi, sizeof(*agi)); 2085d2e73665SDarrick J. Wong return -EFSCORRUPTED; 2086d2e73665SDarrick J. Wong } 20871da177e4SLinus Torvalds 20881da177e4SLinus Torvalds /* 2089b1d2a068SDarrick J. Wong * Set our inode's next_unlinked pointer to NULL and then return 2090b1d2a068SDarrick J. Wong * the old pointer value so that we can update whatever was previous 2091b1d2a068SDarrick J. Wong * to us in the list to point to whatever was next in the list. 20921da177e4SLinus Torvalds */ 2093062efdb0SDave Chinner error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO); 2094f2fc16a3SDarrick J. Wong if (error) 20951da177e4SLinus Torvalds return error; 20969a4a5118SDarrick J. Wong 20979b247179SDarrick J. Wong /* 20982fd26cc0SDave Chinner * Update the prev pointer in the next inode to point back to previous 20992fd26cc0SDave Chinner * inode in the chain. 21009b247179SDarrick J. Wong */ 21012fd26cc0SDave Chinner error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked, 21022fd26cc0SDave Chinner ip->i_next_unlinked); 210368b957f6SDarrick J. Wong if (error == -ENOLINK) 210468b957f6SDarrick J. Wong error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked, 210568b957f6SDarrick J. Wong ip->i_next_unlinked); 21069b247179SDarrick J. Wong if (error) 210792a00544SGao Xiang return error; 21089b247179SDarrick J. Wong 210992a00544SGao Xiang if (head_agino != agino) { 2110a83d5a8bSDave Chinner struct xfs_inode *prev_ip; 2111f2fc16a3SDarrick J. Wong 21122fd26cc0SDave Chinner prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked); 21132fd26cc0SDave Chinner if (!prev_ip) 21142fd26cc0SDave Chinner return -EFSCORRUPTED; 2115475ee413SChristoph Hellwig 2116062efdb0SDave Chinner error = xfs_iunlink_log_inode(tp, prev_ip, pag, 21175301f870SDave Chinner ip->i_next_unlinked); 2118a83d5a8bSDave Chinner prev_ip->i_next_unlinked = ip->i_next_unlinked; 21192fd26cc0SDave Chinner } else { 21202fd26cc0SDave Chinner /* Point the head of the list to the next unlinked inode. */ 21212fd26cc0SDave Chinner error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, 21222fd26cc0SDave Chinner ip->i_next_unlinked); 21231da177e4SLinus Torvalds } 21249b247179SDarrick J. Wong 2125a83d5a8bSDave Chinner ip->i_next_unlinked = NULLAGINO; 2126f12b9668SDarrick J. Wong ip->i_prev_unlinked = 0; 21272fd26cc0SDave Chinner return error; 21281da177e4SLinus Torvalds } 21291da177e4SLinus Torvalds 21305b3eed75SDave Chinner /* 2131a4454cd6SDave Chinner * Pull the on-disk inode from the AGI unlinked list. 2132a4454cd6SDave Chinner */ 2133a4454cd6SDave Chinner STATIC int 2134a4454cd6SDave Chinner xfs_iunlink_remove( 2135a4454cd6SDave Chinner struct xfs_trans *tp, 2136a4454cd6SDave Chinner struct xfs_perag *pag, 2137a4454cd6SDave Chinner struct xfs_inode *ip) 2138a4454cd6SDave Chinner { 2139a4454cd6SDave Chinner struct xfs_buf *agibp; 2140a4454cd6SDave Chinner int error; 2141a4454cd6SDave Chinner 2142a4454cd6SDave Chinner trace_xfs_iunlink_remove(ip); 2143a4454cd6SDave Chinner 2144a4454cd6SDave Chinner /* Get the agi buffer first. It ensures lock ordering on the list. 
*/ 2145a4454cd6SDave Chinner error = xfs_read_agi(pag, tp, &agibp); 21461da177e4SLinus Torvalds if (error) 21471baaed8fSDave Chinner return error; 21481da177e4SLinus Torvalds 2149a4454cd6SDave Chinner return xfs_iunlink_remove_inode(tp, pag, agibp, ip); 21501da177e4SLinus Torvalds } 21511da177e4SLinus Torvalds 21521da177e4SLinus Torvalds /* 215371e3e356SDave Chinner * Look up the inode number specified and if it is not already marked XFS_ISTALE 215471e3e356SDave Chinner * mark it stale. We should only find clean inodes in this lookup that aren't 215571e3e356SDave Chinner * already stale. 21565806165aSDave Chinner */ 215771e3e356SDave Chinner static void 215871e3e356SDave Chinner xfs_ifree_mark_inode_stale( 2159f40aadb2SDave Chinner struct xfs_perag *pag, 21605806165aSDave Chinner struct xfs_inode *free_ip, 2161d9fdd0adSBrian Foster xfs_ino_t inum) 21625806165aSDave Chinner { 2163f40aadb2SDave Chinner struct xfs_mount *mp = pag->pag_mount; 216471e3e356SDave Chinner struct xfs_inode_log_item *iip; 21655806165aSDave Chinner struct xfs_inode *ip; 21665806165aSDave Chinner 21675806165aSDave Chinner retry: 21685806165aSDave Chinner rcu_read_lock(); 21695806165aSDave Chinner ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum)); 21705806165aSDave Chinner 21715806165aSDave Chinner /* Inode not in memory, nothing to do */ 217271e3e356SDave Chinner if (!ip) { 217371e3e356SDave Chinner rcu_read_unlock(); 217471e3e356SDave Chinner return; 217571e3e356SDave Chinner } 21765806165aSDave Chinner 21775806165aSDave Chinner /* 21785806165aSDave Chinner * because this is an RCU protected lookup, we could find a recently 21795806165aSDave Chinner * freed or even reallocated inode during the lookup. We need to check 21805806165aSDave Chinner * under the i_flags_lock for a valid inode here. Skip it if it is not 21815806165aSDave Chinner * valid, the wrong inode or stale. 21825806165aSDave Chinner */ 21835806165aSDave Chinner spin_lock(&ip->i_flags_lock); 2184718ecc50SDave Chinner if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) 2185718ecc50SDave Chinner goto out_iflags_unlock; 21865806165aSDave Chinner 21875806165aSDave Chinner /* 21885806165aSDave Chinner * Don't try to lock/unlock the current inode, but we _cannot_ skip the 21895806165aSDave Chinner * other inodes that we did not find in the list attached to the buffer 21905806165aSDave Chinner * and are not already marked stale. If we can't lock it, back off and 21915806165aSDave Chinner * retry. 21925806165aSDave Chinner */ 21935806165aSDave Chinner if (ip != free_ip) { 21945806165aSDave Chinner if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 219571e3e356SDave Chinner spin_unlock(&ip->i_flags_lock); 21965806165aSDave Chinner rcu_read_unlock(); 21975806165aSDave Chinner delay(1); 21985806165aSDave Chinner goto retry; 21995806165aSDave Chinner } 22005806165aSDave Chinner } 220171e3e356SDave Chinner ip->i_flags |= XFS_ISTALE; 22025806165aSDave Chinner 220371e3e356SDave Chinner /* 2204718ecc50SDave Chinner * If the inode is flushing, it is already attached to the buffer. All 220571e3e356SDave Chinner * we needed to do here is mark the inode stale so buffer IO completion 220671e3e356SDave Chinner * will remove it from the AIL. 
220771e3e356SDave Chinner */ 220871e3e356SDave Chinner iip = ip->i_itemp; 2209718ecc50SDave Chinner if (__xfs_iflags_test(ip, XFS_IFLUSHING)) { 221071e3e356SDave Chinner ASSERT(!list_empty(&iip->ili_item.li_bio_list)); 221171e3e356SDave Chinner ASSERT(iip->ili_last_fields); 221271e3e356SDave Chinner goto out_iunlock; 221371e3e356SDave Chinner } 22145806165aSDave Chinner 22155806165aSDave Chinner /* 221648d55e2aSDave Chinner * Inodes not attached to the buffer can be released immediately. 221748d55e2aSDave Chinner * Everything else has to go through xfs_iflush_abort() on journal 221848d55e2aSDave Chinner * commit as the flock synchronises removal of the inode from the 221948d55e2aSDave Chinner * cluster buffer against inode reclaim. 22205806165aSDave Chinner */ 2221718ecc50SDave Chinner if (!iip || list_empty(&iip->ili_item.li_bio_list)) 222271e3e356SDave Chinner goto out_iunlock; 2223718ecc50SDave Chinner 2224718ecc50SDave Chinner __xfs_iflags_set(ip, XFS_IFLUSHING); 2225718ecc50SDave Chinner spin_unlock(&ip->i_flags_lock); 2226718ecc50SDave Chinner rcu_read_unlock(); 22275806165aSDave Chinner 222871e3e356SDave Chinner /* we have a dirty inode in memory that has not yet been flushed. */ 222971e3e356SDave Chinner spin_lock(&iip->ili_lock); 223071e3e356SDave Chinner iip->ili_last_fields = iip->ili_fields; 223171e3e356SDave Chinner iip->ili_fields = 0; 223271e3e356SDave Chinner iip->ili_fsync_fields = 0; 223371e3e356SDave Chinner spin_unlock(&iip->ili_lock); 223471e3e356SDave Chinner ASSERT(iip->ili_last_fields); 223571e3e356SDave Chinner 2236718ecc50SDave Chinner if (ip != free_ip) 2237718ecc50SDave Chinner xfs_iunlock(ip, XFS_ILOCK_EXCL); 2238718ecc50SDave Chinner return; 2239718ecc50SDave Chinner 224071e3e356SDave Chinner out_iunlock: 224171e3e356SDave Chinner if (ip != free_ip) 224271e3e356SDave Chinner xfs_iunlock(ip, XFS_ILOCK_EXCL); 2243718ecc50SDave Chinner out_iflags_unlock: 2244718ecc50SDave Chinner spin_unlock(&ip->i_flags_lock); 2245718ecc50SDave Chinner rcu_read_unlock(); 22465806165aSDave Chinner } 22475806165aSDave Chinner 22485806165aSDave Chinner /* 22491da177e4SLinus Torvalds * A big issue when freeing the inode cluster is that we _cannot_ skip any 22501da177e4SLinus Torvalds * inodes that are in memory - they all must be marked stale and attached to 22511da177e4SLinus Torvalds * the cluster buffer. 22521da177e4SLinus Torvalds */ 2253f40aadb2SDave Chinner static int 22541da177e4SLinus Torvalds xfs_ifree_cluster( 225571e3e356SDave Chinner struct xfs_trans *tp, 2256f40aadb2SDave Chinner struct xfs_perag *pag, 2257f40aadb2SDave Chinner struct xfs_inode *free_ip, 22581da177e4SLinus Torvalds struct xfs_icluster *xic) 22591da177e4SLinus Torvalds { 226071e3e356SDave Chinner struct xfs_mount *mp = free_ip->i_mount; 226171e3e356SDave Chinner struct xfs_ino_geometry *igeo = M_IGEO(mp); 226271e3e356SDave Chinner struct xfs_buf *bp; 226371e3e356SDave Chinner xfs_daddr_t blkno; 226471e3e356SDave Chinner xfs_ino_t inum = xic->first_ino; 22651da177e4SLinus Torvalds int nbufs; 22661da177e4SLinus Torvalds int i, j; 22671da177e4SLinus Torvalds int ioffset; 2268ce92464cSDarrick J. Wong int error; 22691da177e4SLinus Torvalds 2270ef325959SDarrick J. Wong nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster; 22711da177e4SLinus Torvalds 2272ef325959SDarrick J. 
Wong for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) { 22731da177e4SLinus Torvalds /* 22741da177e4SLinus Torvalds * The allocation bitmap tells us which inodes of the chunk were 22751da177e4SLinus Torvalds * physically allocated. Skip the cluster if an inode falls into 22761da177e4SLinus Torvalds * a sparse region. 22771da177e4SLinus Torvalds */ 22781da177e4SLinus Torvalds ioffset = inum - xic->first_ino; 22791da177e4SLinus Torvalds if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) { 2280ef325959SDarrick J. Wong ASSERT(ioffset % igeo->inodes_per_cluster == 0); 22811da177e4SLinus Torvalds continue; 22821da177e4SLinus Torvalds } 22831da177e4SLinus Torvalds 22841da177e4SLinus Torvalds blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 22851da177e4SLinus Torvalds XFS_INO_TO_AGBNO(mp, inum)); 22861da177e4SLinus Torvalds 22871da177e4SLinus Torvalds /* 22881da177e4SLinus Torvalds * We obtain and lock the backing buffer first in the process 2289718ecc50SDave Chinner * here to ensure dirty inodes attached to the buffer remain in 2290718ecc50SDave Chinner * the flushing state while we mark them stale. 2291718ecc50SDave Chinner * 22921da177e4SLinus Torvalds * If we scan the in-memory inodes first, then buffer IO can 22931da177e4SLinus Torvalds * complete before we get a lock on it, and hence we may fail 22941da177e4SLinus Torvalds * to mark all the active inodes on the buffer stale. 22951da177e4SLinus Torvalds */ 2296ce92464cSDarrick J. Wong error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2297ef325959SDarrick J. Wong mp->m_bsize * igeo->blocks_per_cluster, 2298ce92464cSDarrick J. Wong XBF_UNMAPPED, &bp); 229971e3e356SDave Chinner if (error) 2300ce92464cSDarrick J. Wong return error; 23011da177e4SLinus Torvalds 23021da177e4SLinus Torvalds /* 23031da177e4SLinus Torvalds * This buffer may not have been correctly initialised as we 23041da177e4SLinus Torvalds * didn't read it from disk. That's not important because we are 23051da177e4SLinus Torvalds * only using it to mark the buffer as stale in the log, and to 23061da177e4SLinus Torvalds * attach stale cached inodes on it. That means it will never be 23071da177e4SLinus Torvalds * dispatched for IO. If it is, we want to know about it, and we 23081da177e4SLinus Torvalds * want it to fail. We can achieve this by adding a write 23091da177e4SLinus Torvalds * verifier to the buffer. 23101da177e4SLinus Torvalds */ 23111da177e4SLinus Torvalds bp->b_ops = &xfs_inode_buf_ops; 23121da177e4SLinus Torvalds 23131da177e4SLinus Torvalds /* 231471e3e356SDave Chinner * Now we need to set all the cached clean inodes as XFS_ISTALE, 231571e3e356SDave Chinner * too. This requires lookups, and will skip inodes that we've 231671e3e356SDave Chinner * already marked XFS_ISTALE. 23171da177e4SLinus Torvalds */ 231871e3e356SDave Chinner for (i = 0; i < igeo->inodes_per_cluster; i++) 2319f40aadb2SDave Chinner xfs_ifree_mark_inode_stale(pag, free_ip, inum + i); 23201da177e4SLinus Torvalds 23211da177e4SLinus Torvalds xfs_trans_stale_inode_buf(tp, bp); 23221da177e4SLinus Torvalds xfs_trans_binval(tp, bp); 23231da177e4SLinus Torvalds } 23241da177e4SLinus Torvalds return 0; 23251da177e4SLinus Torvalds } 23261da177e4SLinus Torvalds 23271da177e4SLinus Torvalds /* 23289a5280b3SDave Chinner * This is called to return an inode to the inode free list. The inode should 23299a5280b3SDave Chinner * already be truncated to 0 length and have no pages associated with it. This 23309a5280b3SDave Chinner * routine also assumes that the inode is already a part of the transaction.
23311da177e4SLinus Torvalds * 23329a5280b3SDave Chinner * The on-disk copy of the inode will have been added to the list of unlinked 23339a5280b3SDave Chinner * inodes in the AGI. We need to remove the inode from that list atomically with 23349a5280b3SDave Chinner * respect to freeing it here. 23351da177e4SLinus Torvalds */ 23361da177e4SLinus Torvalds int 23371da177e4SLinus Torvalds xfs_ifree( 23381da177e4SLinus Torvalds struct xfs_trans *tp, 23391da177e4SLinus Torvalds struct xfs_inode *ip) 23401da177e4SLinus Torvalds { 2341f40aadb2SDave Chinner struct xfs_mount *mp = ip->i_mount; 2342f40aadb2SDave Chinner struct xfs_perag *pag; 23431da177e4SLinus Torvalds struct xfs_icluster xic = { 0 }; 23441319ebefSDave Chinner struct xfs_inode_log_item *iip = ip->i_itemp; 2345f40aadb2SDave Chinner int error; 23461da177e4SLinus Torvalds 23471da177e4SLinus Torvalds ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 23481da177e4SLinus Torvalds ASSERT(VFS_I(ip)->i_nlink == 0); 2349daf83964SChristoph Hellwig ASSERT(ip->i_df.if_nextents == 0); 235013d2c10bSChristoph Hellwig ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode)); 23516e73a545SChristoph Hellwig ASSERT(ip->i_nblocks == 0); 23521da177e4SLinus Torvalds 2353f40aadb2SDave Chinner pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 2354f40aadb2SDave Chinner 23551da177e4SLinus Torvalds /* 23569a5280b3SDave Chinner * Free the inode first so that we guarantee that the AGI lock is going 23579a5280b3SDave Chinner * to be taken before we remove the inode from the unlinked list. This 23589a5280b3SDave Chinner * makes the AGI lock -> unlinked list modification order the same as 23599a5280b3SDave Chinner * used in O_TMPFILE creation. 23601da177e4SLinus Torvalds */ 2361f40aadb2SDave Chinner error = xfs_difree(tp, pag, ip->i_ino, &xic); 23621baaed8fSDave Chinner if (error) 23636f5097e3SBrian Foster goto out; 23649a5280b3SDave Chinner 23659a5280b3SDave Chinner error = xfs_iunlink_remove(tp, pag, ip); 23669a5280b3SDave Chinner if (error) 2367f40aadb2SDave Chinner goto out; 23681baaed8fSDave Chinner 2369b2c20045SChristoph Hellwig /* 2370b2c20045SChristoph Hellwig * Free any local-format data sitting around before we reset the 2371b2c20045SChristoph Hellwig * data fork to extents format. Note that the attr fork data has 2372b2c20045SChristoph Hellwig * already been freed by xfs_attr_inactive. 2373b2c20045SChristoph Hellwig */ 2374f7e67b20SChristoph Hellwig if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) { 2375b2c20045SChristoph Hellwig kmem_free(ip->i_df.if_u1.if_data); 2376b2c20045SChristoph Hellwig ip->i_df.if_u1.if_data = NULL; 2377b2c20045SChristoph Hellwig ip->i_df.if_bytes = 0; 2378b2c20045SChristoph Hellwig } 237998c4f78dSDarrick J. 
Wong 2380c19b3b05SDave Chinner VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ 2381db07349dSChristoph Hellwig ip->i_diflags = 0; 2382f40aadb2SDave Chinner ip->i_diflags2 = mp->m_ino_geo.new_diflags2; 23837821ea30SChristoph Hellwig ip->i_forkoff = 0; /* mark the attr fork not in use */ 2384f7e67b20SChristoph Hellwig ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS; 23859b3beb02SChristoph Hellwig if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS)) 23869b3beb02SChristoph Hellwig xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS); 2387dc1baa71SEric Sandeen 2388dc1baa71SEric Sandeen /* Don't attempt to replay owner changes for a deleted inode */ 23891319ebefSDave Chinner spin_lock(&iip->ili_lock); 23901319ebefSDave Chinner iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER); 23911319ebefSDave Chinner spin_unlock(&iip->ili_lock); 2392dc1baa71SEric Sandeen 23931da177e4SLinus Torvalds /* 23941da177e4SLinus Torvalds * Bump the generation count so no one will be confused 23951da177e4SLinus Torvalds * by reincarnations of this inode. 23961da177e4SLinus Torvalds */ 23979e9a2674SDave Chinner VFS_I(ip)->i_generation++; 23981da177e4SLinus Torvalds xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 23991da177e4SLinus Torvalds 240009b56604SBrian Foster if (xic.deleted) 2401f40aadb2SDave Chinner error = xfs_ifree_cluster(tp, pag, ip, &xic); 2402f40aadb2SDave Chinner out: 2403f40aadb2SDave Chinner xfs_perag_put(pag); 24042a30f36dSChandra Seetharaman return error; 24051da177e4SLinus Torvalds } 24061da177e4SLinus Torvalds 24071da177e4SLinus Torvalds /* 240860ec6783SChristoph Hellwig * This is called to unpin an inode. The caller must have the inode locked 240960ec6783SChristoph Hellwig * in at least shared mode so that the buffer cannot be subsequently pinned 241060ec6783SChristoph Hellwig * once someone is waiting for it to be unpinned. 
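 *
 * Typical caller-side pattern (an illustrative sketch only):
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);  <- pin count can no longer grow
 *	xfs_iunpin_wait(ip);              <- pushes the log up to
 *	                                     ili_commit_seq, then sleeps
 *	                                     on __XFS_IPINNED_BIT until
 *	                                     the pin count reaches zero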
24111da177e4SLinus Torvalds */ 241260ec6783SChristoph Hellwig static void 2413f392e631SChristoph Hellwig xfs_iunpin( 241460ec6783SChristoph Hellwig struct xfs_inode *ip) 2415a3f74ffbSDavid Chinner { 2416579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2417a3f74ffbSDavid Chinner 24184aaf15d1SDave Chinner trace_xfs_inode_unpin_nowait(ip, _RET_IP_); 24194aaf15d1SDave Chinner 2420a3f74ffbSDavid Chinner /* Give the log a push to start the unpinning I/O */ 24215f9b4b0dSDave Chinner xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL); 2422a14a348bSChristoph Hellwig 2423a3f74ffbSDavid Chinner } 2424a3f74ffbSDavid Chinner 2425f392e631SChristoph Hellwig static void 2426f392e631SChristoph Hellwig __xfs_iunpin_wait( 2427f392e631SChristoph Hellwig struct xfs_inode *ip) 2428f392e631SChristoph Hellwig { 2429f392e631SChristoph Hellwig wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT); 2430f392e631SChristoph Hellwig DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT); 2431f392e631SChristoph Hellwig 2432f392e631SChristoph Hellwig xfs_iunpin(ip); 2433f392e631SChristoph Hellwig 2434f392e631SChristoph Hellwig do { 243521417136SIngo Molnar prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 2436f392e631SChristoph Hellwig if (xfs_ipincount(ip)) 2437f392e631SChristoph Hellwig io_schedule(); 2438f392e631SChristoph Hellwig } while (xfs_ipincount(ip)); 243921417136SIngo Molnar finish_wait(wq, &wait.wq_entry); 2440f392e631SChristoph Hellwig } 2441f392e631SChristoph Hellwig 2442777df5afSDave Chinner void 24431da177e4SLinus Torvalds xfs_iunpin_wait( 244460ec6783SChristoph Hellwig struct xfs_inode *ip) 24451da177e4SLinus Torvalds { 2446f392e631SChristoph Hellwig if (xfs_ipincount(ip)) 2447f392e631SChristoph Hellwig __xfs_iunpin_wait(ip); 24481da177e4SLinus Torvalds } 24491da177e4SLinus Torvalds 245027320369SDave Chinner /* 245127320369SDave Chinner * Removing an inode from the namespace involves removing the directory entry 245227320369SDave Chinner * and dropping the link count on the inode. Removing the directory entry can 245327320369SDave Chinner * result in locking an AGF (directory blocks were freed) and removing a link 245427320369SDave Chinner * count can result in placing the inode on an unlinked list which results in 245527320369SDave Chinner * locking an AGI. 245627320369SDave Chinner * 245727320369SDave Chinner * The big problem here is that we have an ordering constraint on AGF and AGI 245827320369SDave Chinner * locking - inode allocation locks the AGI, then can allocate a new extent for 245927320369SDave Chinner * new inodes, locking the AGF after the AGI. Similarly, freeing the inode 246027320369SDave Chinner * removes the inode from the unlinked list, requiring that we lock the AGI 246127320369SDave Chinner * first, and then freeing the inode can result in an inode chunk being freed 246227320369SDave Chinner * and hence freeing disk space requiring that we lock an AGF. 246327320369SDave Chinner * 246427320369SDave Chinner * Hence the ordering that is imposed by other parts of the code is AGI before 246527320369SDave Chinner * AGF. This means we cannot remove the directory entry before we drop the inode 246627320369SDave Chinner * reference count and put it on the unlinked list as this results in a lock 246727320369SDave Chinner * order of AGF then AGI, and this can deadlock against inode allocation and 246827320369SDave Chinner * freeing. 
Therefore we must drop the link counts before we remove the 246927320369SDave Chinner * directory entry. 247027320369SDave Chinner * 247127320369SDave Chinner * This is still safe from a transactional point of view - it is not until we 2472310a75a3SDarrick J. Wong * get to xfs_defer_finish() that we have the possibility of multiple 247327320369SDave Chinner * transactions in this operation. Hence as long as we remove the directory 247427320369SDave Chinner * entry and drop the link count in the first transaction of the remove 247527320369SDave Chinner * operation, there are no transactional constraints on the ordering here. 247627320369SDave Chinner */ 2477c24b5dfaSDave Chinner int 2478c24b5dfaSDave Chinner xfs_remove( 2479c24b5dfaSDave Chinner xfs_inode_t *dp, 2480c24b5dfaSDave Chinner struct xfs_name *name, 2481c24b5dfaSDave Chinner xfs_inode_t *ip) 2482c24b5dfaSDave Chinner { 2483c24b5dfaSDave Chinner xfs_mount_t *mp = dp->i_mount; 2484c24b5dfaSDave Chinner xfs_trans_t *tp = NULL; 2485c19b3b05SDave Chinner int is_dir = S_ISDIR(VFS_I(ip)->i_mode); 2486871b9316SDarrick J. Wong int dontcare; 2487c24b5dfaSDave Chinner int error = 0; 2488c24b5dfaSDave Chinner uint resblks; 2489c24b5dfaSDave Chinner 2490c24b5dfaSDave Chinner trace_xfs_remove(dp, name); 2491c24b5dfaSDave Chinner 249275c8c50fSDave Chinner if (xfs_is_shutdown(mp)) 24932451337dSDave Chinner return -EIO; 2494c24b5dfaSDave Chinner 2495c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(dp); 2496c24b5dfaSDave Chinner if (error) 2497c24b5dfaSDave Chinner goto std_return; 2498c24b5dfaSDave Chinner 2499c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(ip); 2500c24b5dfaSDave Chinner if (error) 2501c24b5dfaSDave Chinner goto std_return; 2502c24b5dfaSDave Chinner 2503c24b5dfaSDave Chinner /* 2504871b9316SDarrick J. Wong * We try to get the real space reservation first, allowing for 2505871b9316SDarrick J. Wong * directory btree deletion(s) implying possible bmap insert(s). If we 2506871b9316SDarrick J. Wong * can't get the space reservation then we use 0 instead, and avoid the 2507871b9316SDarrick J. Wong * bmap btree insert(s) in the directory code by, if the bmap insert 2508871b9316SDarrick J. Wong * tries to happen, instead trimming the LAST block from the directory. 2509871b9316SDarrick J. Wong * 2510871b9316SDarrick J. Wong * Ignore EDQUOT and ENOSPC being returned via nospace_error because 2511871b9316SDarrick J. Wong * the directory code can handle a reservationless update and we don't 2512871b9316SDarrick J. Wong * want to prevent a user from trying to free space by deleting things. 2513c24b5dfaSDave Chinner */ 2514c24b5dfaSDave Chinner resblks = XFS_REMOVE_SPACE_RES(mp); 2515871b9316SDarrick J. Wong error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks, 2516871b9316SDarrick J. Wong &tp, &dontcare); 2517c24b5dfaSDave Chinner if (error) { 25182451337dSDave Chinner ASSERT(error != -ENOSPC); 2519253f4911SChristoph Hellwig goto std_return; 2520c24b5dfaSDave Chinner } 2521c24b5dfaSDave Chinner 2522c24b5dfaSDave Chinner /* 2523c24b5dfaSDave Chinner * If we're removing a directory, perform some additional validation.
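 *
 * (Reasoning sketch: an empty directory carries exactly two links,
 *  "." and the entry in its parent, so the checks below fail the
 *  remove with -ENOTEMPTY if either i_nlink != 2 or
 *  xfs_dir_isempty() reports remaining entries.)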
2524c24b5dfaSDave Chinner */ 2525c24b5dfaSDave Chinner if (is_dir) { 252654d7b5c1SDave Chinner ASSERT(VFS_I(ip)->i_nlink >= 2); 252754d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink != 2) { 25282451337dSDave Chinner error = -ENOTEMPTY; 2529c24b5dfaSDave Chinner goto out_trans_cancel; 2530c24b5dfaSDave Chinner } 2531c24b5dfaSDave Chinner if (!xfs_dir_isempty(ip)) { 25322451337dSDave Chinner error = -ENOTEMPTY; 2533c24b5dfaSDave Chinner goto out_trans_cancel; 2534c24b5dfaSDave Chinner } 2535c24b5dfaSDave Chinner 253627320369SDave Chinner /* Drop the link from ip's "..". */ 2537c24b5dfaSDave Chinner error = xfs_droplink(tp, dp); 2538c24b5dfaSDave Chinner if (error) 253927320369SDave Chinner goto out_trans_cancel; 2540c24b5dfaSDave Chinner 254127320369SDave Chinner /* Drop the "." link from ip to self. */ 2542c24b5dfaSDave Chinner error = xfs_droplink(tp, ip); 2543c24b5dfaSDave Chinner if (error) 254427320369SDave Chinner goto out_trans_cancel; 25455838d035SDarrick J. Wong 25465838d035SDarrick J. Wong /* 25475838d035SDarrick J. Wong * Point the unlinked child directory's ".." entry to the root 25485838d035SDarrick J. Wong * directory to eliminate back-references to inodes that may 25495838d035SDarrick J. Wong * get freed before the child directory is closed. If the fs 25505838d035SDarrick J. Wong * gets shrunk, this can lead to dirent inode validation errors. 25515838d035SDarrick J. Wong */ 25525838d035SDarrick J. Wong if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) { 25535838d035SDarrick J. Wong error = xfs_dir_replace(tp, ip, &xfs_name_dotdot, 25545838d035SDarrick J. Wong tp->t_mountp->m_sb.sb_rootino, 0); 25555838d035SDarrick J. Wong if (error) 25562653d533SDarrick J. Wong goto out_trans_cancel; 25575838d035SDarrick J. Wong } 2558c24b5dfaSDave Chinner } else { 2559c24b5dfaSDave Chinner /* 2560c24b5dfaSDave Chinner * When removing a non-directory we need to log the parent 2561c24b5dfaSDave Chinner * inode here. For a directory this is done implicitly 2562c24b5dfaSDave Chinner * by the xfs_droplink call for the ".." entry. 2563c24b5dfaSDave Chinner */ 2564c24b5dfaSDave Chinner xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 2565c24b5dfaSDave Chinner } 256627320369SDave Chinner xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2567c24b5dfaSDave Chinner 256827320369SDave Chinner /* Drop the link from dp to ip. */ 2569c24b5dfaSDave Chinner error = xfs_droplink(tp, ip); 2570c24b5dfaSDave Chinner if (error) 257127320369SDave Chinner goto out_trans_cancel; 2572c24b5dfaSDave Chinner 2573381eee69SBrian Foster error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks); 257427320369SDave Chinner if (error) { 25752451337dSDave Chinner ASSERT(error != -ENOENT); 2576c8eac49eSBrian Foster goto out_trans_cancel; 257727320369SDave Chinner } 257827320369SDave Chinner 2579c24b5dfaSDave Chinner /* 2580c24b5dfaSDave Chinner * If this is a synchronous mount, make sure that the 2581c24b5dfaSDave Chinner * remove transaction goes to disk before returning to 2582c24b5dfaSDave Chinner * the user. 
2583c24b5dfaSDave Chinner */ 25840560f31aSDave Chinner if (xfs_has_wsync(mp) || xfs_has_dirsync(mp)) 2585c24b5dfaSDave Chinner xfs_trans_set_sync(tp); 2586c24b5dfaSDave Chinner 258770393313SChristoph Hellwig error = xfs_trans_commit(tp); 2588c24b5dfaSDave Chinner if (error) 2589c24b5dfaSDave Chinner goto std_return; 2590c24b5dfaSDave Chinner 25912cd2ef6aSChristoph Hellwig if (is_dir && xfs_inode_is_filestream(ip)) 2592c24b5dfaSDave Chinner xfs_filestream_deassociate(ip); 2593c24b5dfaSDave Chinner 2594c24b5dfaSDave Chinner return 0; 2595c24b5dfaSDave Chinner 2596c24b5dfaSDave Chinner out_trans_cancel: 25974906e215SChristoph Hellwig xfs_trans_cancel(tp); 2598c24b5dfaSDave Chinner std_return: 2599c24b5dfaSDave Chinner return error; 2600c24b5dfaSDave Chinner } 2601c24b5dfaSDave Chinner 2602f6bba201SDave Chinner /* 2603f6bba201SDave Chinner * Enter all inodes for a rename transaction into a sorted array. 2604f6bba201SDave Chinner */ 260595afcf5cSDave Chinner #define __XFS_SORT_INODES 5 2606f6bba201SDave Chinner STATIC void 2607f6bba201SDave Chinner xfs_sort_for_rename( 260895afcf5cSDave Chinner struct xfs_inode *dp1, /* in: old (source) directory inode */ 260995afcf5cSDave Chinner struct xfs_inode *dp2, /* in: new (target) directory inode */ 261095afcf5cSDave Chinner struct xfs_inode *ip1, /* in: inode of old entry */ 261195afcf5cSDave Chinner struct xfs_inode *ip2, /* in: inode of new entry */ 261295afcf5cSDave Chinner struct xfs_inode *wip, /* in: whiteout inode */ 261395afcf5cSDave Chinner struct xfs_inode **i_tab,/* out: sorted array of inodes */ 261495afcf5cSDave Chinner int *num_inodes) /* in/out: inodes in array */ 2615f6bba201SDave Chinner { 2616f6bba201SDave Chinner int i, j; 2617f6bba201SDave Chinner 261895afcf5cSDave Chinner ASSERT(*num_inodes == __XFS_SORT_INODES); 261995afcf5cSDave Chinner memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *)); 262095afcf5cSDave Chinner 2621f6bba201SDave Chinner /* 2622f6bba201SDave Chinner * i_tab contains a list of pointers to inodes. We initialize 2623f6bba201SDave Chinner * the table here & we'll sort it. We will then use it to 2624f6bba201SDave Chinner * order the acquisition of the inode locks. 2625f6bba201SDave Chinner * 2626f6bba201SDave Chinner * Note that the table may contain duplicates. e.g., dp1 == dp2. 2627f6bba201SDave Chinner */ 262895afcf5cSDave Chinner i = 0; 262995afcf5cSDave Chinner i_tab[i++] = dp1; 263095afcf5cSDave Chinner i_tab[i++] = dp2; 263195afcf5cSDave Chinner i_tab[i++] = ip1; 263295afcf5cSDave Chinner if (ip2) 263395afcf5cSDave Chinner i_tab[i++] = ip2; 263495afcf5cSDave Chinner if (wip) 263595afcf5cSDave Chinner i_tab[i++] = wip; 263695afcf5cSDave Chinner *num_inodes = i; 2637f6bba201SDave Chinner 2638f6bba201SDave Chinner /* 2639f6bba201SDave Chinner * Sort the elements via bubble sort. (Remember, there are at 264095afcf5cSDave Chinner * most 5 elements to sort, so this is adequate.) 
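 *
 * Userspace analogue of the ordering (an illustrative sketch; the
 * array contents are made up and swap() is shorthand, only the
 * i_ino comparison mirrors the loop below):
 *
 *	uint64_t ino[5] = { 42, 17, 42, 8, 99 };   duplicates allowed
 *	for (i = 0; i < 5; i++)
 *		for (j = 1; j < 5; j++)
 *			if (ino[j] < ino[j - 1])
 *				swap(&ino[j], &ino[j - 1]);
 *
 * yields 8 17 42 42 99, and the locks are then taken in that order.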
2641f6bba201SDave Chinner */ 2642f6bba201SDave Chinner for (i = 0; i < *num_inodes; i++) { 2643f6bba201SDave Chinner for (j = 1; j < *num_inodes; j++) { 2644f6bba201SDave Chinner if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { 264595afcf5cSDave Chinner struct xfs_inode *temp = i_tab[j]; 2646f6bba201SDave Chinner i_tab[j] = i_tab[j-1]; 2647f6bba201SDave Chinner i_tab[j-1] = temp; 2648f6bba201SDave Chinner } 2649f6bba201SDave Chinner } 2650f6bba201SDave Chinner } 2651f6bba201SDave Chinner } 2652f6bba201SDave Chinner 2653310606b0SDave Chinner static int 2654310606b0SDave Chinner xfs_finish_rename( 2655c9cfdb38SBrian Foster struct xfs_trans *tp) 2656310606b0SDave Chinner { 2657310606b0SDave Chinner /* 2658310606b0SDave Chinner * If this is a synchronous mount, make sure that the rename transaction 2659310606b0SDave Chinner * goes to disk before returning to the user. 2660310606b0SDave Chinner */ 26610560f31aSDave Chinner if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp)) 2662310606b0SDave Chinner xfs_trans_set_sync(tp); 2663310606b0SDave Chinner 266470393313SChristoph Hellwig return xfs_trans_commit(tp); 2665310606b0SDave Chinner } 2666310606b0SDave Chinner 2667f6bba201SDave Chinner /* 2668d31a1825SCarlos Maiolino * xfs_cross_rename() 2669d31a1825SCarlos Maiolino * 26700145225eSBhaskar Chowdhury * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall 2671d31a1825SCarlos Maiolino */ 2672d31a1825SCarlos Maiolino STATIC int 2673d31a1825SCarlos Maiolino xfs_cross_rename( 2674d31a1825SCarlos Maiolino struct xfs_trans *tp, 2675d31a1825SCarlos Maiolino struct xfs_inode *dp1, 2676d31a1825SCarlos Maiolino struct xfs_name *name1, 2677d31a1825SCarlos Maiolino struct xfs_inode *ip1, 2678d31a1825SCarlos Maiolino struct xfs_inode *dp2, 2679d31a1825SCarlos Maiolino struct xfs_name *name2, 2680d31a1825SCarlos Maiolino struct xfs_inode *ip2, 2681d31a1825SCarlos Maiolino int spaceres) 2682d31a1825SCarlos Maiolino { 2683d31a1825SCarlos Maiolino int error = 0; 2684d31a1825SCarlos Maiolino int ip1_flags = 0; 2685d31a1825SCarlos Maiolino int ip2_flags = 0; 2686d31a1825SCarlos Maiolino int dp2_flags = 0; 2687d31a1825SCarlos Maiolino 2688d31a1825SCarlos Maiolino /* Swap inode number for dirent in first parent */ 2689381eee69SBrian Foster error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres); 2690d31a1825SCarlos Maiolino if (error) 2691eeacd321SDave Chinner goto out_trans_abort; 2692d31a1825SCarlos Maiolino 2693d31a1825SCarlos Maiolino /* Swap inode number for dirent in second parent */ 2694381eee69SBrian Foster error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres); 2695d31a1825SCarlos Maiolino if (error) 2696eeacd321SDave Chinner goto out_trans_abort; 2697d31a1825SCarlos Maiolino 2698d31a1825SCarlos Maiolino /* 2699d31a1825SCarlos Maiolino * If we're renaming one or more directories across different parents, 2700d31a1825SCarlos Maiolino * update the respective ".." entries (and link counts) to match the new 2701d31a1825SCarlos Maiolino * parents. 
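 *
 * Userspace view of this path (an illustrative sketch; the paths are
 * made up, and the renameat2() library wrapper assumes a reasonably
 * recent glibc with _GNU_SOURCE defined):
 *
 *	ret = renameat2(AT_FDCWD, "a/x", AT_FDCWD, "b/y",
 *			RENAME_EXCHANGE);
 *
 * Both names must already exist and are swapped atomically, which is
 * the operation handled by xfs_cross_rename().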
*/ 2703d31a1825SCarlos Maiolino if (dp1 != dp2) { 2704d31a1825SCarlos Maiolino dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 2705d31a1825SCarlos Maiolino 2706c19b3b05SDave Chinner if (S_ISDIR(VFS_I(ip2)->i_mode)) { 2707d31a1825SCarlos Maiolino error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot, 2708381eee69SBrian Foster dp1->i_ino, spaceres); 2709d31a1825SCarlos Maiolino if (error) 2710eeacd321SDave Chinner goto out_trans_abort; 2711d31a1825SCarlos Maiolino 2712d31a1825SCarlos Maiolino /* transfer ip2 ".." reference to dp1 */ 2713c19b3b05SDave Chinner if (!S_ISDIR(VFS_I(ip1)->i_mode)) { 2714d31a1825SCarlos Maiolino error = xfs_droplink(tp, dp2); 2715d31a1825SCarlos Maiolino if (error) 2716eeacd321SDave Chinner goto out_trans_abort; 271791083269SEric Sandeen xfs_bumplink(tp, dp1); 2718d31a1825SCarlos Maiolino } 2719d31a1825SCarlos Maiolino 2720d31a1825SCarlos Maiolino /* 2721d31a1825SCarlos Maiolino * Although ip1 isn't changed here, userspace needs 2722d31a1825SCarlos Maiolino * to be warned about the change, so that applications 2723d31a1825SCarlos Maiolino * relying on it (like backup ones) will properly 2724d31a1825SCarlos Maiolino * be notified of the change 2725d31a1825SCarlos Maiolino */ 2726d31a1825SCarlos Maiolino ip1_flags |= XFS_ICHGTIME_CHG; 2727d31a1825SCarlos Maiolino ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 2728d31a1825SCarlos Maiolino } 2729d31a1825SCarlos Maiolino 2730c19b3b05SDave Chinner if (S_ISDIR(VFS_I(ip1)->i_mode)) { 2731d31a1825SCarlos Maiolino error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot, 2732381eee69SBrian Foster dp2->i_ino, spaceres); 2733d31a1825SCarlos Maiolino if (error) 2734eeacd321SDave Chinner goto out_trans_abort; 2735d31a1825SCarlos Maiolino 2736d31a1825SCarlos Maiolino /* transfer ip1 ".."
reference to dp2 */ 2737c19b3b05SDave Chinner if (!S_ISDIR(VFS_I(ip2)->i_mode)) { 2738d31a1825SCarlos Maiolino error = xfs_droplink(tp, dp1); 2739d31a1825SCarlos Maiolino if (error) 2740eeacd321SDave Chinner goto out_trans_abort; 274191083269SEric Sandeen xfs_bumplink(tp, dp2); 2742d31a1825SCarlos Maiolino } 2743d31a1825SCarlos Maiolino 2744d31a1825SCarlos Maiolino /* 2745d31a1825SCarlos Maiolino * Although ip2 isn't changed here, userspace needs 2746d31a1825SCarlos Maiolino * to be warned about the change, so that applications 2747d31a1825SCarlos Maiolino * relying on it (like backup ones) will properly 2748d31a1825SCarlos Maiolino * be notified of the change 2749d31a1825SCarlos Maiolino */ 2750d31a1825SCarlos Maiolino ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 2751d31a1825SCarlos Maiolino ip2_flags |= XFS_ICHGTIME_CHG; 2752d31a1825SCarlos Maiolino } 2753d31a1825SCarlos Maiolino } 2754d31a1825SCarlos Maiolino 2755d31a1825SCarlos Maiolino if (ip1_flags) { 2756d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, ip1, ip1_flags); 2757d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE); 2758d31a1825SCarlos Maiolino } 2759d31a1825SCarlos Maiolino if (ip2_flags) { 2760d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, ip2, ip2_flags); 2761d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE); 2762d31a1825SCarlos Maiolino } 2763d31a1825SCarlos Maiolino if (dp2_flags) { 2764d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, dp2, dp2_flags); 2765d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE); 2766d31a1825SCarlos Maiolino } 2767d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2768d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE); 2769c9cfdb38SBrian Foster return xfs_finish_rename(tp); 2770eeacd321SDave Chinner 2771eeacd321SDave Chinner out_trans_abort: 27724906e215SChristoph Hellwig xfs_trans_cancel(tp); 2773d31a1825SCarlos Maiolino return error; 2774d31a1825SCarlos Maiolino } 2775d31a1825SCarlos Maiolino 2776d31a1825SCarlos Maiolino /* 27777dcf5c3eSDave Chinner * xfs_rename_alloc_whiteout() 27787dcf5c3eSDave Chinner * 2779b63da6c8SRandy Dunlap * Return a referenced, unlinked, unlocked inode that can be used as a 27807dcf5c3eSDave Chinner * whiteout in a rename transaction. We use a tmpfile inode here so that if we 27817dcf5c3eSDave Chinner * crash between allocating the inode and linking it into the rename transaction, 27827dcf5c3eSDave Chinner * recovery will free the inode and we won't leak it.
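 *
 * Userspace view (an illustrative sketch; the paths are made up, and
 * overlayfs is the best-known user of this flag):
 *
 *	renameat2(AT_FDCWD, "upper/name", AT_FDCWD, "moved/name",
 *		  RENAME_WHITEOUT);
 *
 * renames the entry as usual but leaves the 0:0 character device
 * allocated by this function, the whiteout, at "upper/name".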
27837dcf5c3eSDave Chinner */ 27847dcf5c3eSDave Chinner static int 27857dcf5c3eSDave Chinner xfs_rename_alloc_whiteout( 2786f2d40141SChristian Brauner struct mnt_idmap *idmap, 278770b589a3SEric Sandeen struct xfs_name *src_name, 27887dcf5c3eSDave Chinner struct xfs_inode *dp, 27897dcf5c3eSDave Chinner struct xfs_inode **wip) 27907dcf5c3eSDave Chinner { 27917dcf5c3eSDave Chinner struct xfs_inode *tmpfile; 279270b589a3SEric Sandeen struct qstr name; 27937dcf5c3eSDave Chinner int error; 27947dcf5c3eSDave Chinner 2795f2d40141SChristian Brauner error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE, 2796f736d93dSChristoph Hellwig &tmpfile); 27977dcf5c3eSDave Chinner if (error) 27987dcf5c3eSDave Chinner return error; 27997dcf5c3eSDave Chinner 280070b589a3SEric Sandeen name.name = src_name->name; 280170b589a3SEric Sandeen name.len = src_name->len; 280270b589a3SEric Sandeen error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name); 280370b589a3SEric Sandeen if (error) { 280470b589a3SEric Sandeen xfs_finish_inode_setup(tmpfile); 280570b589a3SEric Sandeen xfs_irele(tmpfile); 280670b589a3SEric Sandeen return error; 280770b589a3SEric Sandeen } 280870b589a3SEric Sandeen 280922419ac9SBrian Foster /* 281022419ac9SBrian Foster * Prepare the tmpfile inode as if it were created through the VFS. 2811c4a6bf7fSDarrick J. Wong * Complete the inode setup and flag it as linkable. nlink is already 2812c4a6bf7fSDarrick J. Wong * zero, so we can skip the drop_nlink. 281322419ac9SBrian Foster */ 28142b3d1d41SChristoph Hellwig xfs_setup_iops(tmpfile); 28157dcf5c3eSDave Chinner xfs_finish_inode_setup(tmpfile); 28167dcf5c3eSDave Chinner VFS_I(tmpfile)->i_state |= I_LINKABLE; 28177dcf5c3eSDave Chinner 28187dcf5c3eSDave Chinner *wip = tmpfile; 28197dcf5c3eSDave Chinner return 0; 28207dcf5c3eSDave Chinner } 28217dcf5c3eSDave Chinner 28227dcf5c3eSDave Chinner /* 2823f6bba201SDave Chinner * xfs_rename 2824f6bba201SDave Chinner */ 2825f6bba201SDave Chinner int 2826f6bba201SDave Chinner xfs_rename( 2827f2d40141SChristian Brauner struct mnt_idmap *idmap, 28287dcf5c3eSDave Chinner struct xfs_inode *src_dp, 2829f6bba201SDave Chinner struct xfs_name *src_name, 28307dcf5c3eSDave Chinner struct xfs_inode *src_ip, 28317dcf5c3eSDave Chinner struct xfs_inode *target_dp, 2832f6bba201SDave Chinner struct xfs_name *target_name, 28337dcf5c3eSDave Chinner struct xfs_inode *target_ip, 2834d31a1825SCarlos Maiolino unsigned int flags) 2835f6bba201SDave Chinner { 28367dcf5c3eSDave Chinner struct xfs_mount *mp = src_dp->i_mount; 28377dcf5c3eSDave Chinner struct xfs_trans *tp; 28387dcf5c3eSDave Chinner struct xfs_inode *wip = NULL; /* whiteout inode */ 28397dcf5c3eSDave Chinner struct xfs_inode *inodes[__XFS_SORT_INODES]; 28406da1b4b1SDarrick J. Wong int i; 284195afcf5cSDave Chinner int num_inodes = __XFS_SORT_INODES; 28422b93681fSDave Chinner bool new_parent = (src_dp != target_dp); 2843c19b3b05SDave Chinner bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); 2844f6bba201SDave Chinner int spaceres; 284541667260SDarrick J. Wong bool retried = false; 284641667260SDarrick J. 
Wong int error, nospace_error = 0; 2847f6bba201SDave Chinner 2848f6bba201SDave Chinner trace_xfs_rename(src_dp, target_dp, src_name, target_name); 2849f6bba201SDave Chinner 2850eeacd321SDave Chinner if ((flags & RENAME_EXCHANGE) && !target_ip) 2851eeacd321SDave Chinner return -EINVAL; 2852f6bba201SDave Chinner 28537dcf5c3eSDave Chinner /* 28547dcf5c3eSDave Chinner * If we are doing a whiteout operation, allocate the whiteout inode 28557dcf5c3eSDave Chinner * we will be placing at the target and ensure the type is set 28567dcf5c3eSDave Chinner * appropriately. 28577dcf5c3eSDave Chinner */ 28587dcf5c3eSDave Chinner if (flags & RENAME_WHITEOUT) { 2859f2d40141SChristian Brauner error = xfs_rename_alloc_whiteout(idmap, src_name, 286070b589a3SEric Sandeen target_dp, &wip); 28617dcf5c3eSDave Chinner if (error) 28627dcf5c3eSDave Chinner return error; 2863f6bba201SDave Chinner 28647dcf5c3eSDave Chinner /* setup target dirent info as whiteout */ 28657dcf5c3eSDave Chinner src_name->type = XFS_DIR3_FT_CHRDEV; 28667dcf5c3eSDave Chinner } 28677dcf5c3eSDave Chinner 28687dcf5c3eSDave Chinner xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip, 2869f6bba201SDave Chinner inodes, &num_inodes); 2870f6bba201SDave Chinner 287141667260SDarrick J. Wong retry: 287241667260SDarrick J. Wong nospace_error = 0; 2873f6bba201SDave Chinner spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); 2874253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp); 28752451337dSDave Chinner if (error == -ENOSPC) { 287641667260SDarrick J. Wong nospace_error = error; 2877f6bba201SDave Chinner spaceres = 0; 2878253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0, 2879253f4911SChristoph Hellwig &tp); 2880f6bba201SDave Chinner } 2881445883e8SDave Chinner if (error) 2882253f4911SChristoph Hellwig goto out_release_wip; 2883f6bba201SDave Chinner 2884f6bba201SDave Chinner /* 2885f6bba201SDave Chinner * Attach the dquots to the inodes 2886f6bba201SDave Chinner */ 2887f6bba201SDave Chinner error = xfs_qm_vop_rename_dqattach(inodes); 2888445883e8SDave Chinner if (error) 2889445883e8SDave Chinner goto out_trans_cancel; 2890f6bba201SDave Chinner 2891f6bba201SDave Chinner /* 2892f6bba201SDave Chinner * Lock all the participating inodes. Depending upon whether 2893f6bba201SDave Chinner * the target_name exists in the target directory, and 2894f6bba201SDave Chinner * whether the target directory is the same as the source 2895e07ee6feSAllison Henderson * directory, we can lock from 2 to 5 inodes. 2896f6bba201SDave Chinner */ 2897f6bba201SDave Chinner xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); 2898f6bba201SDave Chinner 2899f6bba201SDave Chinner /* 2900f6bba201SDave Chinner * Join all the inodes to the transaction. From this point on, 2901f6bba201SDave Chinner * we can rely on either trans_commit or trans_cancel to unlock 2902f6bba201SDave Chinner * them. 
2903f6bba201SDave Chinner */ 290465523218SChristoph Hellwig xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); 2905f6bba201SDave Chinner if (new_parent) 290665523218SChristoph Hellwig xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); 2907f6bba201SDave Chinner xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); 2908f6bba201SDave Chinner if (target_ip) 2909f6bba201SDave Chinner xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); 29107dcf5c3eSDave Chinner if (wip) 29117dcf5c3eSDave Chinner xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL); 2912f6bba201SDave Chinner 2913f6bba201SDave Chinner /* 2914f6bba201SDave Chinner * If we are using project inheritance, we only allow renames 2915f6bba201SDave Chinner * into our tree when the project IDs are the same; else the 2916f6bba201SDave Chinner * tree quota mechanism would be circumvented. 2917f6bba201SDave Chinner */ 2918db07349dSChristoph Hellwig if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) && 2919ceaf603cSChristoph Hellwig target_dp->i_projid != src_ip->i_projid)) { 29202451337dSDave Chinner error = -EXDEV; 2921445883e8SDave Chinner goto out_trans_cancel; 2922f6bba201SDave Chinner } 2923f6bba201SDave Chinner 2924eeacd321SDave Chinner /* RENAME_EXCHANGE is unique from here on. */ 2925eeacd321SDave Chinner if (flags & RENAME_EXCHANGE) 2926eeacd321SDave Chinner return xfs_cross_rename(tp, src_dp, src_name, src_ip, 2927d31a1825SCarlos Maiolino target_dp, target_name, target_ip, 2928f16dea54SBrian Foster spaceres); 2929d31a1825SCarlos Maiolino 2930d31a1825SCarlos Maiolino /* 293141667260SDarrick J. Wong * Try to reserve quota to handle an expansion of the target directory. 293241667260SDarrick J. Wong * We'll allow the rename to continue in reservationless mode if we hit 293341667260SDarrick J. Wong * a space usage constraint. If we trigger reservationless mode, save 293441667260SDarrick J. Wong * the errno if there isn't any free space in the target directory. 293541667260SDarrick J. Wong */ 293641667260SDarrick J. Wong if (spaceres != 0) { 293741667260SDarrick J. Wong error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres, 293841667260SDarrick J. Wong 0, false); 293941667260SDarrick J. Wong if (error == -EDQUOT || error == -ENOSPC) { 294041667260SDarrick J. Wong if (!retried) { 294141667260SDarrick J. Wong xfs_trans_cancel(tp); 294241667260SDarrick J. Wong xfs_blockgc_free_quota(target_dp, 0); 294341667260SDarrick J. Wong retried = true; 294441667260SDarrick J. Wong goto retry; 294541667260SDarrick J. Wong } 294641667260SDarrick J. Wong 294741667260SDarrick J. Wong nospace_error = error; 294841667260SDarrick J. Wong spaceres = 0; 294941667260SDarrick J. Wong error = 0; 295041667260SDarrick J. Wong } 295141667260SDarrick J. Wong if (error) 295241667260SDarrick J. Wong goto out_trans_cancel; 295341667260SDarrick J. Wong } 295441667260SDarrick J. Wong 295541667260SDarrick J. Wong /* 2956bc56ad8cSkaixuxia * Check for expected errors before we dirty the transaction 2957bc56ad8cSkaixuxia * so we can return an error without a transaction abort. 2958f6bba201SDave Chinner */ 2959f6bba201SDave Chinner if (target_ip == NULL) { 2960f6bba201SDave Chinner /* 2961f6bba201SDave Chinner * If there's no space reservation, check the entry will 2962f6bba201SDave Chinner * fit before actually inserting it. 
2963f6bba201SDave Chinner */ 296494f3cad5SEric Sandeen if (!spaceres) { 296594f3cad5SEric Sandeen error = xfs_dir_canenter(tp, target_dp, target_name); 2966f6bba201SDave Chinner if (error) 2967445883e8SDave Chinner goto out_trans_cancel; 296894f3cad5SEric Sandeen } 2969bc56ad8cSkaixuxia } else { 2970bc56ad8cSkaixuxia /* 2971bc56ad8cSkaixuxia * If target exists and it's a directory, check that whether 2972bc56ad8cSkaixuxia * it can be destroyed. 2973bc56ad8cSkaixuxia */ 2974bc56ad8cSkaixuxia if (S_ISDIR(VFS_I(target_ip)->i_mode) && 2975bc56ad8cSkaixuxia (!xfs_dir_isempty(target_ip) || 2976bc56ad8cSkaixuxia (VFS_I(target_ip)->i_nlink > 2))) { 2977bc56ad8cSkaixuxia error = -EEXIST; 2978bc56ad8cSkaixuxia goto out_trans_cancel; 2979bc56ad8cSkaixuxia } 2980bc56ad8cSkaixuxia } 2981bc56ad8cSkaixuxia 2982bc56ad8cSkaixuxia /* 29836da1b4b1SDarrick J. Wong * Lock the AGI buffers we need to handle bumping the nlink of the 29846da1b4b1SDarrick J. Wong * whiteout inode off the unlinked list and to handle dropping the 29856da1b4b1SDarrick J. Wong * nlink of the target inode. Per locking order rules, do this in 29866da1b4b1SDarrick J. Wong * increasing AG order and before directory block allocation tries to 29876da1b4b1SDarrick J. Wong * grab AGFs because we grab AGIs before AGFs. 29886da1b4b1SDarrick J. Wong * 29896da1b4b1SDarrick J. Wong * The (vfs) caller must ensure that if src is a directory then 29906da1b4b1SDarrick J. Wong * target_ip is either null or an empty directory. 29916da1b4b1SDarrick J. Wong */ 29926da1b4b1SDarrick J. Wong for (i = 0; i < num_inodes && inodes[i] != NULL; i++) { 29936da1b4b1SDarrick J. Wong if (inodes[i] == wip || 29946da1b4b1SDarrick J. Wong (inodes[i] == target_ip && 29956da1b4b1SDarrick J. Wong (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) { 299661021debSDave Chinner struct xfs_perag *pag; 29976da1b4b1SDarrick J. Wong struct xfs_buf *bp; 29986da1b4b1SDarrick J. Wong 299961021debSDave Chinner pag = xfs_perag_get(mp, 300061021debSDave Chinner XFS_INO_TO_AGNO(mp, inodes[i]->i_ino)); 300161021debSDave Chinner error = xfs_read_agi(pag, tp, &bp); 300261021debSDave Chinner xfs_perag_put(pag); 30036da1b4b1SDarrick J. Wong if (error) 30046da1b4b1SDarrick J. Wong goto out_trans_cancel; 30056da1b4b1SDarrick J. Wong } 30066da1b4b1SDarrick J. Wong } 30076da1b4b1SDarrick J. Wong 30086da1b4b1SDarrick J. Wong /* 3009bc56ad8cSkaixuxia * Directory entry creation below may acquire the AGF. Remove 3010bc56ad8cSkaixuxia * the whiteout from the unlinked list first to preserve correct 3011bc56ad8cSkaixuxia * AGI/AGF locking order. This dirties the transaction so failures 3012bc56ad8cSkaixuxia * after this point will abort and log recovery will clean up the 3013bc56ad8cSkaixuxia * mess. 3014bc56ad8cSkaixuxia * 3015bc56ad8cSkaixuxia * For whiteouts, we need to bump the link count on the whiteout 3016bc56ad8cSkaixuxia * inode. After this point, we have a real link, clear the tmpfile 3017bc56ad8cSkaixuxia * state flag from the inode so it doesn't accidentally get misused 3018bc56ad8cSkaixuxia * in future. 
3019bc56ad8cSkaixuxia */ 3020bc56ad8cSkaixuxia if (wip) { 3021f40aadb2SDave Chinner struct xfs_perag *pag; 3022f40aadb2SDave Chinner 3023bc56ad8cSkaixuxia ASSERT(VFS_I(wip)->i_nlink == 0); 3024f40aadb2SDave Chinner 3025f40aadb2SDave Chinner pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino)); 3026f40aadb2SDave Chinner error = xfs_iunlink_remove(tp, pag, wip); 3027f40aadb2SDave Chinner xfs_perag_put(pag); 3028bc56ad8cSkaixuxia if (error) 3029bc56ad8cSkaixuxia goto out_trans_cancel; 3030bc56ad8cSkaixuxia 3031bc56ad8cSkaixuxia xfs_bumplink(tp, wip); 3032bc56ad8cSkaixuxia VFS_I(wip)->i_state &= ~I_LINKABLE; 3033bc56ad8cSkaixuxia } 3034bc56ad8cSkaixuxia 3035bc56ad8cSkaixuxia /* 3036bc56ad8cSkaixuxia * Set up the target. 3037bc56ad8cSkaixuxia */ 3038bc56ad8cSkaixuxia if (target_ip == NULL) { 3039f6bba201SDave Chinner /* 3040f6bba201SDave Chinner * If target does not exist and the rename crosses 3041f6bba201SDave Chinner * directories, adjust the target directory link count 3042f6bba201SDave Chinner * to account for the ".." reference from the new entry. 3043f6bba201SDave Chinner */ 3044f6bba201SDave Chinner error = xfs_dir_createname(tp, target_dp, target_name, 3045381eee69SBrian Foster src_ip->i_ino, spaceres); 3046f6bba201SDave Chinner if (error) 3047c8eac49eSBrian Foster goto out_trans_cancel; 3048f6bba201SDave Chinner 3049f6bba201SDave Chinner xfs_trans_ichgtime(tp, target_dp, 3050f6bba201SDave Chinner XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3051f6bba201SDave Chinner 3052f6bba201SDave Chinner if (new_parent && src_is_directory) { 305391083269SEric Sandeen xfs_bumplink(tp, target_dp); 3054f6bba201SDave Chinner } 3055f6bba201SDave Chinner } else { /* target_ip != NULL */ 3056f6bba201SDave Chinner /* 3057f6bba201SDave Chinner * Link the source inode under the target name. 3058f6bba201SDave Chinner * If the source inode is a directory and we are moving 3059f6bba201SDave Chinner * it across directories, its ".." entry will be 3060f6bba201SDave Chinner * inconsistent until we replace that down below. 3061f6bba201SDave Chinner * 3062f6bba201SDave Chinner * In case there is already an entry with the same 3063f6bba201SDave Chinner * name at the destination directory, remove it first. 3064f6bba201SDave Chinner */ 3065f6bba201SDave Chinner error = xfs_dir_replace(tp, target_dp, target_name, 3066381eee69SBrian Foster src_ip->i_ino, spaceres); 3067f6bba201SDave Chinner if (error) 3068c8eac49eSBrian Foster goto out_trans_cancel; 3069f6bba201SDave Chinner 3070f6bba201SDave Chinner xfs_trans_ichgtime(tp, target_dp, 3071f6bba201SDave Chinner XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3072f6bba201SDave Chinner 3073f6bba201SDave Chinner /* 3074f6bba201SDave Chinner * Decrement the link count on the target since the target 3075f6bba201SDave Chinner * dir no longer points to it. 3076f6bba201SDave Chinner */ 3077f6bba201SDave Chinner error = xfs_droplink(tp, target_ip); 3078f6bba201SDave Chinner if (error) 3079c8eac49eSBrian Foster goto out_trans_cancel; 3080f6bba201SDave Chinner 3081f6bba201SDave Chinner if (src_is_directory) { 3082f6bba201SDave Chinner /* 3083f6bba201SDave Chinner * Drop the link from the old "." entry. 3084f6bba201SDave Chinner */ 3085f6bba201SDave Chinner error = xfs_droplink(tp, target_ip); 3086f6bba201SDave Chinner if (error) 3087c8eac49eSBrian Foster goto out_trans_cancel; 3088f6bba201SDave Chinner } 3089f6bba201SDave Chinner } /* target_ip != NULL */ 3090f6bba201SDave Chinner 3091f6bba201SDave Chinner /* 3092f6bba201SDave Chinner * Remove the source. 
3093f6bba201SDave Chinner */ 3094f6bba201SDave Chinner if (new_parent && src_is_directory) { 3095f6bba201SDave Chinner /* 3096f6bba201SDave Chinner * Rewrite the ".." entry to point to the new 3097f6bba201SDave Chinner * directory. 3098f6bba201SDave Chinner */ 3099f6bba201SDave Chinner error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, 3100381eee69SBrian Foster target_dp->i_ino, spaceres); 31012451337dSDave Chinner ASSERT(error != -EEXIST); 3102f6bba201SDave Chinner if (error) 3103c8eac49eSBrian Foster goto out_trans_cancel; 3104f6bba201SDave Chinner } 3105f6bba201SDave Chinner 3106f6bba201SDave Chinner /* 3107f6bba201SDave Chinner * We always want to hit the ctime on the source inode. 3108f6bba201SDave Chinner * 3109f6bba201SDave Chinner * This isn't strictly required by the standards since the source 3110f6bba201SDave Chinner * inode isn't really being changed, but old unix file systems did 3111f6bba201SDave Chinner * it and some incremental backup programs won't work without it. 3112f6bba201SDave Chinner */ 3113f6bba201SDave Chinner xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); 3114f6bba201SDave Chinner xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE); 3115f6bba201SDave Chinner 3116f6bba201SDave Chinner /* 3117f6bba201SDave Chinner * Adjust the link count on src_dp. This is necessary when 3118f6bba201SDave Chinner * renaming a directory, either within one parent when 3119f6bba201SDave Chinner * the target existed, or across two parent directories. 3120f6bba201SDave Chinner */ 3121f6bba201SDave Chinner if (src_is_directory && (new_parent || target_ip != NULL)) { 3122f6bba201SDave Chinner 3123f6bba201SDave Chinner /* 3124f6bba201SDave Chinner * Decrement link count on src_directory since the 3125f6bba201SDave Chinner * entry that's moved no longer points to it. 3126f6bba201SDave Chinner */ 3127f6bba201SDave Chinner error = xfs_droplink(tp, src_dp); 3128f6bba201SDave Chinner if (error) 3129c8eac49eSBrian Foster goto out_trans_cancel; 3130f6bba201SDave Chinner } 3131f6bba201SDave Chinner 31327dcf5c3eSDave Chinner /* 31337dcf5c3eSDave Chinner * For whiteouts, we only need to update the source dirent with the 31347dcf5c3eSDave Chinner * inode number of the whiteout inode rather than removing it 31357dcf5c3eSDave Chinner * altogether. 31367dcf5c3eSDave Chinner */ 313783a21c18SChandan Babu R if (wip) 31387dcf5c3eSDave Chinner error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino, 3139381eee69SBrian Foster spaceres); 314083a21c18SChandan Babu R else 3141f6bba201SDave Chinner error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, 3142381eee69SBrian Foster spaceres); 314302092a2fSChandan Babu R 3144f6bba201SDave Chinner if (error) 3145c8eac49eSBrian Foster goto out_trans_cancel; 3146f6bba201SDave Chinner 3147f6bba201SDave Chinner xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3148f6bba201SDave Chinner xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); 3149f6bba201SDave Chinner if (new_parent) 3150f6bba201SDave Chinner xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); 3151f6bba201SDave Chinner 3152c9cfdb38SBrian Foster error = xfs_finish_rename(tp); 31537dcf5c3eSDave Chinner if (wip) 315444a8736bSDarrick J. Wong xfs_irele(wip); 31557dcf5c3eSDave Chinner return error; 3156f6bba201SDave Chinner 3157445883e8SDave Chinner out_trans_cancel: 31584906e215SChristoph Hellwig xfs_trans_cancel(tp); 3159253f4911SChristoph Hellwig out_release_wip: 31607dcf5c3eSDave Chinner if (wip) 316144a8736bSDarrick J. Wong xfs_irele(wip); 316241667260SDarrick J. 
Wong if (error == -ENOSPC && nospace_error) 316341667260SDarrick J. Wong error = nospace_error; 3164f6bba201SDave Chinner return error; 3165f6bba201SDave Chinner } 3166f6bba201SDave Chinner 3167e6187b34SDave Chinner static int 3168e6187b34SDave Chinner xfs_iflush( 316993848a99SChristoph Hellwig struct xfs_inode *ip, 317093848a99SChristoph Hellwig struct xfs_buf *bp) 31711da177e4SLinus Torvalds { 317293848a99SChristoph Hellwig struct xfs_inode_log_item *iip = ip->i_itemp; 317393848a99SChristoph Hellwig struct xfs_dinode *dip; 317493848a99SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 3175f2019299SBrian Foster int error; 31761da177e4SLinus Torvalds 3177579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3178718ecc50SDave Chinner ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING)); 3179f7e67b20SChristoph Hellwig ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || 3180daf83964SChristoph Hellwig ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); 318190c60e16SDave Chinner ASSERT(iip->ili_item.li_buf == bp); 31821da177e4SLinus Torvalds 318388ee2df7SChristoph Hellwig dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); 31841da177e4SLinus Torvalds 3185f2019299SBrian Foster /* 3186f2019299SBrian Foster * We don't flush the inode if any of the following checks fail, but we 3187f2019299SBrian Foster * do still update the log item and attach to the backing buffer as if 3188f2019299SBrian Foster * the flush happened. This is a formality to facilitate predictable 3189f2019299SBrian Foster * error handling as the caller will shut down and fail the buffer. 3190f2019299SBrian Foster */ 3191f2019299SBrian Foster error = -EFSCORRUPTED; 319269ef921bSChristoph Hellwig if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), 31939e24cfd0SDarrick J. Wong mp, XFS_ERRTAG_IFLUSH_1)) { 31946a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 319578b0f58bSZeng Heng "%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT, 31966a19d939SDave Chinner __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); 3197f2019299SBrian Foster goto flush_out; 31981da177e4SLinus Torvalds } 3199c19b3b05SDave Chinner if (S_ISREG(VFS_I(ip)->i_mode)) { 32001da177e4SLinus Torvalds if (XFS_TEST_ERROR( 3201f7e67b20SChristoph Hellwig ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && 3202f7e67b20SChristoph Hellwig ip->i_df.if_format != XFS_DINODE_FMT_BTREE, 32039e24cfd0SDarrick J. Wong mp, XFS_ERRTAG_IFLUSH_3)) { 32046a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 320578b0f58bSZeng Heng "%s: Bad regular inode %llu, ptr "PTR_FMT, 32066a19d939SDave Chinner __func__, ip->i_ino, ip); 3207f2019299SBrian Foster goto flush_out; 32081da177e4SLinus Torvalds } 3209c19b3b05SDave Chinner } else if (S_ISDIR(VFS_I(ip)->i_mode)) { 32101da177e4SLinus Torvalds if (XFS_TEST_ERROR( 3211f7e67b20SChristoph Hellwig ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && 3212f7e67b20SChristoph Hellwig ip->i_df.if_format != XFS_DINODE_FMT_BTREE && 3213f7e67b20SChristoph Hellwig ip->i_df.if_format != XFS_DINODE_FMT_LOCAL, 32149e24cfd0SDarrick J. Wong mp, XFS_ERRTAG_IFLUSH_4)) { 32156a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 321678b0f58bSZeng Heng "%s: Bad directory inode %llu, ptr "PTR_FMT, 32176a19d939SDave Chinner __func__, ip->i_ino, ip); 3218f2019299SBrian Foster goto flush_out; 32191da177e4SLinus Torvalds } 32201da177e4SLinus Torvalds } 32212ed5b09bSDarrick J.
Wong if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) > 32226e73a545SChristoph Hellwig ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) { 32236a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3224755c38ffSChandan Babu R "%s: detected corrupt incore inode %llu, " 3225755c38ffSChandan Babu R "total extents = %llu nblocks = %lld, ptr "PTR_FMT, 32266a19d939SDave Chinner __func__, ip->i_ino, 32272ed5b09bSDarrick J. Wong ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af), 32286e73a545SChristoph Hellwig ip->i_nblocks, ip); 3229f2019299SBrian Foster goto flush_out; 32301da177e4SLinus Torvalds } 32317821ea30SChristoph Hellwig if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize, 32329e24cfd0SDarrick J. Wong mp, XFS_ERRTAG_IFLUSH_6)) { 32336a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 323478b0f58bSZeng Heng "%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT, 32357821ea30SChristoph Hellwig __func__, ip->i_ino, ip->i_forkoff, ip); 3236f2019299SBrian Foster goto flush_out; 32371da177e4SLinus Torvalds } 3238e60896d8SDave Chinner 32391da177e4SLinus Torvalds /* 3240965e0a1aSChristoph Hellwig * Inode item log recovery for v2 inodes is dependent on the flushiter 3241965e0a1aSChristoph Hellwig * count for correct sequencing. We bump the flush iteration count so 3242965e0a1aSChristoph Hellwig * we can detect flushes which postdate a log record during recovery. 3243965e0a1aSChristoph Hellwig * This is redundant as we now log every change and hence this can't 3244965e0a1aSChristoph Hellwig * happen, but we still need to do it to ensure backwards compatibility 3245965e0a1aSChristoph Hellwig * with old kernels that predate logging all inode changes. 32461da177e4SLinus Torvalds */ 324738c26bfdSDave Chinner if (!xfs_has_v3inodes(mp)) 3248965e0a1aSChristoph Hellwig ip->i_flushiter++; 32491da177e4SLinus Torvalds 32500f45a1b2SChristoph Hellwig /* 32510f45a1b2SChristoph Hellwig * If there are inline format data / attr forks attached to this inode, 32520f45a1b2SChristoph Hellwig * make sure they are not corrupt. 32530f45a1b2SChristoph Hellwig */ 3254f7e67b20SChristoph Hellwig if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL && 32550f45a1b2SChristoph Hellwig xfs_ifork_verify_local_data(ip)) 32560f45a1b2SChristoph Hellwig goto flush_out; 3257932b42c6SDarrick J. Wong if (xfs_inode_has_attr_fork(ip) && 32582ed5b09bSDarrick J. Wong ip->i_af.if_format == XFS_DINODE_FMT_LOCAL && 32590f45a1b2SChristoph Hellwig xfs_ifork_verify_local_attr(ip)) 3260f2019299SBrian Foster goto flush_out; 3261005c5db8SDarrick J. Wong 32621da177e4SLinus Torvalds /* 32633987848cSDave Chinner * Copy the dirty parts of the inode into the on-disk inode. We always 32643987848cSDave Chinner * copy out the core of the inode, because if the inode is dirty at all 32653987848cSDave Chinner * the core must be. 32661da177e4SLinus Torvalds */ 326793f958f9SDave Chinner xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn); 32681da177e4SLinus Torvalds 32691da177e4SLinus Torvalds /* Wrap, we never let the log put out DI_MAX_FLUSH */ 327038c26bfdSDave Chinner if (!xfs_has_v3inodes(mp)) { 3271965e0a1aSChristoph Hellwig if (ip->i_flushiter == DI_MAX_FLUSH) 3272965e0a1aSChristoph Hellwig ip->i_flushiter = 0; 3273ee7b83fdSChristoph Hellwig } 32741da177e4SLinus Torvalds 3275005c5db8SDarrick J. Wong xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3276932b42c6SDarrick J. Wong if (xfs_inode_has_attr_fork(ip)) 3277005c5db8SDarrick J.
3277005c5db8SDarrick J. Wong xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
32781da177e4SLinus Torvalds
32791da177e4SLinus Torvalds /*
3280f5d8d5c4SChristoph Hellwig * We've recorded everything logged in the inode, so we'd like to clear
3281f5d8d5c4SChristoph Hellwig * the ili_fields bits so we don't log and flush things unnecessarily.
3282f5d8d5c4SChristoph Hellwig * However, we can't stop logging all this information until the data
3283f5d8d5c4SChristoph Hellwig * we've copied into the disk buffer is written to disk. If we did we
3284f5d8d5c4SChristoph Hellwig * might overwrite the copy of the inode in the log with all the data
3285f5d8d5c4SChristoph Hellwig * after re-logging only part of it, and in the face of a crash we
3286f5d8d5c4SChristoph Hellwig * wouldn't have all the data we need to recover.
32871da177e4SLinus Torvalds *
3288f5d8d5c4SChristoph Hellwig * What we do is move the bits to the ili_last_fields field. When
3289f5d8d5c4SChristoph Hellwig * logging the inode, these bits are moved back to the ili_fields field.
3290664ffb8aSChristoph Hellwig * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3291664ffb8aSChristoph Hellwig * we know that the information those bits represent is permanently on
3292f5d8d5c4SChristoph Hellwig * disk. As long as the flush completes before the inode is logged
3293f5d8d5c4SChristoph Hellwig * again, then both ili_fields and ili_last_fields will be cleared.
32941da177e4SLinus Torvalds */
3295f2019299SBrian Foster error = 0;
3296f2019299SBrian Foster flush_out:
32971319ebefSDave Chinner spin_lock(&iip->ili_lock);
3298f5d8d5c4SChristoph Hellwig iip->ili_last_fields = iip->ili_fields;
3299f5d8d5c4SChristoph Hellwig iip->ili_fields = 0;
3300fc0561ceSDave Chinner iip->ili_fsync_fields = 0;
33011319ebefSDave Chinner spin_unlock(&iip->ili_lock);
33021da177e4SLinus Torvalds
33031319ebefSDave Chinner /*
33041319ebefSDave Chinner * Store the current LSN of the inode so that we can tell whether the
3305664ffb8aSChristoph Hellwig * item has moved in the AIL from xfs_buf_inode_iodone().
33061319ebefSDave Chinner */
33077b2e2a31SDavid Chinner xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
33087b2e2a31SDavid Chinner &iip->ili_item.li_lsn);
33091da177e4SLinus Torvalds
331093848a99SChristoph Hellwig /* generate the checksum. */
331193848a99SChristoph Hellwig xfs_dinode_calc_crc(mp, dip);
3312f2019299SBrian Foster return error;
33131da177e4SLinus Torvalds }
331444a8736bSDarrick J. Wong
3315e6187b34SDave Chinner /*
3316e6187b34SDave Chinner * Non-blocking flush of dirty inode metadata into the backing buffer.
3317e6187b34SDave Chinner *
3318e6187b34SDave Chinner * The caller must have a reference to the inode and hold the cluster buffer
3319e6187b34SDave Chinner * locked. The function will walk all the inodes attached to the cluster
3320e6187b34SDave Chinner * buffer that it can find and lock without blocking, and flush them into it.
3321e6187b34SDave Chinner *
33225717ea4dSDave Chinner * On successful flushing of at least one inode, the caller must write out the
33235717ea4dSDave Chinner * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
33245717ea4dSDave Chinner * the caller needs to release the buffer. On failure, the filesystem will be
33255717ea4dSDave Chinner * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
33265717ea4dSDave Chinner * will be returned.
3327e6187b34SDave Chinner */
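/*
 * A minimal sketch of the contract described above, as seen from a
 * hypothetical synchronous caller (the real in-tree caller queues the
 * buffer for delayed write instead; xfs_bwrite() is used here purely for
 * illustration):
 *
 *	error = xfs_iflush_cluster(bp);
 *	if (!error) {
 *		error = xfs_bwrite(bp);
 *		xfs_buf_relse(bp);
 *	} else if (error == -EAGAIN) {
 *		xfs_buf_relse(bp);
 *	}
 *
 * On any other error the filesystem has been shut down and the buffer has
 * already been unlocked and released, so the caller must not touch bp.
 */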
3328e6187b34SDave Chinner int
3329e6187b34SDave Chinner xfs_iflush_cluster(
3330e6187b34SDave Chinner struct xfs_buf *bp)
3331e6187b34SDave Chinner {
33325717ea4dSDave Chinner struct xfs_mount *mp = bp->b_mount;
33335717ea4dSDave Chinner struct xfs_log_item *lip, *n;
33345717ea4dSDave Chinner struct xfs_inode *ip;
33355717ea4dSDave Chinner struct xfs_inode_log_item *iip;
3336e6187b34SDave Chinner int clcount = 0;
33375717ea4dSDave Chinner int error = 0;
3338e6187b34SDave Chinner
3339e6187b34SDave Chinner /*
33405717ea4dSDave Chinner * We must use the safe variant here as on shutdown xfs_iflush_abort()
3341d2d7c047SDave Chinner * will remove the log item from the list.
3342e6187b34SDave Chinner */
33435717ea4dSDave Chinner list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
33445717ea4dSDave Chinner iip = (struct xfs_inode_log_item *)lip;
33455717ea4dSDave Chinner ip = iip->ili_inode;
33465717ea4dSDave Chinner
33475717ea4dSDave Chinner /*
33485717ea4dSDave Chinner * Quick and dirty check to avoid locks if possible.
33495717ea4dSDave Chinner */
3350718ecc50SDave Chinner if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
33515717ea4dSDave Chinner continue;
33525717ea4dSDave Chinner if (xfs_ipincount(ip))
33535717ea4dSDave Chinner continue;
33545717ea4dSDave Chinner
33555717ea4dSDave Chinner /*
33565717ea4dSDave Chinner * The inode is still attached to the buffer, which means it is
33575717ea4dSDave Chinner * dirty but reclaim might try to grab it. Check carefully for
33585717ea4dSDave Chinner * that, and grab the ilock while still holding the i_flags_lock
33595717ea4dSDave Chinner * to guarantee reclaim will not be able to reclaim this inode
33605717ea4dSDave Chinner * once we drop the i_flags_lock.
33615717ea4dSDave Chinner */
33625717ea4dSDave Chinner spin_lock(&ip->i_flags_lock);
33635717ea4dSDave Chinner ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3364718ecc50SDave Chinner if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
33655717ea4dSDave Chinner spin_unlock(&ip->i_flags_lock);
3366e6187b34SDave Chinner continue;
3367e6187b34SDave Chinner }
3368e6187b34SDave Chinner
3369e6187b34SDave Chinner /*
33705717ea4dSDave Chinner * ILOCK will pin the inode against reclaim and prevent
33715717ea4dSDave Chinner * concurrent transactions modifying the inode while we are
3372718ecc50SDave Chinner * flushing the inode. If we get the lock, set the flushing
3373718ecc50SDave Chinner * state before we drop the i_flags_lock.
3374e6187b34SDave Chinner */
33755717ea4dSDave Chinner if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
33765717ea4dSDave Chinner spin_unlock(&ip->i_flags_lock);
33775717ea4dSDave Chinner continue;
33785717ea4dSDave Chinner }
3379718ecc50SDave Chinner __xfs_iflags_set(ip, XFS_IFLUSHING);
33805717ea4dSDave Chinner spin_unlock(&ip->i_flags_lock);
33815717ea4dSDave Chinner
33825717ea4dSDave Chinner /*
33835717ea4dSDave Chinner * Abort flushing this inode if we are shut down because the
33845717ea4dSDave Chinner * inode may not currently be in the AIL. This can occur when
33855717ea4dSDave Chinner * log I/O failure unpins the inode without inserting it into
33865717ea4dSDave Chinner * the AIL, leaving a dirty/unpinned inode attached to the buffer
33875717ea4dSDave Chinner * that otherwise looks like it should be flushed.
33885717ea4dSDave Chinner */
338901728b44SDave Chinner if (xlog_is_shutdown(mp->m_log)) {
33905717ea4dSDave Chinner xfs_iunpin_wait(ip);
33915717ea4dSDave Chinner xfs_iflush_abort(ip);
33925717ea4dSDave Chinner xfs_iunlock(ip, XFS_ILOCK_SHARED);
33935717ea4dSDave Chinner error = -EIO;
33945717ea4dSDave Chinner continue;
33955717ea4dSDave Chinner }
33965717ea4dSDave Chinner
33975717ea4dSDave Chinner /* don't block waiting on a log force to unpin dirty inodes */
33985717ea4dSDave Chinner if (xfs_ipincount(ip)) {
3399718ecc50SDave Chinner xfs_iflags_clear(ip, XFS_IFLUSHING);
34005717ea4dSDave Chinner xfs_iunlock(ip, XFS_ILOCK_SHARED);
34015717ea4dSDave Chinner continue;
34025717ea4dSDave Chinner }
34035717ea4dSDave Chinner
34045717ea4dSDave Chinner if (!xfs_inode_clean(ip))
34055717ea4dSDave Chinner error = xfs_iflush(ip, bp);
34065717ea4dSDave Chinner else
3407718ecc50SDave Chinner xfs_iflags_clear(ip, XFS_IFLUSHING);
34085717ea4dSDave Chinner xfs_iunlock(ip, XFS_ILOCK_SHARED);
34095717ea4dSDave Chinner if (error)
3410e6187b34SDave Chinner break;
3411e6187b34SDave Chinner clcount++;
3412e6187b34SDave Chinner }
3413e6187b34SDave Chinner
3414e6187b34SDave Chinner if (error) {
341501728b44SDave Chinner /*
341601728b44SDave Chinner * Shutdown first so we kill the log before we release this
341701728b44SDave Chinner * buffer. If it is an INODE_ALLOC buffer and pins the tail
341801728b44SDave Chinner * of the log, failing it before the _log_ is shut down can
341901728b44SDave Chinner * result in the log tail being moved forward in the journal
342001728b44SDave Chinner * on disk because log writes can still be taking place. Hence
342101728b44SDave Chinner * unpinning the tail will allow the ICREATE intent to be
342201728b44SDave Chinner * removed from the log and recovery will fail with uninitialised
342301728b44SDave Chinner * inode cluster buffers.
342401728b44SDave Chinner */
342501728b44SDave Chinner xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3426e6187b34SDave Chinner bp->b_flags |= XBF_ASYNC;
3427e6187b34SDave Chinner xfs_buf_ioend_fail(bp);
3428e6187b34SDave Chinner return error;
3429e6187b34SDave Chinner }
3430e6187b34SDave Chinner
34315717ea4dSDave Chinner if (!clcount)
34325717ea4dSDave Chinner return -EAGAIN;
34335717ea4dSDave Chinner
34345717ea4dSDave Chinner XFS_STATS_INC(mp, xs_icluster_flushcnt);
34355717ea4dSDave Chinner XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
34365717ea4dSDave Chinner return 0;
34375717ea4dSDave Chinner
34385717ea4dSDave Chinner }
34395717ea4dSDave Chinner
344044a8736bSDarrick J. Wong /* Release an inode. */
344144a8736bSDarrick J. Wong void
344244a8736bSDarrick J. Wong xfs_irele(
344344a8736bSDarrick J. Wong struct xfs_inode *ip)
344444a8736bSDarrick J. Wong {
344544a8736bSDarrick J. Wong trace_xfs_irele(ip, _RET_IP_);
344644a8736bSDarrick J. Wong iput(VFS_I(ip));
344744a8736bSDarrick J. Wong }
344854fbdd10SChristoph Hellwig
344954fbdd10SChristoph Hellwig /*
345054fbdd10SChristoph Hellwig * Ensure all committed transactions touching the inode are written to the log.
345154fbdd10SChristoph Hellwig */
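/*
 * If the inode is not pinned, all of its committed changes have already
 * made it to the on-disk log, so the helper below has nothing to force
 * and returns 0 immediately. A sketch of a hypothetical fsync-style
 * caller:
 *
 *	error = xfs_log_force_inode(ip);
 *	if (error)
 *		return error;
 */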
345254fbdd10SChristoph Hellwig int
345354fbdd10SChristoph Hellwig xfs_log_force_inode(
345454fbdd10SChristoph Hellwig struct xfs_inode *ip)
345554fbdd10SChristoph Hellwig {
34565f9b4b0dSDave Chinner xfs_csn_t seq = 0;
345754fbdd10SChristoph Hellwig
345854fbdd10SChristoph Hellwig xfs_ilock(ip, XFS_ILOCK_SHARED);
345954fbdd10SChristoph Hellwig if (xfs_ipincount(ip))
34605f9b4b0dSDave Chinner seq = ip->i_itemp->ili_commit_seq;
346154fbdd10SChristoph Hellwig xfs_iunlock(ip, XFS_ILOCK_SHARED);
346254fbdd10SChristoph Hellwig
34635f9b4b0dSDave Chinner if (!seq)
346454fbdd10SChristoph Hellwig return 0;
34655f9b4b0dSDave Chinner return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
346654fbdd10SChristoph Hellwig }
3467e2aaee9cSDarrick J. Wong
3468e2aaee9cSDarrick J. Wong /*
3469e2aaee9cSDarrick J. Wong * Grab the exclusive iolock for a data copy from src to dest, making sure to
3470e2aaee9cSDarrick J. Wong * abide by the vfs locking order (lowest pointer value goes first) and breaking
3471e2aaee9cSDarrick J. Wong * the layout leases before proceeding. The loop is needed because we cannot call
3472e2aaee9cSDarrick J. Wong * the blocking break_layout() with the iolocks held, and therefore have to
3473e2aaee9cSDarrick J. Wong * back out both locks.
3474e2aaee9cSDarrick J. Wong */
3475e2aaee9cSDarrick J. Wong static int
3476e2aaee9cSDarrick J. Wong xfs_iolock_two_inodes_and_break_layout(
3477e2aaee9cSDarrick J. Wong struct inode *src,
3478e2aaee9cSDarrick J. Wong struct inode *dest)
3479e2aaee9cSDarrick J. Wong {
3480e2aaee9cSDarrick J. Wong int error;
3481e2aaee9cSDarrick J. Wong
3482e2aaee9cSDarrick J. Wong if (src > dest)
3483e2aaee9cSDarrick J. Wong swap(src, dest);
3484e2aaee9cSDarrick J. Wong
3485e2aaee9cSDarrick J. Wong retry:
3486e2aaee9cSDarrick J. Wong /* Wait to break both inodes' layouts before we start locking. */
3487e2aaee9cSDarrick J. Wong error = break_layout(src, true);
3488e2aaee9cSDarrick J. Wong if (error)
3489e2aaee9cSDarrick J. Wong return error;
3490e2aaee9cSDarrick J. Wong if (src != dest) {
3491e2aaee9cSDarrick J. Wong error = break_layout(dest, true);
3492e2aaee9cSDarrick J. Wong if (error)
3493e2aaee9cSDarrick J. Wong return error;
3494e2aaee9cSDarrick J. Wong }
3495e2aaee9cSDarrick J. Wong
3496e2aaee9cSDarrick J. Wong /* Lock one inode and make sure nobody got in and leased it. */
3497e2aaee9cSDarrick J. Wong inode_lock(src);
3498e2aaee9cSDarrick J. Wong error = break_layout(src, false);
3499e2aaee9cSDarrick J. Wong if (error) {
3500e2aaee9cSDarrick J. Wong inode_unlock(src);
3501e2aaee9cSDarrick J. Wong if (error == -EWOULDBLOCK)
3502e2aaee9cSDarrick J. Wong goto retry;
3503e2aaee9cSDarrick J. Wong return error;
3504e2aaee9cSDarrick J. Wong }
3505e2aaee9cSDarrick J. Wong
3506e2aaee9cSDarrick J. Wong if (src == dest)
3507e2aaee9cSDarrick J. Wong return 0;
3508e2aaee9cSDarrick J. Wong
3509e2aaee9cSDarrick J. Wong /* Lock the other inode and make sure nobody got in and leased it. */
3510e2aaee9cSDarrick J. Wong inode_lock_nested(dest, I_MUTEX_NONDIR2);
3511e2aaee9cSDarrick J. Wong error = break_layout(dest, false);
3512e2aaee9cSDarrick J. Wong if (error) {
3513e2aaee9cSDarrick J. Wong inode_unlock(src);
3514e2aaee9cSDarrick J. Wong inode_unlock(dest);
3515e2aaee9cSDarrick J. Wong if (error == -EWOULDBLOCK)
3516e2aaee9cSDarrick J. Wong goto retry;
3517e2aaee9cSDarrick J. Wong return error;
3518e2aaee9cSDarrick J. Wong }
3519e2aaee9cSDarrick J. Wong
3520e2aaee9cSDarrick J. Wong return 0;
3521e2aaee9cSDarrick J. Wong }
3522e2aaee9cSDarrick J. Wong
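/*
 * The ordering rule above generalizes: whenever two inodes must be locked,
 * lock the one with the lower pointer value (or, in the helper below, the
 * lower inode number) first so that concurrent lockers of the same pair
 * cannot deadlock. A minimal sketch of the idiom in isolation (generic
 * helper name, not a real function in this file):
 *
 *	static void lock_two(struct inode *a, struct inode *b)
 *	{
 *		if (a > b)
 *			swap(a, b);
 *		inode_lock(a);
 *		if (a != b)
 *			inode_lock_nested(b, I_MUTEX_NONDIR2);
 *	}
 */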
352313f9e267SShiyang Ruan static int
352413f9e267SShiyang Ruan xfs_mmaplock_two_inodes_and_break_dax_layout(
352513f9e267SShiyang Ruan struct xfs_inode *ip1,
352613f9e267SShiyang Ruan struct xfs_inode *ip2)
352713f9e267SShiyang Ruan {
352813f9e267SShiyang Ruan int error;
352913f9e267SShiyang Ruan bool retry;
353013f9e267SShiyang Ruan struct page *page;
353113f9e267SShiyang Ruan
353213f9e267SShiyang Ruan if (ip1->i_ino > ip2->i_ino)
353313f9e267SShiyang Ruan swap(ip1, ip2);
353413f9e267SShiyang Ruan
353513f9e267SShiyang Ruan again:
353613f9e267SShiyang Ruan retry = false;
353713f9e267SShiyang Ruan /* Lock the first inode */
353813f9e267SShiyang Ruan xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
353913f9e267SShiyang Ruan error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
354013f9e267SShiyang Ruan if (error || retry) {
354113f9e267SShiyang Ruan xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
354213f9e267SShiyang Ruan if (error == 0 && retry)
354313f9e267SShiyang Ruan goto again;
354413f9e267SShiyang Ruan return error;
354513f9e267SShiyang Ruan }
354613f9e267SShiyang Ruan
354713f9e267SShiyang Ruan if (ip1 == ip2)
354813f9e267SShiyang Ruan return 0;
354913f9e267SShiyang Ruan
355013f9e267SShiyang Ruan /* Nested lock the second inode */
355113f9e267SShiyang Ruan xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
355213f9e267SShiyang Ruan /*
355313f9e267SShiyang Ruan * We cannot use xfs_break_dax_layouts() directly here because it may
355413f9e267SShiyang Ruan * need to unlock and relock XFS_MMAPLOCK_EXCL, which is not suitable
355513f9e267SShiyang Ruan * for this nested lock case.
355613f9e267SShiyang Ruan */
355713f9e267SShiyang Ruan page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
355813f9e267SShiyang Ruan if (page && page_ref_count(page) != 1) {
355913f9e267SShiyang Ruan xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
356013f9e267SShiyang Ruan xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
356113f9e267SShiyang Ruan goto again;
356213f9e267SShiyang Ruan }
356313f9e267SShiyang Ruan
356413f9e267SShiyang Ruan return 0;
356513f9e267SShiyang Ruan }
356613f9e267SShiyang Ruan
3567e2aaee9cSDarrick J. Wong /*
3568e2aaee9cSDarrick J. Wong * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3569e2aaee9cSDarrick J. Wong * mmap activity.
3570e2aaee9cSDarrick J. Wong */
3571e2aaee9cSDarrick J. Wong int
3572e2aaee9cSDarrick J. Wong xfs_ilock2_io_mmap(
3573e2aaee9cSDarrick J. Wong struct xfs_inode *ip1,
3574e2aaee9cSDarrick J. Wong struct xfs_inode *ip2)
3575e2aaee9cSDarrick J. Wong {
3576e2aaee9cSDarrick J. Wong int ret;
3577e2aaee9cSDarrick J. Wong
3578e2aaee9cSDarrick J. Wong ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3579e2aaee9cSDarrick J. Wong if (ret)
3580e2aaee9cSDarrick J. Wong return ret;
358113f9e267SShiyang Ruan
358213f9e267SShiyang Ruan if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
358313f9e267SShiyang Ruan ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
358413f9e267SShiyang Ruan if (ret) {
358513f9e267SShiyang Ruan inode_unlock(VFS_I(ip2));
358613f9e267SShiyang Ruan if (ip1 != ip2)
358713f9e267SShiyang Ruan inode_unlock(VFS_I(ip1));
358813f9e267SShiyang Ruan return ret;
358913f9e267SShiyang Ruan }
359013f9e267SShiyang Ruan } else
3591d2c292d8SJan Kara filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3592d2c292d8SJan Kara VFS_I(ip2)->i_mapping);
359313f9e267SShiyang Ruan
3594e2aaee9cSDarrick J. Wong return 0;
3595e2aaee9cSDarrick J. Wong }
3596e2aaee9cSDarrick J. Wong
3597e2aaee9cSDarrick J. Wong /* Unlock both inodes to allow IO and mmap activity. */
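/*
 * A sketch of the expected lock/unlock pairing (hypothetical caller with
 * error handling elided; the real callers sit in the remap/reflink paths):
 *
 *	error = xfs_ilock2_io_mmap(ip1, ip2);
 *	if (error)
 *		return error;
 *	...copy or remap data between the two files...
 *	xfs_iunlock2_io_mmap(ip1, ip2);
 */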
3598e2aaee9cSDarrick J. Wong void
3599e2aaee9cSDarrick J. Wong xfs_iunlock2_io_mmap(
3600e2aaee9cSDarrick J. Wong struct xfs_inode *ip1,
3601e2aaee9cSDarrick J. Wong struct xfs_inode *ip2)
3602e2aaee9cSDarrick J. Wong {
360313f9e267SShiyang Ruan if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
360413f9e267SShiyang Ruan xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
360513f9e267SShiyang Ruan if (ip1 != ip2)
360613f9e267SShiyang Ruan xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
360713f9e267SShiyang Ruan } else
3608d2c292d8SJan Kara filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3609d2c292d8SJan Kara VFS_I(ip2)->i_mapping);
361013f9e267SShiyang Ruan
3611e2aaee9cSDarrick J. Wong inode_unlock(VFS_I(ip2));
3612d2c292d8SJan Kara if (ip1 != ip2)
3613e2aaee9cSDarrick J. Wong inode_unlock(VFS_I(ip1));
3614e2aaee9cSDarrick J. Wong }
361583771c50SDarrick J. Wong
361683771c50SDarrick J. Wong /*
361783771c50SDarrick J. Wong * Reload the incore unlinked inode list for this inode. Caller should ensure
361883771c50SDarrick J. Wong * that the link count cannot change, either by taking ILOCK_SHARED or otherwise
361983771c50SDarrick J. Wong * preventing other threads from executing.
362083771c50SDarrick J. Wong */
362183771c50SDarrick J. Wong int
362283771c50SDarrick J. Wong xfs_inode_reload_unlinked_bucket(
362383771c50SDarrick J. Wong struct xfs_trans *tp,
362483771c50SDarrick J. Wong struct xfs_inode *ip)
362583771c50SDarrick J. Wong {
362683771c50SDarrick J. Wong struct xfs_mount *mp = tp->t_mountp;
362783771c50SDarrick J. Wong struct xfs_buf *agibp;
362883771c50SDarrick J. Wong struct xfs_agi *agi;
362983771c50SDarrick J. Wong struct xfs_perag *pag;
363083771c50SDarrick J. Wong xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
363183771c50SDarrick J. Wong xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
363283771c50SDarrick J. Wong xfs_agino_t prev_agino, next_agino;
363383771c50SDarrick J. Wong unsigned int bucket;
363483771c50SDarrick J. Wong bool foundit = false;
363583771c50SDarrick J. Wong int error;
363683771c50SDarrick J. Wong
363783771c50SDarrick J. Wong /* Grab the first inode in the list */
363883771c50SDarrick J. Wong pag = xfs_perag_get(mp, agno);
363983771c50SDarrick J. Wong error = xfs_ialloc_read_agi(pag, tp, &agibp);
364083771c50SDarrick J. Wong xfs_perag_put(pag);
364183771c50SDarrick J. Wong if (error)
364283771c50SDarrick J. Wong return error;
364383771c50SDarrick J. Wong
364483771c50SDarrick J. Wong bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
364583771c50SDarrick J. Wong agi = agibp->b_addr;
364683771c50SDarrick J. Wong
364783771c50SDarrick J. Wong trace_xfs_inode_reload_unlinked_bucket(ip);
364883771c50SDarrick J. Wong
364983771c50SDarrick J. Wong xfs_info_ratelimited(mp,
365083771c50SDarrick J. Wong "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating list recovery.",
365183771c50SDarrick J. Wong agino, agno);
365283771c50SDarrick J. Wong
365383771c50SDarrick J. Wong prev_agino = NULLAGINO;
365483771c50SDarrick J. Wong next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
365583771c50SDarrick J. Wong while (next_agino != NULLAGINO) {
365683771c50SDarrick J. Wong struct xfs_inode *next_ip = NULL;
365783771c50SDarrick J. Wong
365883771c50SDarrick J. Wong if (next_agino == agino) {
365983771c50SDarrick J. Wong /* Found this inode, set its backlink. */
366083771c50SDarrick J. Wong next_ip = ip;
366183771c50SDarrick J. Wong next_ip->i_prev_unlinked = prev_agino;
366283771c50SDarrick J. Wong foundit = true;
366383771c50SDarrick J. Wong }
366483771c50SDarrick J. Wong if (!next_ip) {
366583771c50SDarrick J. Wong /* Inode already in memory. */
366683771c50SDarrick J. Wong next_ip = xfs_iunlink_lookup(pag, next_agino);
366783771c50SDarrick J. Wong }
366883771c50SDarrick J. Wong if (!next_ip) {
366983771c50SDarrick J. Wong /* Inode not in memory, reload. */
367083771c50SDarrick J. Wong error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
367183771c50SDarrick J. Wong next_agino);
367283771c50SDarrick J. Wong if (error)
367383771c50SDarrick J. Wong break;
367483771c50SDarrick J. Wong
367583771c50SDarrick J. Wong next_ip = xfs_iunlink_lookup(pag, next_agino);
367683771c50SDarrick J. Wong }
367783771c50SDarrick J. Wong if (!next_ip) {
367883771c50SDarrick J. Wong /* No incore inode at all? We reloaded it... */
367983771c50SDarrick J. Wong ASSERT(next_ip != NULL);
368083771c50SDarrick J. Wong error = -EFSCORRUPTED;
368183771c50SDarrick J. Wong break;
368283771c50SDarrick J. Wong }
368383771c50SDarrick J. Wong
368483771c50SDarrick J. Wong prev_agino = next_agino;
368583771c50SDarrick J. Wong next_agino = next_ip->i_next_unlinked;
368683771c50SDarrick J. Wong }
368783771c50SDarrick J. Wong
368883771c50SDarrick J. Wong xfs_trans_brelse(tp, agibp);
368983771c50SDarrick J. Wong /* Should have found this inode somewhere in the iunlinked bucket. */
369083771c50SDarrick J. Wong if (!error && !foundit)
369183771c50SDarrick J. Wong error = -EFSCORRUPTED;
369283771c50SDarrick J. Wong return error;
369383771c50SDarrick J. Wong }
369483771c50SDarrick J. Wong
369583771c50SDarrick J. Wong /* Decide if this inode is missing its unlinked list and reload it. */
369683771c50SDarrick J. Wong int
369783771c50SDarrick J. Wong xfs_inode_reload_unlinked(
369883771c50SDarrick J. Wong struct xfs_inode *ip)
369983771c50SDarrick J. Wong {
370083771c50SDarrick J. Wong struct xfs_trans *tp;
370183771c50SDarrick J. Wong int error;
370283771c50SDarrick J. Wong
370383771c50SDarrick J. Wong error = xfs_trans_alloc_empty(ip->i_mount, &tp);
370483771c50SDarrick J. Wong if (error)
370583771c50SDarrick J. Wong return error;
370683771c50SDarrick J. Wong
370783771c50SDarrick J. Wong xfs_ilock(ip, XFS_ILOCK_SHARED);
370883771c50SDarrick J. Wong if (xfs_inode_unlinked_incomplete(ip))
370983771c50SDarrick J. Wong error = xfs_inode_reload_unlinked_bucket(tp, ip);
371083771c50SDarrick J. Wong xfs_iunlock(ip, XFS_ILOCK_SHARED);
371183771c50SDarrick J. Wong xfs_trans_cancel(tp);
371283771c50SDarrick J. Wong
371383771c50SDarrick J. Wong return error;
371483771c50SDarrick J. Wong }
3715
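/*
 * A minimal sketch of how a lookup path might use the two reload helpers
 * above (hypothetical caller sketch, not the actual call site; error
 * handling abbreviated):
 *
 *	if (xfs_inode_unlinked_incomplete(ip)) {
 *		error = xfs_inode_reload_unlinked(ip);
 *		if (error) {
 *			xfs_irele(ip);
 *			return error;
 *		}
 *	}
 */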