/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_dir2_priv.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_d.di_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
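 *
 * Illustrative usage sketch (editorial addition, not from the original
 * source): callers pair the returned mode with the matching unlock, e.g.
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	... read the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);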
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains two
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_sem locking order:
 *
 * i_rwsem -> page lock -> mmap_sem
 * mmap_sem -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_sem locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_sem.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
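 *
 * Illustrative sketch (editorial addition, not from the original source):
 * an extent-manipulating caller such as a hole punch would, under these
 * rules, hold both locks exclusively around the page cache invalidation,
 * for example:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... invalidate the page cache and manipulate extents ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);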
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
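 *
 * Illustrative sketch (editorial addition, not from the original source):
 * a caller that must not sleep can try for the lock and back off on
 * failure, e.g.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return false;
 *	... do the non-blocking work ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);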
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 *
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The I/O lock cannot be held nested
 * if it is being demoted.
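 *
 * Illustrative sketch (editorial addition, not from the original source):
 * a caller that no longer needs exclusive access can downgrade in place
 * instead of unlocking and relocking, e.g.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... readers may now proceed; finish up with shared access ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);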
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif

#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
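 *
 * Illustrative sketch (editorial addition, not from the original source):
 * a rename-style caller that has sorted its (up to five) inode pointers by
 * i_ino can take all of their ilocks in one go with
 *
 *	xfs_lock_inodes(ips, num_inodes, XFS_ILOCK_EXCL);
 *
 * and later drop them with one xfs_iunlock() call per inode.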
 */
static void
xfs_lock_inodes(
	xfs_inode_t	**ips,
	int		inodes,
	uint		lock_mode)
{
	int		attempts = 0, i, j, try_lock;
	xfs_log_item_t	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = (xfs_log_item_t *)ips[j]->i_itemp;
				if (lp && (lp->li_flags & XFS_LI_IN_AIL))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
			xfs_lock_delays++;
#endif
		}
		i = 0;
		try_lock = 0;
		goto again;
	}

#ifdef DEBUG
	if (attempts) {
		if (attempts < 5) xfs_small_retries++;
		else if (attempts < 100) xfs_middle_retries++;
		else xfs_lots_retries++;
	} else {
		xfs_locked_n++;
	}
#endif
}

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
void
xfs_lock_two_inodes(
	xfs_inode_t		*ip0,
	xfs_inode_t		*ip1,
	uint			lock_mode)
{
	xfs_inode_t		*temp;
	int			attempts = 0;
	xfs_log_item_t		*lp;

	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock.  If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = (xfs_log_item_t *)ip0->i_itemp;
	if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
			xfs_iunlock(ip0, lock_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
	}
}


void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}

STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
static int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	struct timespec	tv;
	struct inode	*inode;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);
	inode = VFS_I(ip);

	/*
	 * We always convert v1 inodes to v2 now - we only support filesystems
	 * with >= v2 inode capability, so there is no reason for ever leaving
	 * an inode in v1 format.
	 */
	if (ip->i_d.di_version == 1)
		ip->i_d.di_version = 2;

	inode->i_mode = mode;
	set_nlink(inode, nlink);
	ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
	ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
	xfs_set_projid(ip, prid);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (inode->i_mode & S_ISGID) &&
	    (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
		inode->i_mode &= ~S_ISGID;

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		inode->i_version = 1;
		ip->i_d.di_flags2 = 0;
		ip->i_d.di_cowextsize = 0;
		ip->i_d.di_crtime.t_sec = (__int32_t)tv.tv_sec;
		ip->i_d.di_crtime.t_nsec = (__int32_t)tv.tv_nsec;
	}


	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint64_t	di_flags2 = 0;
			uint		di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
					di_flags |= XFS_DIFLAG_PROJINHERIT;
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
				di_flags2 |= XFS_DIFLAG2_DAX;

			ip->i_d.di_flags |= di_flags;
			ip->i_d.di_flags2 |= di_flags2;
		}
		if (pip &&
		    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
		    pip->i_d.di_version == 3 &&
		    ip->i_d.di_version == 3) {
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
			}
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 *
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory within which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,		/* project id */
	int		okalloc,	/* ok to allocate new space */
	xfs_inode_t	**ipp,		/* pointer to inode; it will be
					   locked. */
	int		*committed)

{
	xfs_trans_t	*tp;
	xfs_inode_t	*ip;
	xfs_buf_t	*ialloc_context = NULL;
	int		code;
	void		*dqinfo;
	uint		tflags;

	tp = *tpp;
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context. We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
			  &ialloc_context, &ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return -ENOSPC;
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		code = xfs_trans_roll(&tp, NULL);
		if (committed != NULL)
			*committed = 1;

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  okalloc, &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);

	} else {
		if (committed != NULL)
			*committed = 0;
	}

	*ipp = ip;
	*tpp = tp;

	return 0;
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
1110e546cb79SDave Chinner */ 11110d5a75e9SEric Sandeen static int /* error */ 1112e546cb79SDave Chinner xfs_droplink( 1113e546cb79SDave Chinner xfs_trans_t *tp, 1114e546cb79SDave Chinner xfs_inode_t *ip) 1115e546cb79SDave Chinner { 1116e546cb79SDave Chinner xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); 1117e546cb79SDave Chinner 1118e546cb79SDave Chinner drop_nlink(VFS_I(ip)); 1119e546cb79SDave Chinner xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1120e546cb79SDave Chinner 112154d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink) 112254d7b5c1SDave Chinner return 0; 112354d7b5c1SDave Chinner 112454d7b5c1SDave Chinner return xfs_iunlink(tp, ip); 1125e546cb79SDave Chinner } 1126e546cb79SDave Chinner 1127e546cb79SDave Chinner /* 1128e546cb79SDave Chinner * Increment the link count on an inode & log the change. 1129e546cb79SDave Chinner */ 11300d5a75e9SEric Sandeen static int 1131e546cb79SDave Chinner xfs_bumplink( 1132e546cb79SDave Chinner xfs_trans_t *tp, 1133e546cb79SDave Chinner xfs_inode_t *ip) 1134e546cb79SDave Chinner { 1135e546cb79SDave Chinner xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); 1136e546cb79SDave Chinner 1137263997a6SDave Chinner ASSERT(ip->i_d.di_version > 1); 1138e546cb79SDave Chinner inc_nlink(VFS_I(ip)); 1139e546cb79SDave Chinner xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1140e546cb79SDave Chinner return 0; 1141e546cb79SDave Chinner } 1142e546cb79SDave Chinner 1143c24b5dfaSDave Chinner int 1144c24b5dfaSDave Chinner xfs_create( 1145c24b5dfaSDave Chinner xfs_inode_t *dp, 1146c24b5dfaSDave Chinner struct xfs_name *name, 1147c24b5dfaSDave Chinner umode_t mode, 1148c24b5dfaSDave Chinner xfs_dev_t rdev, 1149c24b5dfaSDave Chinner xfs_inode_t **ipp) 1150c24b5dfaSDave Chinner { 1151c24b5dfaSDave Chinner int is_dir = S_ISDIR(mode); 1152c24b5dfaSDave Chinner struct xfs_mount *mp = dp->i_mount; 1153c24b5dfaSDave Chinner struct xfs_inode *ip = NULL; 1154c24b5dfaSDave Chinner struct xfs_trans *tp = NULL; 1155c24b5dfaSDave Chinner int error; 11562c3234d1SDarrick J. Wong struct xfs_defer_ops dfops; 1157c24b5dfaSDave Chinner xfs_fsblock_t first_block; 1158c24b5dfaSDave Chinner bool unlock_dp_on_error = false; 1159c24b5dfaSDave Chinner prid_t prid; 1160c24b5dfaSDave Chinner struct xfs_dquot *udqp = NULL; 1161c24b5dfaSDave Chinner struct xfs_dquot *gdqp = NULL; 1162c24b5dfaSDave Chinner struct xfs_dquot *pdqp = NULL; 1163062647a8SBrian Foster struct xfs_trans_res *tres; 1164c24b5dfaSDave Chinner uint resblks; 1165c24b5dfaSDave Chinner 1166c24b5dfaSDave Chinner trace_xfs_create(dp, name); 1167c24b5dfaSDave Chinner 1168c24b5dfaSDave Chinner if (XFS_FORCED_SHUTDOWN(mp)) 11692451337dSDave Chinner return -EIO; 1170c24b5dfaSDave Chinner 1171163467d3SZhi Yong Wu prid = xfs_get_initial_prid(dp); 1172c24b5dfaSDave Chinner 1173c24b5dfaSDave Chinner /* 1174c24b5dfaSDave Chinner * Make sure that we have allocated dquot(s) on disk. 
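 * xfs_qm_vop_dqalloc() below hands back the user, group and project
 * dquots (udqp, gdqp, pdqp) that are later attached to the new inode
 * via xfs_qm_vop_create_dqattach() and released with xfs_qm_dqrele().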
1175c24b5dfaSDave Chinner */ 11767aab1b28SDwight Engen error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()), 11777aab1b28SDwight Engen xfs_kgid_to_gid(current_fsgid()), prid, 1178c24b5dfaSDave Chinner XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 1179c24b5dfaSDave Chinner &udqp, &gdqp, &pdqp); 1180c24b5dfaSDave Chinner if (error) 1181c24b5dfaSDave Chinner return error; 1182c24b5dfaSDave Chinner 1183c24b5dfaSDave Chinner if (is_dir) { 1184c24b5dfaSDave Chinner rdev = 0; 1185c24b5dfaSDave Chinner resblks = XFS_MKDIR_SPACE_RES(mp, name->len); 1186062647a8SBrian Foster tres = &M_RES(mp)->tr_mkdir; 1187c24b5dfaSDave Chinner } else { 1188c24b5dfaSDave Chinner resblks = XFS_CREATE_SPACE_RES(mp, name->len); 1189062647a8SBrian Foster tres = &M_RES(mp)->tr_create; 1190c24b5dfaSDave Chinner } 1191c24b5dfaSDave Chinner 1192c24b5dfaSDave Chinner /* 1193c24b5dfaSDave Chinner * Initially assume that the file does not exist and 1194c24b5dfaSDave Chinner * reserve the resources for that case. If that is not 1195c24b5dfaSDave Chinner * the case we'll drop the one we have and get a more 1196c24b5dfaSDave Chinner * appropriate transaction later. 1197c24b5dfaSDave Chinner */ 1198253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 11992451337dSDave Chinner if (error == -ENOSPC) { 1200c24b5dfaSDave Chinner /* flush outstanding delalloc blocks and retry */ 1201c24b5dfaSDave Chinner xfs_flush_inodes(mp); 1202253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 1203c24b5dfaSDave Chinner } 12042451337dSDave Chinner if (error == -ENOSPC) { 1205c24b5dfaSDave Chinner /* No space at all so try a "no-allocation" reservation */ 1206c24b5dfaSDave Chinner resblks = 0; 1207253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp); 1208c24b5dfaSDave Chinner } 12094906e215SChristoph Hellwig if (error) 1210253f4911SChristoph Hellwig goto out_release_inode; 1211c24b5dfaSDave Chinner 121265523218SChristoph Hellwig xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); 1213c24b5dfaSDave Chinner unlock_dp_on_error = true; 1214c24b5dfaSDave Chinner 12152c3234d1SDarrick J. Wong xfs_defer_init(&dfops, &first_block); 1216c24b5dfaSDave Chinner 1217c24b5dfaSDave Chinner /* 1218c24b5dfaSDave Chinner * Reserve disk quota and the inode. 1219c24b5dfaSDave Chinner */ 1220c24b5dfaSDave Chinner error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, 1221c24b5dfaSDave Chinner pdqp, resblks, 1, 0); 1222c24b5dfaSDave Chinner if (error) 1223c24b5dfaSDave Chinner goto out_trans_cancel; 1224c24b5dfaSDave Chinner 122594f3cad5SEric Sandeen if (!resblks) { 122694f3cad5SEric Sandeen error = xfs_dir_canenter(tp, dp, name); 1227c24b5dfaSDave Chinner if (error) 1228c24b5dfaSDave Chinner goto out_trans_cancel; 122994f3cad5SEric Sandeen } 1230c24b5dfaSDave Chinner 1231c24b5dfaSDave Chinner /* 1232c24b5dfaSDave Chinner * A newly created regular or special file just has one directory 1233c24b5dfaSDave Chinner * entry pointing to them, but a directory also the "." entry 1234c24b5dfaSDave Chinner * pointing to itself. 1235c24b5dfaSDave Chinner */ 1236c24b5dfaSDave Chinner error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, 1237f6106efaSEric Sandeen prid, resblks > 0, &ip, NULL); 1238d6077aa3SJan Kara if (error) 1239c24b5dfaSDave Chinner goto out_trans_cancel; 1240c24b5dfaSDave Chinner 1241c24b5dfaSDave Chinner /* 1242c24b5dfaSDave Chinner * Now we join the directory inode to the transaction. 
We do not do it 1243c24b5dfaSDave Chinner * earlier because xfs_dir_ialloc might commit the previous transaction 1244c24b5dfaSDave Chinner * (and release all the locks). An error from here on will result in 1245c24b5dfaSDave Chinner * the transaction cancel unlocking dp so don't do it explicitly in the 1246c24b5dfaSDave Chinner * error path. 1247c24b5dfaSDave Chinner */ 124865523218SChristoph Hellwig xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); 1249c24b5dfaSDave Chinner unlock_dp_on_error = false; 1250c24b5dfaSDave Chinner 1251c24b5dfaSDave Chinner error = xfs_dir_createname(tp, dp, name, ip->i_ino, 12522c3234d1SDarrick J. Wong &first_block, &dfops, resblks ? 1253c24b5dfaSDave Chinner resblks - XFS_IALLOC_SPACE_RES(mp) : 0); 1254c24b5dfaSDave Chinner if (error) { 12552451337dSDave Chinner ASSERT(error != -ENOSPC); 12564906e215SChristoph Hellwig goto out_trans_cancel; 1257c24b5dfaSDave Chinner } 1258c24b5dfaSDave Chinner xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1259c24b5dfaSDave Chinner xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 1260c24b5dfaSDave Chinner 1261c24b5dfaSDave Chinner if (is_dir) { 1262c24b5dfaSDave Chinner error = xfs_dir_init(tp, ip, dp); 1263c24b5dfaSDave Chinner if (error) 1264c24b5dfaSDave Chinner goto out_bmap_cancel; 1265c24b5dfaSDave Chinner 1266c24b5dfaSDave Chinner error = xfs_bumplink(tp, dp); 1267c24b5dfaSDave Chinner if (error) 1268c24b5dfaSDave Chinner goto out_bmap_cancel; 1269c24b5dfaSDave Chinner } 1270c24b5dfaSDave Chinner 1271c24b5dfaSDave Chinner /* 1272c24b5dfaSDave Chinner * If this is a synchronous mount, make sure that the 1273c24b5dfaSDave Chinner * create transaction goes to disk before returning to 1274c24b5dfaSDave Chinner * the user. 1275c24b5dfaSDave Chinner */ 1276c24b5dfaSDave Chinner if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 1277c24b5dfaSDave Chinner xfs_trans_set_sync(tp); 1278c24b5dfaSDave Chinner 1279c24b5dfaSDave Chinner /* 1280c24b5dfaSDave Chinner * Attach the dquot(s) to the inodes and modify them incore. 1281c24b5dfaSDave Chinner * These ids of the inode couldn't have changed since the new 1282c24b5dfaSDave Chinner * inode has been locked ever since it was created. 1283c24b5dfaSDave Chinner */ 1284c24b5dfaSDave Chinner xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp); 1285c24b5dfaSDave Chinner 12862c3234d1SDarrick J. Wong error = xfs_defer_finish(&tp, &dfops, NULL); 1287c24b5dfaSDave Chinner if (error) 1288c24b5dfaSDave Chinner goto out_bmap_cancel; 1289c24b5dfaSDave Chinner 129070393313SChristoph Hellwig error = xfs_trans_commit(tp); 1291c24b5dfaSDave Chinner if (error) 1292c24b5dfaSDave Chinner goto out_release_inode; 1293c24b5dfaSDave Chinner 1294c24b5dfaSDave Chinner xfs_qm_dqrele(udqp); 1295c24b5dfaSDave Chinner xfs_qm_dqrele(gdqp); 1296c24b5dfaSDave Chinner xfs_qm_dqrele(pdqp); 1297c24b5dfaSDave Chinner 1298c24b5dfaSDave Chinner *ipp = ip; 1299c24b5dfaSDave Chinner return 0; 1300c24b5dfaSDave Chinner 1301c24b5dfaSDave Chinner out_bmap_cancel: 13022c3234d1SDarrick J. Wong xfs_defer_cancel(&dfops); 1303c24b5dfaSDave Chinner out_trans_cancel: 13044906e215SChristoph Hellwig xfs_trans_cancel(tp); 1305c24b5dfaSDave Chinner out_release_inode: 1306c24b5dfaSDave Chinner /* 130758c90473SDave Chinner * Wait until after the current transaction is aborted to finish the 130858c90473SDave Chinner * setup of the inode and release the inode. This prevents recursive 130958c90473SDave Chinner * transactions and deadlocks from xfs_inactive. 
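 * In the code below that means xfs_finish_inode_setup() is called on
 * the new inode, if one was allocated, before the final IRELE() drops
 * our reference to it.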
1310c24b5dfaSDave Chinner */ 131158c90473SDave Chinner if (ip) { 131258c90473SDave Chinner xfs_finish_inode_setup(ip); 1313c24b5dfaSDave Chinner IRELE(ip); 131458c90473SDave Chinner } 1315c24b5dfaSDave Chinner 1316c24b5dfaSDave Chinner xfs_qm_dqrele(udqp); 1317c24b5dfaSDave Chinner xfs_qm_dqrele(gdqp); 1318c24b5dfaSDave Chinner xfs_qm_dqrele(pdqp); 1319c24b5dfaSDave Chinner 1320c24b5dfaSDave Chinner if (unlock_dp_on_error) 132165523218SChristoph Hellwig xfs_iunlock(dp, XFS_ILOCK_EXCL); 1322c24b5dfaSDave Chinner return error; 1323c24b5dfaSDave Chinner } 1324c24b5dfaSDave Chinner 1325c24b5dfaSDave Chinner int 132699b6436bSZhi Yong Wu xfs_create_tmpfile( 132799b6436bSZhi Yong Wu struct xfs_inode *dp, 132899b6436bSZhi Yong Wu struct dentry *dentry, 1329330033d6SBrian Foster umode_t mode, 1330330033d6SBrian Foster struct xfs_inode **ipp) 133199b6436bSZhi Yong Wu { 133299b6436bSZhi Yong Wu struct xfs_mount *mp = dp->i_mount; 133399b6436bSZhi Yong Wu struct xfs_inode *ip = NULL; 133499b6436bSZhi Yong Wu struct xfs_trans *tp = NULL; 133599b6436bSZhi Yong Wu int error; 133699b6436bSZhi Yong Wu prid_t prid; 133799b6436bSZhi Yong Wu struct xfs_dquot *udqp = NULL; 133899b6436bSZhi Yong Wu struct xfs_dquot *gdqp = NULL; 133999b6436bSZhi Yong Wu struct xfs_dquot *pdqp = NULL; 134099b6436bSZhi Yong Wu struct xfs_trans_res *tres; 134199b6436bSZhi Yong Wu uint resblks; 134299b6436bSZhi Yong Wu 134399b6436bSZhi Yong Wu if (XFS_FORCED_SHUTDOWN(mp)) 13442451337dSDave Chinner return -EIO; 134599b6436bSZhi Yong Wu 134699b6436bSZhi Yong Wu prid = xfs_get_initial_prid(dp); 134799b6436bSZhi Yong Wu 134899b6436bSZhi Yong Wu /* 134999b6436bSZhi Yong Wu * Make sure that we have allocated dquot(s) on disk. 135099b6436bSZhi Yong Wu */ 135199b6436bSZhi Yong Wu error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()), 135299b6436bSZhi Yong Wu xfs_kgid_to_gid(current_fsgid()), prid, 135399b6436bSZhi Yong Wu XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 135499b6436bSZhi Yong Wu &udqp, &gdqp, &pdqp); 135599b6436bSZhi Yong Wu if (error) 135699b6436bSZhi Yong Wu return error; 135799b6436bSZhi Yong Wu 135899b6436bSZhi Yong Wu resblks = XFS_IALLOC_SPACE_RES(mp); 135999b6436bSZhi Yong Wu tres = &M_RES(mp)->tr_create_tmpfile; 1360253f4911SChristoph Hellwig 1361253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 13622451337dSDave Chinner if (error == -ENOSPC) { 136399b6436bSZhi Yong Wu /* No space at all so try a "no-allocation" reservation */ 136499b6436bSZhi Yong Wu resblks = 0; 1365253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp); 136699b6436bSZhi Yong Wu } 13674906e215SChristoph Hellwig if (error) 1368253f4911SChristoph Hellwig goto out_release_inode; 136999b6436bSZhi Yong Wu 137099b6436bSZhi Yong Wu error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, 137199b6436bSZhi Yong Wu pdqp, resblks, 1, 0); 137299b6436bSZhi Yong Wu if (error) 137399b6436bSZhi Yong Wu goto out_trans_cancel; 137499b6436bSZhi Yong Wu 137599b6436bSZhi Yong Wu error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, 137699b6436bSZhi Yong Wu prid, resblks > 0, &ip, NULL); 1377d6077aa3SJan Kara if (error) 137899b6436bSZhi Yong Wu goto out_trans_cancel; 137999b6436bSZhi Yong Wu 138099b6436bSZhi Yong Wu if (mp->m_flags & XFS_MOUNT_WSYNC) 138199b6436bSZhi Yong Wu xfs_trans_set_sync(tp); 138299b6436bSZhi Yong Wu 138399b6436bSZhi Yong Wu /* 138499b6436bSZhi Yong Wu * Attach the dquot(s) to the inodes and modify them incore. 
138599b6436bSZhi Yong Wu * These ids of the inode couldn't have changed since the new 138699b6436bSZhi Yong Wu * inode has been locked ever since it was created. 138799b6436bSZhi Yong Wu */ 138899b6436bSZhi Yong Wu xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp); 138999b6436bSZhi Yong Wu 139099b6436bSZhi Yong Wu error = xfs_iunlink(tp, ip); 139199b6436bSZhi Yong Wu if (error) 13924906e215SChristoph Hellwig goto out_trans_cancel; 139399b6436bSZhi Yong Wu 139470393313SChristoph Hellwig error = xfs_trans_commit(tp); 139599b6436bSZhi Yong Wu if (error) 139699b6436bSZhi Yong Wu goto out_release_inode; 139799b6436bSZhi Yong Wu 139899b6436bSZhi Yong Wu xfs_qm_dqrele(udqp); 139999b6436bSZhi Yong Wu xfs_qm_dqrele(gdqp); 140099b6436bSZhi Yong Wu xfs_qm_dqrele(pdqp); 140199b6436bSZhi Yong Wu 1402330033d6SBrian Foster *ipp = ip; 140399b6436bSZhi Yong Wu return 0; 140499b6436bSZhi Yong Wu 140599b6436bSZhi Yong Wu out_trans_cancel: 14064906e215SChristoph Hellwig xfs_trans_cancel(tp); 140799b6436bSZhi Yong Wu out_release_inode: 140899b6436bSZhi Yong Wu /* 140958c90473SDave Chinner * Wait until after the current transaction is aborted to finish the 141058c90473SDave Chinner * setup of the inode and release the inode. This prevents recursive 141158c90473SDave Chinner * transactions and deadlocks from xfs_inactive. 141299b6436bSZhi Yong Wu */ 141358c90473SDave Chinner if (ip) { 141458c90473SDave Chinner xfs_finish_inode_setup(ip); 141599b6436bSZhi Yong Wu IRELE(ip); 141658c90473SDave Chinner } 141799b6436bSZhi Yong Wu 141899b6436bSZhi Yong Wu xfs_qm_dqrele(udqp); 141999b6436bSZhi Yong Wu xfs_qm_dqrele(gdqp); 142099b6436bSZhi Yong Wu xfs_qm_dqrele(pdqp); 142199b6436bSZhi Yong Wu 142299b6436bSZhi Yong Wu return error; 142399b6436bSZhi Yong Wu } 142499b6436bSZhi Yong Wu 142599b6436bSZhi Yong Wu int 1426c24b5dfaSDave Chinner xfs_link( 1427c24b5dfaSDave Chinner xfs_inode_t *tdp, 1428c24b5dfaSDave Chinner xfs_inode_t *sip, 1429c24b5dfaSDave Chinner struct xfs_name *target_name) 1430c24b5dfaSDave Chinner { 1431c24b5dfaSDave Chinner xfs_mount_t *mp = tdp->i_mount; 1432c24b5dfaSDave Chinner xfs_trans_t *tp; 1433c24b5dfaSDave Chinner int error; 14342c3234d1SDarrick J. 
Wong struct xfs_defer_ops dfops; 1435c24b5dfaSDave Chinner xfs_fsblock_t first_block; 1436c24b5dfaSDave Chinner int resblks; 1437c24b5dfaSDave Chinner 1438c24b5dfaSDave Chinner trace_xfs_link(tdp, target_name); 1439c24b5dfaSDave Chinner 1440c19b3b05SDave Chinner ASSERT(!S_ISDIR(VFS_I(sip)->i_mode)); 1441c24b5dfaSDave Chinner 1442c24b5dfaSDave Chinner if (XFS_FORCED_SHUTDOWN(mp)) 14432451337dSDave Chinner return -EIO; 1444c24b5dfaSDave Chinner 1445c24b5dfaSDave Chinner error = xfs_qm_dqattach(sip, 0); 1446c24b5dfaSDave Chinner if (error) 1447c24b5dfaSDave Chinner goto std_return; 1448c24b5dfaSDave Chinner 1449c24b5dfaSDave Chinner error = xfs_qm_dqattach(tdp, 0); 1450c24b5dfaSDave Chinner if (error) 1451c24b5dfaSDave Chinner goto std_return; 1452c24b5dfaSDave Chinner 1453c24b5dfaSDave Chinner resblks = XFS_LINK_SPACE_RES(mp, target_name->len); 1454253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp); 14552451337dSDave Chinner if (error == -ENOSPC) { 1456c24b5dfaSDave Chinner resblks = 0; 1457253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp); 1458c24b5dfaSDave Chinner } 14594906e215SChristoph Hellwig if (error) 1460253f4911SChristoph Hellwig goto std_return; 1461c24b5dfaSDave Chinner 1462c24b5dfaSDave Chinner xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL); 1463c24b5dfaSDave Chinner 1464c24b5dfaSDave Chinner xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL); 146565523218SChristoph Hellwig xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL); 1466c24b5dfaSDave Chinner 1467c24b5dfaSDave Chinner /* 1468c24b5dfaSDave Chinner * If we are using project inheritance, we only allow hard link 1469c24b5dfaSDave Chinner * creation in our tree when the project IDs are the same; else 1470c24b5dfaSDave Chinner * the tree quota mechanism could be circumvented. 1471c24b5dfaSDave Chinner */ 1472c24b5dfaSDave Chinner if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && 1473c24b5dfaSDave Chinner (xfs_get_projid(tdp) != xfs_get_projid(sip)))) { 14742451337dSDave Chinner error = -EXDEV; 1475c24b5dfaSDave Chinner goto error_return; 1476c24b5dfaSDave Chinner } 1477c24b5dfaSDave Chinner 147894f3cad5SEric Sandeen if (!resblks) { 147994f3cad5SEric Sandeen error = xfs_dir_canenter(tp, tdp, target_name); 1480c24b5dfaSDave Chinner if (error) 1481c24b5dfaSDave Chinner goto error_return; 148294f3cad5SEric Sandeen } 1483c24b5dfaSDave Chinner 14842c3234d1SDarrick J. Wong xfs_defer_init(&dfops, &first_block); 1485c24b5dfaSDave Chinner 148654d7b5c1SDave Chinner /* 148754d7b5c1SDave Chinner * Handle initial link state of O_TMPFILE inode 148854d7b5c1SDave Chinner */ 148954d7b5c1SDave Chinner if (VFS_I(sip)->i_nlink == 0) { 1490ab297431SZhi Yong Wu error = xfs_iunlink_remove(tp, sip); 1491ab297431SZhi Yong Wu if (error) 14924906e215SChristoph Hellwig goto error_return; 1493ab297431SZhi Yong Wu } 1494ab297431SZhi Yong Wu 1495c24b5dfaSDave Chinner error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino, 14962c3234d1SDarrick J. 
Wong &first_block, &dfops, resblks); 1497c24b5dfaSDave Chinner if (error) 14984906e215SChristoph Hellwig goto error_return; 1499c24b5dfaSDave Chinner xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1500c24b5dfaSDave Chinner xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); 1501c24b5dfaSDave Chinner 1502c24b5dfaSDave Chinner error = xfs_bumplink(tp, sip); 1503c24b5dfaSDave Chinner if (error) 15044906e215SChristoph Hellwig goto error_return; 1505c24b5dfaSDave Chinner 1506c24b5dfaSDave Chinner /* 1507c24b5dfaSDave Chinner * If this is a synchronous mount, make sure that the 1508c24b5dfaSDave Chinner * link transaction goes to disk before returning to 1509c24b5dfaSDave Chinner * the user. 1510c24b5dfaSDave Chinner */ 1511f6106efaSEric Sandeen if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 1512c24b5dfaSDave Chinner xfs_trans_set_sync(tp); 1513c24b5dfaSDave Chinner 15142c3234d1SDarrick J. Wong error = xfs_defer_finish(&tp, &dfops, NULL); 1515c24b5dfaSDave Chinner if (error) { 15162c3234d1SDarrick J. Wong xfs_defer_cancel(&dfops); 15174906e215SChristoph Hellwig goto error_return; 1518c24b5dfaSDave Chinner } 1519c24b5dfaSDave Chinner 152070393313SChristoph Hellwig return xfs_trans_commit(tp); 1521c24b5dfaSDave Chinner 1522c24b5dfaSDave Chinner error_return: 15234906e215SChristoph Hellwig xfs_trans_cancel(tp); 1524c24b5dfaSDave Chinner std_return: 1525c24b5dfaSDave Chinner return error; 1526c24b5dfaSDave Chinner } 1527c24b5dfaSDave Chinner 15281da177e4SLinus Torvalds /* 15298f04c47aSChristoph Hellwig * Free up the underlying blocks past new_size. The new size must be smaller 15308f04c47aSChristoph Hellwig * than the current size. This routine can be used both for the attribute and 15318f04c47aSChristoph Hellwig * data fork, and does not modify the inode size, which is left to the caller. 15321da177e4SLinus Torvalds * 1533f6485057SDavid Chinner * The transaction passed to this routine must have made a permanent log 1534f6485057SDavid Chinner * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the 1535f6485057SDavid Chinner * given transaction and start new ones, so make sure everything involved in 1536f6485057SDavid Chinner * the transaction is tidy before calling here. Some transaction will be 1537f6485057SDavid Chinner * returned to the caller to be committed. The incoming transaction must 1538f6485057SDavid Chinner * already include the inode, and both inode locks must be held exclusively. 1539f6485057SDavid Chinner * The inode must also be "held" within the transaction. On return the inode 1540f6485057SDavid Chinner * will be "held" within the returned transaction. This routine does NOT 1541f6485057SDavid Chinner * require any disk space to be reserved for it within the transaction. 15421da177e4SLinus Torvalds * 1543f6485057SDavid Chinner * If we get an error, we must return with the inode locked and linked into the 1544f6485057SDavid Chinner * current transaction. This keeps things simple for the higher level code, 1545f6485057SDavid Chinner * because it always knows that the inode is locked and held in the transaction 1546f6485057SDavid Chinner * that returns to it whether errors occur or not. We don't mark the inode 1547f6485057SDavid Chinner * dirty on error so that transactions can be easily aborted if possible. 
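 * xfs_inactive_truncate() later in this file is a typical caller: it
 * joins the inode to the transaction, logs di_size as zero, calls this
 * routine on the data fork and then commits the transaction it gets
 * back.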
15481da177e4SLinus Torvalds */ 15491da177e4SLinus Torvalds int 15508f04c47aSChristoph Hellwig xfs_itruncate_extents( 15518f04c47aSChristoph Hellwig struct xfs_trans **tpp, 15528f04c47aSChristoph Hellwig struct xfs_inode *ip, 15538f04c47aSChristoph Hellwig int whichfork, 15548f04c47aSChristoph Hellwig xfs_fsize_t new_size) 15551da177e4SLinus Torvalds { 15568f04c47aSChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 15578f04c47aSChristoph Hellwig struct xfs_trans *tp = *tpp; 15582c3234d1SDarrick J. Wong struct xfs_defer_ops dfops; 15591da177e4SLinus Torvalds xfs_fsblock_t first_block; 15601da177e4SLinus Torvalds xfs_fileoff_t first_unmap_block; 15611da177e4SLinus Torvalds xfs_fileoff_t last_block; 15628f04c47aSChristoph Hellwig xfs_filblks_t unmap_len; 15638f04c47aSChristoph Hellwig int error = 0; 15648f04c47aSChristoph Hellwig int done = 0; 15651da177e4SLinus Torvalds 15660b56185bSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 15670b56185bSChristoph Hellwig ASSERT(!atomic_read(&VFS_I(ip)->i_count) || 15680b56185bSChristoph Hellwig xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 1569ce7ae151SChristoph Hellwig ASSERT(new_size <= XFS_ISIZE(ip)); 15708f04c47aSChristoph Hellwig ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 15711da177e4SLinus Torvalds ASSERT(ip->i_itemp != NULL); 1572898621d5SChristoph Hellwig ASSERT(ip->i_itemp->ili_lock_flags == 0); 15731da177e4SLinus Torvalds ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); 15741da177e4SLinus Torvalds 1575673e8e59SChristoph Hellwig trace_xfs_itruncate_extents_start(ip, new_size); 1576673e8e59SChristoph Hellwig 15771da177e4SLinus Torvalds /* 15781da177e4SLinus Torvalds * Since it is possible for space to become allocated beyond 15791da177e4SLinus Torvalds * the end of the file (in a crash where the space is allocated 15801da177e4SLinus Torvalds * but the inode size is not yet updated), simply remove any 15811da177e4SLinus Torvalds * blocks which show up between the new EOF and the maximum 15821da177e4SLinus Torvalds * possible file size. If the first block to be removed is 15831da177e4SLinus Torvalds * beyond the maximum file size (ie it is the same as last_block), 15841da177e4SLinus Torvalds * then there is nothing to do. 15851da177e4SLinus Torvalds */ 15868f04c47aSChristoph Hellwig first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); 158732972383SDave Chinner last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); 15888f04c47aSChristoph Hellwig if (first_unmap_block == last_block) 15898f04c47aSChristoph Hellwig return 0; 15908f04c47aSChristoph Hellwig 15918f04c47aSChristoph Hellwig ASSERT(first_unmap_block < last_block); 15921da177e4SLinus Torvalds unmap_len = last_block - first_unmap_block + 1; 15931da177e4SLinus Torvalds while (!done) { 15942c3234d1SDarrick J. Wong xfs_defer_init(&dfops, &first_block); 15958f04c47aSChristoph Hellwig error = xfs_bunmapi(tp, ip, 15963e57ecf6SOlaf Weber first_unmap_block, unmap_len, 15978f04c47aSChristoph Hellwig xfs_bmapi_aflag(whichfork), 15981da177e4SLinus Torvalds XFS_ITRUNC_MAX_EXTENTS, 15992c3234d1SDarrick J. Wong &first_block, &dfops, 1600b4e9181eSChristoph Hellwig &done); 16018f04c47aSChristoph Hellwig if (error) 16028f04c47aSChristoph Hellwig goto out_bmap_cancel; 16031da177e4SLinus Torvalds 16041da177e4SLinus Torvalds /* 16051da177e4SLinus Torvalds * Duplicate the transaction that has the permanent 16061da177e4SLinus Torvalds * reservation and commit the old transaction. 16071da177e4SLinus Torvalds */ 16082c3234d1SDarrick J. 
Wong error = xfs_defer_finish(&tp, &dfops, ip); 16098f04c47aSChristoph Hellwig if (error) 16108f04c47aSChristoph Hellwig goto out_bmap_cancel; 16111da177e4SLinus Torvalds 16122e6db6c4SChristoph Hellwig error = xfs_trans_roll(&tp, ip); 16131da177e4SLinus Torvalds if (error) 16148f04c47aSChristoph Hellwig goto out; 16151da177e4SLinus Torvalds } 16168f04c47aSChristoph Hellwig 1617aa8968f2SDarrick J. Wong /* Remove all pending CoW reservations. */ 1618aa8968f2SDarrick J. Wong error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block, 16193802a345SChristoph Hellwig last_block, true); 1620aa8968f2SDarrick J. Wong if (error) 1621aa8968f2SDarrick J. Wong goto out; 1622aa8968f2SDarrick J. Wong 1623aa8968f2SDarrick J. Wong /* 1624aa8968f2SDarrick J. Wong * Clear the reflink flag if we truncated everything. 1625aa8968f2SDarrick J. Wong */ 162683104d44SDarrick J. Wong if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) { 1627aa8968f2SDarrick J. Wong ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; 162883104d44SDarrick J. Wong xfs_inode_clear_cowblocks_tag(ip); 162983104d44SDarrick J. Wong } 1630aa8968f2SDarrick J. Wong 1631673e8e59SChristoph Hellwig /* 1632673e8e59SChristoph Hellwig * Always re-log the inode so that our permanent transaction can keep 1633673e8e59SChristoph Hellwig * on rolling it forward in the log. 1634673e8e59SChristoph Hellwig */ 1635673e8e59SChristoph Hellwig xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1636673e8e59SChristoph Hellwig 1637673e8e59SChristoph Hellwig trace_xfs_itruncate_extents_end(ip, new_size); 1638673e8e59SChristoph Hellwig 16398f04c47aSChristoph Hellwig out: 16408f04c47aSChristoph Hellwig *tpp = tp; 16418f04c47aSChristoph Hellwig return error; 16428f04c47aSChristoph Hellwig out_bmap_cancel: 16431da177e4SLinus Torvalds /* 16448f04c47aSChristoph Hellwig * If the bunmapi call encounters an error, return to the caller where 16458f04c47aSChristoph Hellwig * the transaction can be properly aborted. We just need to make sure 16468f04c47aSChristoph Hellwig * we're not holding any resources that we were not when we came in. 16471da177e4SLinus Torvalds */ 16482c3234d1SDarrick J. Wong xfs_defer_cancel(&dfops); 16498f04c47aSChristoph Hellwig goto out; 16508f04c47aSChristoph Hellwig } 16518f04c47aSChristoph Hellwig 1652c24b5dfaSDave Chinner int 1653c24b5dfaSDave Chinner xfs_release( 1654c24b5dfaSDave Chinner xfs_inode_t *ip) 1655c24b5dfaSDave Chinner { 1656c24b5dfaSDave Chinner xfs_mount_t *mp = ip->i_mount; 1657c24b5dfaSDave Chinner int error; 1658c24b5dfaSDave Chinner 1659c19b3b05SDave Chinner if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0)) 1660c24b5dfaSDave Chinner return 0; 1661c24b5dfaSDave Chinner 1662c24b5dfaSDave Chinner /* If this is a read-only mount, don't do this (would generate I/O) */ 1663c24b5dfaSDave Chinner if (mp->m_flags & XFS_MOUNT_RDONLY) 1664c24b5dfaSDave Chinner return 0; 1665c24b5dfaSDave Chinner 1666c24b5dfaSDave Chinner if (!XFS_FORCED_SHUTDOWN(mp)) { 1667c24b5dfaSDave Chinner int truncated; 1668c24b5dfaSDave Chinner 1669c24b5dfaSDave Chinner /* 1670c24b5dfaSDave Chinner * If we previously truncated this file and removed old data 1671c24b5dfaSDave Chinner * in the process, we want to initiate "early" writeout on 1672c24b5dfaSDave Chinner * the last close. This is an attempt to combat the notorious 1673c24b5dfaSDave Chinner * NULL files problem which is particularly noticeable from a 1674c24b5dfaSDave Chinner * truncate down, buffered (re-)write (delalloc), followed by 1675c24b5dfaSDave Chinner * a crash. 
What we are effectively doing here is 1676c24b5dfaSDave Chinner * significantly reducing the time window where we'd otherwise 1677c24b5dfaSDave Chinner * be exposed to that problem. 1678c24b5dfaSDave Chinner */ 1679c24b5dfaSDave Chinner truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); 1680c24b5dfaSDave Chinner if (truncated) { 1681c24b5dfaSDave Chinner xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); 1682eac152b4SDave Chinner if (ip->i_delayed_blks > 0) { 16832451337dSDave Chinner error = filemap_flush(VFS_I(ip)->i_mapping); 1684c24b5dfaSDave Chinner if (error) 1685c24b5dfaSDave Chinner return error; 1686c24b5dfaSDave Chinner } 1687c24b5dfaSDave Chinner } 1688c24b5dfaSDave Chinner } 1689c24b5dfaSDave Chinner 169054d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink == 0) 1691c24b5dfaSDave Chinner return 0; 1692c24b5dfaSDave Chinner 1693c24b5dfaSDave Chinner if (xfs_can_free_eofblocks(ip, false)) { 1694c24b5dfaSDave Chinner 1695c24b5dfaSDave Chinner /* 1696a36b9261SBrian Foster * Check if the inode is being opened, written and closed 1697a36b9261SBrian Foster * frequently and we have delayed allocation blocks outstanding 1698a36b9261SBrian Foster * (e.g. streaming writes from the NFS server), truncating the 1699a36b9261SBrian Foster * blocks past EOF will cause fragmentation to occur. 1700a36b9261SBrian Foster * 1701a36b9261SBrian Foster * In this case don't do the truncation, but we have to be 1702a36b9261SBrian Foster * careful how we detect this case. Blocks beyond EOF show up as 1703a36b9261SBrian Foster * i_delayed_blks even when the inode is clean, so we need to 1704a36b9261SBrian Foster * truncate them away first before checking for a dirty release. 1705a36b9261SBrian Foster * Hence on the first dirty close we will still remove the 1706a36b9261SBrian Foster * speculative allocation, but after that we will leave it in 1707a36b9261SBrian Foster * place. 1708a36b9261SBrian Foster */ 1709a36b9261SBrian Foster if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE)) 1710a36b9261SBrian Foster return 0; 1711a36b9261SBrian Foster /* 1712c24b5dfaSDave Chinner * If we can't get the iolock just skip truncating the blocks 1713c24b5dfaSDave Chinner * past EOF because we could deadlock with the mmap_sem 1714c24b5dfaSDave Chinner * otherwise. We'll get another chance to drop them once the 1715c24b5dfaSDave Chinner * last reference to the inode is dropped, so we'll never leak 1716c24b5dfaSDave Chinner * blocks permanently. 1717c24b5dfaSDave Chinner */ 1718a36b9261SBrian Foster if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { 1719a36b9261SBrian Foster error = xfs_free_eofblocks(ip); 1720a36b9261SBrian Foster xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1721a36b9261SBrian Foster if (error) 1722c24b5dfaSDave Chinner return error; 1723a36b9261SBrian Foster } 1724c24b5dfaSDave Chinner 1725c24b5dfaSDave Chinner /* delalloc blocks after truncation means it really is dirty */ 1726c24b5dfaSDave Chinner if (ip->i_delayed_blks) 1727c24b5dfaSDave Chinner xfs_iflags_set(ip, XFS_IDIRTY_RELEASE); 1728c24b5dfaSDave Chinner } 1729c24b5dfaSDave Chinner return 0; 1730c24b5dfaSDave Chinner } 1731c24b5dfaSDave Chinner 1732c24b5dfaSDave Chinner /* 1733f7be2d7fSBrian Foster * xfs_inactive_truncate 1734f7be2d7fSBrian Foster * 1735f7be2d7fSBrian Foster * Called to perform a truncate when an inode becomes unlinked. 
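 * It allocates an itruncate transaction, joins the inode, logs di_size
 * as zero before any extents are removed (so a crash part way through
 * cannot expose stale data), and truncates the data fork to length
 * zero.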
1736f7be2d7fSBrian Foster */ 1737f7be2d7fSBrian Foster STATIC int 1738f7be2d7fSBrian Foster xfs_inactive_truncate( 1739f7be2d7fSBrian Foster struct xfs_inode *ip) 1740f7be2d7fSBrian Foster { 1741f7be2d7fSBrian Foster struct xfs_mount *mp = ip->i_mount; 1742f7be2d7fSBrian Foster struct xfs_trans *tp; 1743f7be2d7fSBrian Foster int error; 1744f7be2d7fSBrian Foster 1745253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); 1746f7be2d7fSBrian Foster if (error) { 1747f7be2d7fSBrian Foster ASSERT(XFS_FORCED_SHUTDOWN(mp)); 1748f7be2d7fSBrian Foster return error; 1749f7be2d7fSBrian Foster } 1750f7be2d7fSBrian Foster 1751f7be2d7fSBrian Foster xfs_ilock(ip, XFS_ILOCK_EXCL); 1752f7be2d7fSBrian Foster xfs_trans_ijoin(tp, ip, 0); 1753f7be2d7fSBrian Foster 1754f7be2d7fSBrian Foster /* 1755f7be2d7fSBrian Foster * Log the inode size first to prevent stale data exposure in the event 1756f7be2d7fSBrian Foster * of a system crash before the truncate completes. See the related 175769bca807SJan Kara * comment in xfs_vn_setattr_size() for details. 1758f7be2d7fSBrian Foster */ 1759f7be2d7fSBrian Foster ip->i_d.di_size = 0; 1760f7be2d7fSBrian Foster xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1761f7be2d7fSBrian Foster 1762f7be2d7fSBrian Foster error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); 1763f7be2d7fSBrian Foster if (error) 1764f7be2d7fSBrian Foster goto error_trans_cancel; 1765f7be2d7fSBrian Foster 1766f7be2d7fSBrian Foster ASSERT(ip->i_d.di_nextents == 0); 1767f7be2d7fSBrian Foster 176870393313SChristoph Hellwig error = xfs_trans_commit(tp); 1769f7be2d7fSBrian Foster if (error) 1770f7be2d7fSBrian Foster goto error_unlock; 1771f7be2d7fSBrian Foster 1772f7be2d7fSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 1773f7be2d7fSBrian Foster return 0; 1774f7be2d7fSBrian Foster 1775f7be2d7fSBrian Foster error_trans_cancel: 17764906e215SChristoph Hellwig xfs_trans_cancel(tp); 1777f7be2d7fSBrian Foster error_unlock: 1778f7be2d7fSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 1779f7be2d7fSBrian Foster return error; 1780f7be2d7fSBrian Foster } 1781f7be2d7fSBrian Foster 1782f7be2d7fSBrian Foster /* 178388877d2bSBrian Foster * xfs_inactive_ifree() 178488877d2bSBrian Foster * 178588877d2bSBrian Foster * Perform the inode free when an inode is unlinked. 178688877d2bSBrian Foster */ 178788877d2bSBrian Foster STATIC int 178888877d2bSBrian Foster xfs_inactive_ifree( 178988877d2bSBrian Foster struct xfs_inode *ip) 179088877d2bSBrian Foster { 17912c3234d1SDarrick J. Wong struct xfs_defer_ops dfops; 179288877d2bSBrian Foster xfs_fsblock_t first_block; 179388877d2bSBrian Foster struct xfs_mount *mp = ip->i_mount; 179488877d2bSBrian Foster struct xfs_trans *tp; 179588877d2bSBrian Foster int error; 179688877d2bSBrian Foster 17979d43b180SBrian Foster /* 179876d771b4SChristoph Hellwig * We try to use a per-AG reservation for any block needed by the finobt 179976d771b4SChristoph Hellwig * tree, but as the finobt feature predates the per-AG reservation 180076d771b4SChristoph Hellwig * support a degraded file system might not have enough space for the 180176d771b4SChristoph Hellwig * reservation at mount time. In that case try to dip into the reserved 180276d771b4SChristoph Hellwig * pool and pray. 18039d43b180SBrian Foster * 18049d43b180SBrian Foster * Send a warning if the reservation does happen to fail, as the inode 18059d43b180SBrian Foster * now remains allocated and sits on the unlinked list until the fs is 18069d43b180SBrian Foster * repaired. 
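 * The fallback below asks for XFS_IFREE_SPACE_RES() blocks with
 * XFS_TRANS_RESERVE set so that the allocation may dip into the
 * reserved block pool.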
18079d43b180SBrian Foster */ 180876d771b4SChristoph Hellwig if (unlikely(mp->m_inotbt_nores)) { 1809253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 181076d771b4SChristoph Hellwig XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, 181176d771b4SChristoph Hellwig &tp); 181276d771b4SChristoph Hellwig } else { 181376d771b4SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp); 181476d771b4SChristoph Hellwig } 181588877d2bSBrian Foster if (error) { 18162451337dSDave Chinner if (error == -ENOSPC) { 18179d43b180SBrian Foster xfs_warn_ratelimited(mp, 18189d43b180SBrian Foster "Failed to remove inode(s) from unlinked list. " 18199d43b180SBrian Foster "Please free space, unmount and run xfs_repair."); 18209d43b180SBrian Foster } else { 182188877d2bSBrian Foster ASSERT(XFS_FORCED_SHUTDOWN(mp)); 18229d43b180SBrian Foster } 182388877d2bSBrian Foster return error; 182488877d2bSBrian Foster } 182588877d2bSBrian Foster 182688877d2bSBrian Foster xfs_ilock(ip, XFS_ILOCK_EXCL); 182788877d2bSBrian Foster xfs_trans_ijoin(tp, ip, 0); 182888877d2bSBrian Foster 18292c3234d1SDarrick J. Wong xfs_defer_init(&dfops, &first_block); 18302c3234d1SDarrick J. Wong error = xfs_ifree(tp, ip, &dfops); 183188877d2bSBrian Foster if (error) { 183288877d2bSBrian Foster /* 183388877d2bSBrian Foster * If we fail to free the inode, shut down. The cancel 183488877d2bSBrian Foster * might do that, we need to make sure. Otherwise the 183588877d2bSBrian Foster * inode might be lost for a long time or forever. 183688877d2bSBrian Foster */ 183788877d2bSBrian Foster if (!XFS_FORCED_SHUTDOWN(mp)) { 183888877d2bSBrian Foster xfs_notice(mp, "%s: xfs_ifree returned error %d", 183988877d2bSBrian Foster __func__, error); 184088877d2bSBrian Foster xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); 184188877d2bSBrian Foster } 18424906e215SChristoph Hellwig xfs_trans_cancel(tp); 184388877d2bSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 184488877d2bSBrian Foster return error; 184588877d2bSBrian Foster } 184688877d2bSBrian Foster 184788877d2bSBrian Foster /* 184888877d2bSBrian Foster * Credit the quota account(s). The inode is gone. 184988877d2bSBrian Foster */ 185088877d2bSBrian Foster xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1); 185188877d2bSBrian Foster 185288877d2bSBrian Foster /* 1853d4a97a04SBrian Foster * Just ignore errors at this point. There is nothing we can do except 1854d4a97a04SBrian Foster * to try to keep going. Make sure it's not a silent error. 185588877d2bSBrian Foster */ 18562c3234d1SDarrick J. Wong error = xfs_defer_finish(&tp, &dfops, NULL); 1857d4a97a04SBrian Foster if (error) { 1858310a75a3SDarrick J. Wong xfs_notice(mp, "%s: xfs_defer_finish returned error %d", 185988877d2bSBrian Foster __func__, error); 18602c3234d1SDarrick J. Wong xfs_defer_cancel(&dfops); 1861d4a97a04SBrian Foster } 186270393313SChristoph Hellwig error = xfs_trans_commit(tp); 186388877d2bSBrian Foster if (error) 186488877d2bSBrian Foster xfs_notice(mp, "%s: xfs_trans_commit returned error %d", 186588877d2bSBrian Foster __func__, error); 186688877d2bSBrian Foster 186788877d2bSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 186888877d2bSBrian Foster return 0; 186988877d2bSBrian Foster } 187088877d2bSBrian Foster 187188877d2bSBrian Foster /* 1872c24b5dfaSDave Chinner * xfs_inactive 1873c24b5dfaSDave Chinner * 1874c24b5dfaSDave Chinner * This is called when the vnode reference count for the vnode 1875c24b5dfaSDave Chinner * goes to zero. 
If the file has been unlinked, then it must 1876c24b5dfaSDave Chinner * now be truncated. Also, we clear all of the read-ahead state 1877c24b5dfaSDave Chinner * kept for the inode here since the file is now closed. 1878c24b5dfaSDave Chinner */ 187974564fb4SBrian Foster void 1880c24b5dfaSDave Chinner xfs_inactive( 1881c24b5dfaSDave Chinner xfs_inode_t *ip) 1882c24b5dfaSDave Chinner { 18833d3c8b52SJie Liu struct xfs_mount *mp; 1884c24b5dfaSDave Chinner int error; 1885c24b5dfaSDave Chinner int truncate = 0; 1886c24b5dfaSDave Chinner 1887c24b5dfaSDave Chinner /* 1888c24b5dfaSDave Chinner * If the inode is already free, then there can be nothing 1889c24b5dfaSDave Chinner * to clean up here. 1890c24b5dfaSDave Chinner */ 1891c19b3b05SDave Chinner if (VFS_I(ip)->i_mode == 0) { 1892c24b5dfaSDave Chinner ASSERT(ip->i_df.if_real_bytes == 0); 1893c24b5dfaSDave Chinner ASSERT(ip->i_df.if_broot_bytes == 0); 189474564fb4SBrian Foster return; 1895c24b5dfaSDave Chinner } 1896c24b5dfaSDave Chinner 1897c24b5dfaSDave Chinner mp = ip->i_mount; 189817c12bcdSDarrick J. Wong ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY)); 1899c24b5dfaSDave Chinner 1900c24b5dfaSDave Chinner /* If this is a read-only mount, don't do this (would generate I/O) */ 1901c24b5dfaSDave Chinner if (mp->m_flags & XFS_MOUNT_RDONLY) 190274564fb4SBrian Foster return; 1903c24b5dfaSDave Chinner 190454d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink != 0) { 1905c24b5dfaSDave Chinner /* 1906c24b5dfaSDave Chinner * force is true because we are evicting an inode from the 1907c24b5dfaSDave Chinner * cache. Post-eof blocks must be freed, lest we end up with 1908c24b5dfaSDave Chinner * broken free space accounting. 1909*3b4683c2SBrian Foster * 1910*3b4683c2SBrian Foster * Note: don't bother with iolock here since lockdep complains 1911*3b4683c2SBrian Foster * about acquiring it in reclaim context. We have the only 1912*3b4683c2SBrian Foster * reference to the inode at this point anyways. 1913c24b5dfaSDave Chinner */ 1914*3b4683c2SBrian Foster if (xfs_can_free_eofblocks(ip, true)) 1915a36b9261SBrian Foster xfs_free_eofblocks(ip); 191674564fb4SBrian Foster 191774564fb4SBrian Foster return; 1918c24b5dfaSDave Chinner } 1919c24b5dfaSDave Chinner 1920c19b3b05SDave Chinner if (S_ISREG(VFS_I(ip)->i_mode) && 1921c24b5dfaSDave Chinner (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 || 1922c24b5dfaSDave Chinner ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0)) 1923c24b5dfaSDave Chinner truncate = 1; 1924c24b5dfaSDave Chinner 1925c24b5dfaSDave Chinner error = xfs_qm_dqattach(ip, 0); 1926c24b5dfaSDave Chinner if (error) 192774564fb4SBrian Foster return; 1928c24b5dfaSDave Chinner 1929c19b3b05SDave Chinner if (S_ISLNK(VFS_I(ip)->i_mode)) 193036b21ddeSBrian Foster error = xfs_inactive_symlink(ip); 1931f7be2d7fSBrian Foster else if (truncate) 1932f7be2d7fSBrian Foster error = xfs_inactive_truncate(ip); 193336b21ddeSBrian Foster if (error) 193474564fb4SBrian Foster return; 1935c24b5dfaSDave Chinner 1936c24b5dfaSDave Chinner /* 1937c24b5dfaSDave Chinner * If there are attributes associated with the file then blow them away 1938c24b5dfaSDave Chinner * now. The code calls a routine that recursively deconstructs the 19396dfe5a04SDave Chinner * attribute fork. It also blows away the in-core attribute fork.
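 * That routine is xfs_attr_inactive(), called just below; once it
 * returns successfully the assertions that follow expect no in-core
 * attribute fork (i_afp), no attribute extents and a zero fork offset.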
1940c24b5dfaSDave Chinner */ 19416dfe5a04SDave Chinner if (XFS_IFORK_Q(ip)) { 1942c24b5dfaSDave Chinner error = xfs_attr_inactive(ip); 1943c24b5dfaSDave Chinner if (error) 194474564fb4SBrian Foster return; 1945c24b5dfaSDave Chinner } 1946c24b5dfaSDave Chinner 19476dfe5a04SDave Chinner ASSERT(!ip->i_afp); 1948c24b5dfaSDave Chinner ASSERT(ip->i_d.di_anextents == 0); 19496dfe5a04SDave Chinner ASSERT(ip->i_d.di_forkoff == 0); 1950c24b5dfaSDave Chinner 1951c24b5dfaSDave Chinner /* 1952c24b5dfaSDave Chinner * Free the inode. 1953c24b5dfaSDave Chinner */ 195488877d2bSBrian Foster error = xfs_inactive_ifree(ip); 1955c24b5dfaSDave Chinner if (error) 195674564fb4SBrian Foster return; 1957c24b5dfaSDave Chinner 1958c24b5dfaSDave Chinner /* 1959c24b5dfaSDave Chinner * Release the dquots held by inode, if any. 1960c24b5dfaSDave Chinner */ 1961c24b5dfaSDave Chinner xfs_qm_dqdetach(ip); 1962c24b5dfaSDave Chinner } 1963c24b5dfaSDave Chinner 19641da177e4SLinus Torvalds /* 196554d7b5c1SDave Chinner * This is called when the inode's link count goes to 0 or we are creating a 196654d7b5c1SDave Chinner * tmpfile via O_TMPFILE. In the case of a tmpfile, @ignore_linkcount will be 196754d7b5c1SDave Chinner * set to true as the link count is dropped to zero by the VFS after we've 196854d7b5c1SDave Chinner * created the file successfully, so we have to add it to the unlinked list 196954d7b5c1SDave Chinner * while the link count is non-zero. 197054d7b5c1SDave Chinner * 197154d7b5c1SDave Chinner * We place the on-disk inode on a list in the AGI. It will be pulled from this 197254d7b5c1SDave Chinner * list when the inode is freed. 19731da177e4SLinus Torvalds */ 197454d7b5c1SDave Chinner STATIC int 19751da177e4SLinus Torvalds xfs_iunlink( 197654d7b5c1SDave Chinner struct xfs_trans *tp, 197754d7b5c1SDave Chinner struct xfs_inode *ip) 19781da177e4SLinus Torvalds { 197954d7b5c1SDave Chinner xfs_mount_t *mp = tp->t_mountp; 19801da177e4SLinus Torvalds xfs_agi_t *agi; 19811da177e4SLinus Torvalds xfs_dinode_t *dip; 19821da177e4SLinus Torvalds xfs_buf_t *agibp; 19831da177e4SLinus Torvalds xfs_buf_t *ibp; 19841da177e4SLinus Torvalds xfs_agino_t agino; 19851da177e4SLinus Torvalds short bucket_index; 19861da177e4SLinus Torvalds int offset; 19871da177e4SLinus Torvalds int error; 19881da177e4SLinus Torvalds 1989c19b3b05SDave Chinner ASSERT(VFS_I(ip)->i_mode != 0); 19901da177e4SLinus Torvalds 19911da177e4SLinus Torvalds /* 19921da177e4SLinus Torvalds * Get the agi buffer first. It ensures lock ordering 19931da177e4SLinus Torvalds * on the list. 19941da177e4SLinus Torvalds */ 19955e1be0fbSChristoph Hellwig error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp); 1996859d7182SVlad Apostolov if (error) 19971da177e4SLinus Torvalds return error; 19981da177e4SLinus Torvalds agi = XFS_BUF_TO_AGI(agibp); 19995e1be0fbSChristoph Hellwig 20001da177e4SLinus Torvalds /* 20011da177e4SLinus Torvalds * Get the index into the agi hash table for the 20021da177e4SLinus Torvalds * list this inode will go on. 
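 * The bucket is the AG-relative inode number modulo
 * XFS_AGI_UNLINKED_BUCKETS, and a new entry is always pushed onto the
 * front of that bucket's list.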
20031da177e4SLinus Torvalds */ 20041da177e4SLinus Torvalds agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 20051da177e4SLinus Torvalds ASSERT(agino != 0); 20061da177e4SLinus Torvalds bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 20071da177e4SLinus Torvalds ASSERT(agi->agi_unlinked[bucket_index]); 200816259e7dSChristoph Hellwig ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); 20091da177e4SLinus Torvalds 201069ef921bSChristoph Hellwig if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) { 20111da177e4SLinus Torvalds /* 20121da177e4SLinus Torvalds * There is already another inode in the bucket we need 20131da177e4SLinus Torvalds * to add ourselves to. Add us at the front of the list. 20141da177e4SLinus Torvalds * Here we put the head pointer into our next pointer, 20151da177e4SLinus Torvalds * and then we fall through to point the head at us. 20161da177e4SLinus Torvalds */ 2017475ee413SChristoph Hellwig error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 2018475ee413SChristoph Hellwig 0, 0); 2019c319b58bSVlad Apostolov if (error) 2020c319b58bSVlad Apostolov return error; 2021c319b58bSVlad Apostolov 202269ef921bSChristoph Hellwig ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO)); 20231da177e4SLinus Torvalds dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; 202492bfc6e7SChristoph Hellwig offset = ip->i_imap.im_boffset + 20251da177e4SLinus Torvalds offsetof(xfs_dinode_t, di_next_unlinked); 20260a32c26eSDave Chinner 20270a32c26eSDave Chinner /* need to recalc the inode CRC if appropriate */ 20280a32c26eSDave Chinner xfs_dinode_calc_crc(mp, dip); 20290a32c26eSDave Chinner 20301da177e4SLinus Torvalds xfs_trans_inode_buf(tp, ibp); 20311da177e4SLinus Torvalds xfs_trans_log_buf(tp, ibp, offset, 20321da177e4SLinus Torvalds (offset + sizeof(xfs_agino_t) - 1)); 20331da177e4SLinus Torvalds xfs_inobp_check(mp, ibp); 20341da177e4SLinus Torvalds } 20351da177e4SLinus Torvalds 20361da177e4SLinus Torvalds /* 20371da177e4SLinus Torvalds * Point the bucket head pointer at the inode being inserted. 20381da177e4SLinus Torvalds */ 20391da177e4SLinus Torvalds ASSERT(agino != 0); 204016259e7dSChristoph Hellwig agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); 20411da177e4SLinus Torvalds offset = offsetof(xfs_agi_t, agi_unlinked) + 20421da177e4SLinus Torvalds (sizeof(xfs_agino_t) * bucket_index); 20431da177e4SLinus Torvalds xfs_trans_log_buf(tp, agibp, offset, 20441da177e4SLinus Torvalds (offset + sizeof(xfs_agino_t) - 1)); 20451da177e4SLinus Torvalds return 0; 20461da177e4SLinus Torvalds } 20471da177e4SLinus Torvalds 20481da177e4SLinus Torvalds /* 20491da177e4SLinus Torvalds * Pull the on-disk inode from the AGI unlinked list. 
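 * Two cases are handled: if this inode sits at the head of its bucket,
 * the AGI bucket pointer is simply repointed at our di_next_unlinked
 * value; otherwise the singly linked list is walked from the head to
 * find our predecessor, whose di_next_unlinked is updated to skip us.
 * The on-disk list has the shape:
 *
 *	agi_unlinked[bucket] -> agino A -> agino B -> ... -> NULLAGINO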
20501da177e4SLinus Torvalds */ 20511da177e4SLinus Torvalds STATIC int 20521da177e4SLinus Torvalds xfs_iunlink_remove( 20531da177e4SLinus Torvalds xfs_trans_t *tp, 20541da177e4SLinus Torvalds xfs_inode_t *ip) 20551da177e4SLinus Torvalds { 20561da177e4SLinus Torvalds xfs_ino_t next_ino; 20571da177e4SLinus Torvalds xfs_mount_t *mp; 20581da177e4SLinus Torvalds xfs_agi_t *agi; 20591da177e4SLinus Torvalds xfs_dinode_t *dip; 20601da177e4SLinus Torvalds xfs_buf_t *agibp; 20611da177e4SLinus Torvalds xfs_buf_t *ibp; 20621da177e4SLinus Torvalds xfs_agnumber_t agno; 20631da177e4SLinus Torvalds xfs_agino_t agino; 20641da177e4SLinus Torvalds xfs_agino_t next_agino; 20651da177e4SLinus Torvalds xfs_buf_t *last_ibp; 20666fdf8cccSNathan Scott xfs_dinode_t *last_dip = NULL; 20671da177e4SLinus Torvalds short bucket_index; 20686fdf8cccSNathan Scott int offset, last_offset = 0; 20691da177e4SLinus Torvalds int error; 20701da177e4SLinus Torvalds 20711da177e4SLinus Torvalds mp = tp->t_mountp; 20721da177e4SLinus Torvalds agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 20731da177e4SLinus Torvalds 20741da177e4SLinus Torvalds /* 20751da177e4SLinus Torvalds * Get the agi buffer first. It ensures lock ordering 20761da177e4SLinus Torvalds * on the list. 20771da177e4SLinus Torvalds */ 20785e1be0fbSChristoph Hellwig error = xfs_read_agi(mp, tp, agno, &agibp); 20795e1be0fbSChristoph Hellwig if (error) 20801da177e4SLinus Torvalds return error; 20815e1be0fbSChristoph Hellwig 20821da177e4SLinus Torvalds agi = XFS_BUF_TO_AGI(agibp); 20835e1be0fbSChristoph Hellwig 20841da177e4SLinus Torvalds /* 20851da177e4SLinus Torvalds * Get the index into the agi hash table for the 20861da177e4SLinus Torvalds * list this inode will go on. 20871da177e4SLinus Torvalds */ 20881da177e4SLinus Torvalds agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 20891da177e4SLinus Torvalds ASSERT(agino != 0); 20901da177e4SLinus Torvalds bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 209169ef921bSChristoph Hellwig ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)); 20921da177e4SLinus Torvalds ASSERT(agi->agi_unlinked[bucket_index]); 20931da177e4SLinus Torvalds 209416259e7dSChristoph Hellwig if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { 20951da177e4SLinus Torvalds /* 2096475ee413SChristoph Hellwig * We're at the head of the list. Get the inode's on-disk 2097475ee413SChristoph Hellwig * buffer to see if there is anyone after us on the list. 2098475ee413SChristoph Hellwig * Only modify our next pointer if it is not already NULLAGINO. 2099475ee413SChristoph Hellwig * This saves us the overhead of dealing with the buffer when 2100475ee413SChristoph Hellwig * there is no need to change it. 
21011da177e4SLinus Torvalds */ 2102475ee413SChristoph Hellwig error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 2103475ee413SChristoph Hellwig 0, 0); 21041da177e4SLinus Torvalds if (error) { 2105475ee413SChristoph Hellwig xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.", 21060b932cccSDave Chinner __func__, error); 21071da177e4SLinus Torvalds return error; 21081da177e4SLinus Torvalds } 2109347d1c01SChristoph Hellwig next_agino = be32_to_cpu(dip->di_next_unlinked); 21101da177e4SLinus Torvalds ASSERT(next_agino != 0); 21111da177e4SLinus Torvalds if (next_agino != NULLAGINO) { 2112347d1c01SChristoph Hellwig dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 211392bfc6e7SChristoph Hellwig offset = ip->i_imap.im_boffset + 21141da177e4SLinus Torvalds offsetof(xfs_dinode_t, di_next_unlinked); 21150a32c26eSDave Chinner 21160a32c26eSDave Chinner /* need to recalc the inode CRC if appropriate */ 21170a32c26eSDave Chinner xfs_dinode_calc_crc(mp, dip); 21180a32c26eSDave Chinner 21191da177e4SLinus Torvalds xfs_trans_inode_buf(tp, ibp); 21201da177e4SLinus Torvalds xfs_trans_log_buf(tp, ibp, offset, 21211da177e4SLinus Torvalds (offset + sizeof(xfs_agino_t) - 1)); 21221da177e4SLinus Torvalds xfs_inobp_check(mp, ibp); 21231da177e4SLinus Torvalds } else { 21241da177e4SLinus Torvalds xfs_trans_brelse(tp, ibp); 21251da177e4SLinus Torvalds } 21261da177e4SLinus Torvalds /* 21271da177e4SLinus Torvalds * Point the bucket head pointer at the next inode. 21281da177e4SLinus Torvalds */ 21291da177e4SLinus Torvalds ASSERT(next_agino != 0); 21301da177e4SLinus Torvalds ASSERT(next_agino != agino); 213116259e7dSChristoph Hellwig agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); 21321da177e4SLinus Torvalds offset = offsetof(xfs_agi_t, agi_unlinked) + 21331da177e4SLinus Torvalds (sizeof(xfs_agino_t) * bucket_index); 21341da177e4SLinus Torvalds xfs_trans_log_buf(tp, agibp, offset, 21351da177e4SLinus Torvalds (offset + sizeof(xfs_agino_t) - 1)); 21361da177e4SLinus Torvalds } else { 21371da177e4SLinus Torvalds /* 21381da177e4SLinus Torvalds * We need to search the list for the inode being freed. 
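 * The list is only singly linked on disk, so we walk it from the
 * bucket head, mapping and reading each inode buffer in turn with
 * xfs_imap() and xfs_imap_to_bp(), until last_dip/last_ibp refer to
 * the entry immediately before the one being removed.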
21391da177e4SLinus Torvalds */ 214016259e7dSChristoph Hellwig next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 21411da177e4SLinus Torvalds last_ibp = NULL; 21421da177e4SLinus Torvalds while (next_agino != agino) { 2143129dbc9aSChristoph Hellwig struct xfs_imap imap; 2144129dbc9aSChristoph Hellwig 2145129dbc9aSChristoph Hellwig if (last_ibp) 21461da177e4SLinus Torvalds xfs_trans_brelse(tp, last_ibp); 2147129dbc9aSChristoph Hellwig 2148129dbc9aSChristoph Hellwig imap.im_blkno = 0; 21491da177e4SLinus Torvalds next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); 2150129dbc9aSChristoph Hellwig 2151129dbc9aSChristoph Hellwig error = xfs_imap(mp, tp, next_ino, &imap, 0); 21521da177e4SLinus Torvalds if (error) { 21530b932cccSDave Chinner xfs_warn(mp, 2154129dbc9aSChristoph Hellwig "%s: xfs_imap returned error %d.", 21550b932cccSDave Chinner __func__, error); 21561da177e4SLinus Torvalds return error; 21571da177e4SLinus Torvalds } 2158129dbc9aSChristoph Hellwig 2159129dbc9aSChristoph Hellwig error = xfs_imap_to_bp(mp, tp, &imap, &last_dip, 2160129dbc9aSChristoph Hellwig &last_ibp, 0, 0); 2161129dbc9aSChristoph Hellwig if (error) { 2162129dbc9aSChristoph Hellwig xfs_warn(mp, 2163129dbc9aSChristoph Hellwig "%s: xfs_imap_to_bp returned error %d.", 2164129dbc9aSChristoph Hellwig __func__, error); 2165129dbc9aSChristoph Hellwig return error; 2166129dbc9aSChristoph Hellwig } 2167129dbc9aSChristoph Hellwig 2168129dbc9aSChristoph Hellwig last_offset = imap.im_boffset; 2169347d1c01SChristoph Hellwig next_agino = be32_to_cpu(last_dip->di_next_unlinked); 21701da177e4SLinus Torvalds ASSERT(next_agino != NULLAGINO); 21711da177e4SLinus Torvalds ASSERT(next_agino != 0); 21721da177e4SLinus Torvalds } 2173475ee413SChristoph Hellwig 21741da177e4SLinus Torvalds /* 2175475ee413SChristoph Hellwig * Now last_ibp points to the buffer previous to us on the 2176475ee413SChristoph Hellwig * unlinked list. Pull us from the list. 21771da177e4SLinus Torvalds */ 2178475ee413SChristoph Hellwig error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 2179475ee413SChristoph Hellwig 0, 0); 21801da177e4SLinus Torvalds if (error) { 2181475ee413SChristoph Hellwig xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.", 21820b932cccSDave Chinner __func__, error); 21831da177e4SLinus Torvalds return error; 21841da177e4SLinus Torvalds } 2185347d1c01SChristoph Hellwig next_agino = be32_to_cpu(dip->di_next_unlinked); 21861da177e4SLinus Torvalds ASSERT(next_agino != 0); 21871da177e4SLinus Torvalds ASSERT(next_agino != agino); 21881da177e4SLinus Torvalds if (next_agino != NULLAGINO) { 2189347d1c01SChristoph Hellwig dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 219092bfc6e7SChristoph Hellwig offset = ip->i_imap.im_boffset + 21911da177e4SLinus Torvalds offsetof(xfs_dinode_t, di_next_unlinked); 21920a32c26eSDave Chinner 21930a32c26eSDave Chinner /* need to recalc the inode CRC if appropriate */ 21940a32c26eSDave Chinner xfs_dinode_calc_crc(mp, dip); 21950a32c26eSDave Chinner 21961da177e4SLinus Torvalds xfs_trans_inode_buf(tp, ibp); 21971da177e4SLinus Torvalds xfs_trans_log_buf(tp, ibp, offset, 21981da177e4SLinus Torvalds (offset + sizeof(xfs_agino_t) - 1)); 21991da177e4SLinus Torvalds xfs_inobp_check(mp, ibp); 22001da177e4SLinus Torvalds } else { 22011da177e4SLinus Torvalds xfs_trans_brelse(tp, ibp); 22021da177e4SLinus Torvalds } 22031da177e4SLinus Torvalds /* 22041da177e4SLinus Torvalds * Point the previous inode on the list to the next inode. 
22051da177e4SLinus Torvalds */ 2206347d1c01SChristoph Hellwig last_dip->di_next_unlinked = cpu_to_be32(next_agino); 22071da177e4SLinus Torvalds ASSERT(next_agino != 0); 22081da177e4SLinus Torvalds offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked); 22090a32c26eSDave Chinner 22100a32c26eSDave Chinner /* need to recalc the inode CRC if appropriate */ 22110a32c26eSDave Chinner xfs_dinode_calc_crc(mp, last_dip); 22120a32c26eSDave Chinner 22131da177e4SLinus Torvalds xfs_trans_inode_buf(tp, last_ibp); 22141da177e4SLinus Torvalds xfs_trans_log_buf(tp, last_ibp, offset, 22151da177e4SLinus Torvalds (offset + sizeof(xfs_agino_t) - 1)); 22161da177e4SLinus Torvalds xfs_inobp_check(mp, last_ibp); 22171da177e4SLinus Torvalds } 22181da177e4SLinus Torvalds return 0; 22191da177e4SLinus Torvalds } 22201da177e4SLinus Torvalds 22215b3eed75SDave Chinner /* 22220b8182dbSZhi Yong Wu * A big issue when freeing the inode cluster is that we _cannot_ skip any 22235b3eed75SDave Chinner * inodes that are in memory - they all must be marked stale and attached to 22245b3eed75SDave Chinner * the cluster buffer. 22255b3eed75SDave Chinner */ 22262a30f36dSChandra Seetharaman STATIC int 22271da177e4SLinus Torvalds xfs_ifree_cluster( 22281da177e4SLinus Torvalds xfs_inode_t *free_ip, 22291da177e4SLinus Torvalds xfs_trans_t *tp, 223009b56604SBrian Foster struct xfs_icluster *xic) 22311da177e4SLinus Torvalds { 22321da177e4SLinus Torvalds xfs_mount_t *mp = free_ip->i_mount; 22331da177e4SLinus Torvalds int blks_per_cluster; 2234982e939eSJie Liu int inodes_per_cluster; 22351da177e4SLinus Torvalds int nbufs; 22365b257b4aSDave Chinner int i, j; 22373cdaa189SBrian Foster int ioffset; 22381da177e4SLinus Torvalds xfs_daddr_t blkno; 22391da177e4SLinus Torvalds xfs_buf_t *bp; 22405b257b4aSDave Chinner xfs_inode_t *ip; 22411da177e4SLinus Torvalds xfs_inode_log_item_t *iip; 22421da177e4SLinus Torvalds xfs_log_item_t *lip; 22435017e97dSDave Chinner struct xfs_perag *pag; 224409b56604SBrian Foster xfs_ino_t inum; 22451da177e4SLinus Torvalds 224609b56604SBrian Foster inum = xic->first_ino; 22475017e97dSDave Chinner pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum)); 2248982e939eSJie Liu blks_per_cluster = xfs_icluster_size_fsb(mp); 2249982e939eSJie Liu inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; 2250126cd105SJie Liu nbufs = mp->m_ialloc_blks / blks_per_cluster; 22511da177e4SLinus Torvalds 2252982e939eSJie Liu for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) { 225309b56604SBrian Foster /* 225409b56604SBrian Foster * The allocation bitmap tells us which inodes of the chunk were 225509b56604SBrian Foster * physically allocated. Skip the cluster if an inode falls into 225609b56604SBrian Foster * a sparse region. 225709b56604SBrian Foster */ 22583cdaa189SBrian Foster ioffset = inum - xic->first_ino; 22593cdaa189SBrian Foster if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) { 22603cdaa189SBrian Foster ASSERT(do_mod(ioffset, inodes_per_cluster) == 0); 226109b56604SBrian Foster continue; 226209b56604SBrian Foster } 226309b56604SBrian Foster 22641da177e4SLinus Torvalds blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 22651da177e4SLinus Torvalds XFS_INO_TO_AGBNO(mp, inum)); 22661da177e4SLinus Torvalds 22671da177e4SLinus Torvalds /* 22685b257b4aSDave Chinner * We obtain and lock the backing buffer first in the process 22695b257b4aSDave Chinner * here, as we have to ensure that any dirty inode that we 22705b257b4aSDave Chinner * can't get the flush lock on is attached to the buffer. 
22715b257b4aSDave Chinner * If we scan the in-memory inodes first, then buffer IO can 22725b257b4aSDave Chinner * complete before we get a lock on it, and hence we may fail 22735b257b4aSDave Chinner * to mark all the active inodes on the buffer stale. 22741da177e4SLinus Torvalds */ 22751da177e4SLinus Torvalds bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2276b6aff29fSDave Chinner mp->m_bsize * blks_per_cluster, 2277b6aff29fSDave Chinner XBF_UNMAPPED); 22781da177e4SLinus Torvalds 22792a30f36dSChandra Seetharaman if (!bp) 22802451337dSDave Chinner return -ENOMEM; 2281b0f539deSDave Chinner 2282b0f539deSDave Chinner /* 2283b0f539deSDave Chinner * This buffer may not have been correctly initialised as we 2284b0f539deSDave Chinner * didn't read it from disk. That's not important because we are 2285b0f539deSDave Chinner * only using it to mark the buffer as stale in the log, and to 2286b0f539deSDave Chinner * attach stale cached inodes on it. That means it will never be 2287b0f539deSDave Chinner * dispatched for IO. If it is, we want to know about it, and we 2288b0f539deSDave Chinner * want it to fail. We can achieve this by adding a write 2289b0f539deSDave Chinner * verifier to the buffer. 2290b0f539deSDave Chinner */ 22911813dd64SDave Chinner bp->b_ops = &xfs_inode_buf_ops; 2292b0f539deSDave Chinner 22935b257b4aSDave Chinner /* 22945b257b4aSDave Chinner * Walk the inodes already attached to the buffer and mark them 22955b257b4aSDave Chinner * stale. These will all have the flush locks held, so an 22965b3eed75SDave Chinner * in-memory inode walk can't lock them. By marking them all 22975b3eed75SDave Chinner * stale first, we will not attempt to lock them in the loop 22985b3eed75SDave Chinner * below as the XFS_ISTALE flag will be set. 22995b257b4aSDave Chinner */ 2300adadbeefSChristoph Hellwig lip = bp->b_fspriv; 23011da177e4SLinus Torvalds while (lip) { 23021da177e4SLinus Torvalds if (lip->li_type == XFS_LI_INODE) { 23031da177e4SLinus Torvalds iip = (xfs_inode_log_item_t *)lip; 23041da177e4SLinus Torvalds ASSERT(iip->ili_logged == 1); 2305ca30b2a7SChristoph Hellwig lip->li_cb = xfs_istale_done; 23067b2e2a31SDavid Chinner xfs_trans_ail_copy_lsn(mp->m_ail, 23077b2e2a31SDavid Chinner &iip->ili_flush_lsn, 23087b2e2a31SDavid Chinner &iip->ili_item.li_lsn); 2309e5ffd2bbSDavid Chinner xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 23101da177e4SLinus Torvalds } 23111da177e4SLinus Torvalds lip = lip->li_bio_list; 23121da177e4SLinus Torvalds } 23131da177e4SLinus Torvalds 23145b3eed75SDave Chinner 23155b257b4aSDave Chinner /* 23165b257b4aSDave Chinner * For each inode in memory attempt to add it to the inode 23175b257b4aSDave Chinner * buffer and set it up for being staled on buffer IO 23185b257b4aSDave Chinner * completion. This is safe as we've locked out tail pushing 23195b257b4aSDave Chinner * and flushing by locking the buffer. 23205b257b4aSDave Chinner * 23215b257b4aSDave Chinner * We have already marked every inode that was part of a 23225b257b4aSDave Chinner * transaction stale above, which means there is no point in 23235b257b4aSDave Chinner * even trying to lock them.
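/*
 * Minimal userspace sketch of the write-verifier idea used above
 * (bp->b_ops = &xfs_inode_buf_ops): a buffer that must never reach disk
 * gets an ops vector whose write verifier always flags an error, so an
 * accidental write attempt is caught loudly instead of silently hitting
 * the disk.  The types and names here are invented for illustration.
 */
#include <stdio.h>

struct fake_buf;

struct fake_buf_ops {
	/* return 0 if the buffer may be written, nonzero otherwise */
	int (*verify_write)(struct fake_buf *bp);
};

struct fake_buf {
	const struct fake_buf_ops *ops;
	int error;
};

static int reject_all_writes(struct fake_buf *bp)
{
	fprintf(stderr, "BUG: stale-only buffer submitted for write\n");
	return -1;
}

static const struct fake_buf_ops stale_only_ops = {
	.verify_write = reject_all_writes,
};

static void submit_write(struct fake_buf *bp)
{
	if (bp->ops->verify_write(bp)) {
		bp->error = -5;		/* treat as -EIO */
		return;
	}
	/* ...real IO would be issued here... */
}

int main(void)
{
	struct fake_buf bp = { .ops = &stale_only_ops };

	submit_write(&bp);
	printf("write completed with error %d\n", bp.error);
	return 0;
}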
23245b257b4aSDave Chinner */ 2325982e939eSJie Liu for (i = 0; i < inodes_per_cluster; i++) { 23265b3eed75SDave Chinner retry: 23271a3e8f3dSDave Chinner rcu_read_lock(); 23285b257b4aSDave Chinner ip = radix_tree_lookup(&pag->pag_ici_root, 23295b257b4aSDave Chinner XFS_INO_TO_AGINO(mp, (inum + i))); 23301da177e4SLinus Torvalds 23311a3e8f3dSDave Chinner /* Inode not in memory, nothing to do */ 23321a3e8f3dSDave Chinner if (!ip) { 23331a3e8f3dSDave Chinner rcu_read_unlock(); 23345b257b4aSDave Chinner continue; 23355b257b4aSDave Chinner } 23365b257b4aSDave Chinner 23375b3eed75SDave Chinner /* 23381a3e8f3dSDave Chinner * because this is an RCU protected lookup, we could 23391a3e8f3dSDave Chinner * find a recently freed or even reallocated inode 23401a3e8f3dSDave Chinner * during the lookup. We need to check under the 23411a3e8f3dSDave Chinner * i_flags_lock for a valid inode here. Skip it if it 23421a3e8f3dSDave Chinner * is not valid, the wrong inode or stale. 23431a3e8f3dSDave Chinner */ 23441a3e8f3dSDave Chinner spin_lock(&ip->i_flags_lock); 23451a3e8f3dSDave Chinner if (ip->i_ino != inum + i || 23461a3e8f3dSDave Chinner __xfs_iflags_test(ip, XFS_ISTALE)) { 23471a3e8f3dSDave Chinner spin_unlock(&ip->i_flags_lock); 23481a3e8f3dSDave Chinner rcu_read_unlock(); 23491a3e8f3dSDave Chinner continue; 23501a3e8f3dSDave Chinner } 23511a3e8f3dSDave Chinner spin_unlock(&ip->i_flags_lock); 23521a3e8f3dSDave Chinner 23531a3e8f3dSDave Chinner /* 23545b3eed75SDave Chinner * Don't try to lock/unlock the current inode, but we 23555b3eed75SDave Chinner * _cannot_ skip the other inodes that we did not find 23565b3eed75SDave Chinner * in the list attached to the buffer and are not 23575b3eed75SDave Chinner * already marked stale. If we can't lock it, back off 23585b3eed75SDave Chinner * and retry. 23595b3eed75SDave Chinner */ 23605b257b4aSDave Chinner if (ip != free_ip && 23615b257b4aSDave Chinner !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 23621a3e8f3dSDave Chinner rcu_read_unlock(); 23635b3eed75SDave Chinner delay(1); 23645b3eed75SDave Chinner goto retry; 23655b257b4aSDave Chinner } 23661a3e8f3dSDave Chinner rcu_read_unlock(); 23675b257b4aSDave Chinner 23685b3eed75SDave Chinner xfs_iflock(ip); 23695b257b4aSDave Chinner xfs_iflags_set(ip, XFS_ISTALE); 23705b257b4aSDave Chinner 23715b3eed75SDave Chinner /* 23725b3eed75SDave Chinner * we don't need to attach clean inodes or those only 23735b3eed75SDave Chinner * with unlogged changes (which we throw away, anyway). 
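/*
 * Standalone sketch of the trylock/back-off pattern used in the loop above:
 * if the object cannot be locked without blocking, drop everything, wait a
 * tick and restart the lookup rather than risking a lock-order deadlock.
 * Compile with -pthread; all names here are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct obj {
	pthread_mutex_t lock;
	int stale;
};

static void mark_stale(struct obj *o)
{
retry:
	if (pthread_mutex_trylock(&o->lock) != 0) {
		usleep(1000);		/* back off, then retry the "lookup" */
		goto retry;
	}
	if (!o->stale)
		o->stale = 1;
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER, .stale = 0 };

	mark_stale(&o);
	printf("stale = %d\n", o.stale);
	return 0;
}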
23745b3eed75SDave Chinner */ 23755b257b4aSDave Chinner iip = ip->i_itemp; 23765b3eed75SDave Chinner if (!iip || xfs_inode_clean(ip)) { 23775b257b4aSDave Chinner ASSERT(ip != free_ip); 23781da177e4SLinus Torvalds xfs_ifunlock(ip); 23791da177e4SLinus Torvalds xfs_iunlock(ip, XFS_ILOCK_EXCL); 23801da177e4SLinus Torvalds continue; 23811da177e4SLinus Torvalds } 23821da177e4SLinus Torvalds 2383f5d8d5c4SChristoph Hellwig iip->ili_last_fields = iip->ili_fields; 2384f5d8d5c4SChristoph Hellwig iip->ili_fields = 0; 2385fc0561ceSDave Chinner iip->ili_fsync_fields = 0; 23861da177e4SLinus Torvalds iip->ili_logged = 1; 23877b2e2a31SDavid Chinner xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 23887b2e2a31SDavid Chinner &iip->ili_item.li_lsn); 23891da177e4SLinus Torvalds 2390ca30b2a7SChristoph Hellwig xfs_buf_attach_iodone(bp, xfs_istale_done, 2391ca30b2a7SChristoph Hellwig &iip->ili_item); 23925b257b4aSDave Chinner 23935b257b4aSDave Chinner if (ip != free_ip) 23941da177e4SLinus Torvalds xfs_iunlock(ip, XFS_ILOCK_EXCL); 23951da177e4SLinus Torvalds } 23961da177e4SLinus Torvalds 23971da177e4SLinus Torvalds xfs_trans_stale_inode_buf(tp, bp); 23981da177e4SLinus Torvalds xfs_trans_binval(tp, bp); 23991da177e4SLinus Torvalds } 24001da177e4SLinus Torvalds 24015017e97dSDave Chinner xfs_perag_put(pag); 24022a30f36dSChandra Seetharaman return 0; 24031da177e4SLinus Torvalds } 24041da177e4SLinus Torvalds 24051da177e4SLinus Torvalds /* 24061da177e4SLinus Torvalds * This is called to return an inode to the inode free list. 24071da177e4SLinus Torvalds * The inode should already be truncated to 0 length and have 24081da177e4SLinus Torvalds * no pages associated with it. This routine also assumes that 24091da177e4SLinus Torvalds * the inode is already a part of the transaction. 24101da177e4SLinus Torvalds * 24111da177e4SLinus Torvalds * The on-disk copy of the inode will have been added to the list 24121da177e4SLinus Torvalds * of unlinked inodes in the AGI. We need to remove the inode from 24131da177e4SLinus Torvalds * that list atomically with respect to freeing it here. 24141da177e4SLinus Torvalds */ 24151da177e4SLinus Torvalds int 24161da177e4SLinus Torvalds xfs_ifree( 24171da177e4SLinus Torvalds xfs_trans_t *tp, 24181da177e4SLinus Torvalds xfs_inode_t *ip, 24192c3234d1SDarrick J. Wong struct xfs_defer_ops *dfops) 24201da177e4SLinus Torvalds { 24211da177e4SLinus Torvalds int error; 242209b56604SBrian Foster struct xfs_icluster xic = { 0 }; 24231da177e4SLinus Torvalds 2424579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 242554d7b5c1SDave Chinner ASSERT(VFS_I(ip)->i_nlink == 0); 24261da177e4SLinus Torvalds ASSERT(ip->i_d.di_nextents == 0); 24271da177e4SLinus Torvalds ASSERT(ip->i_d.di_anextents == 0); 2428c19b3b05SDave Chinner ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode)); 24291da177e4SLinus Torvalds ASSERT(ip->i_d.di_nblocks == 0); 24301da177e4SLinus Torvalds 24311da177e4SLinus Torvalds /* 24321da177e4SLinus Torvalds * Pull the on-disk inode from the AGI unlinked list. 24331da177e4SLinus Torvalds */ 24341da177e4SLinus Torvalds error = xfs_iunlink_remove(tp, ip); 24351baaed8fSDave Chinner if (error) 24361da177e4SLinus Torvalds return error; 24371da177e4SLinus Torvalds 24382c3234d1SDarrick J. 
Wong error = xfs_difree(tp, ip->i_ino, dfops, &xic); 24391baaed8fSDave Chinner if (error) 24401da177e4SLinus Torvalds return error; 24411baaed8fSDave Chinner 2442c19b3b05SDave Chinner VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ 24431da177e4SLinus Torvalds ip->i_d.di_flags = 0; 24441da177e4SLinus Torvalds ip->i_d.di_dmevmask = 0; 24451da177e4SLinus Torvalds ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ 24461da177e4SLinus Torvalds ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; 24471da177e4SLinus Torvalds ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 24481da177e4SLinus Torvalds /* 24491da177e4SLinus Torvalds * Bump the generation count so no one will be confused 24501da177e4SLinus Torvalds * by reincarnations of this inode. 24511da177e4SLinus Torvalds */ 24529e9a2674SDave Chinner VFS_I(ip)->i_generation++; 24531da177e4SLinus Torvalds xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 24541da177e4SLinus Torvalds 245509b56604SBrian Foster if (xic.deleted) 245609b56604SBrian Foster error = xfs_ifree_cluster(ip, tp, &xic); 24571da177e4SLinus Torvalds 24582a30f36dSChandra Seetharaman return error; 24591da177e4SLinus Torvalds } 24601da177e4SLinus Torvalds 24611da177e4SLinus Torvalds /* 246260ec6783SChristoph Hellwig * This is called to unpin an inode. The caller must have the inode locked 246360ec6783SChristoph Hellwig * in at least shared mode so that the buffer cannot be subsequently pinned 246460ec6783SChristoph Hellwig * once someone is waiting for it to be unpinned. 24651da177e4SLinus Torvalds */ 246660ec6783SChristoph Hellwig static void 2467f392e631SChristoph Hellwig xfs_iunpin( 246860ec6783SChristoph Hellwig struct xfs_inode *ip) 2469a3f74ffbSDavid Chinner { 2470579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2471a3f74ffbSDavid Chinner 24724aaf15d1SDave Chinner trace_xfs_inode_unpin_nowait(ip, _RET_IP_); 24734aaf15d1SDave Chinner 2474a3f74ffbSDavid Chinner /* Give the log a push to start the unpinning I/O */ 247560ec6783SChristoph Hellwig xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0); 2476a14a348bSChristoph Hellwig 2477a3f74ffbSDavid Chinner } 2478a3f74ffbSDavid Chinner 2479f392e631SChristoph Hellwig static void 2480f392e631SChristoph Hellwig __xfs_iunpin_wait( 2481f392e631SChristoph Hellwig struct xfs_inode *ip) 2482f392e631SChristoph Hellwig { 2483f392e631SChristoph Hellwig wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT); 2484f392e631SChristoph Hellwig DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT); 2485f392e631SChristoph Hellwig 2486f392e631SChristoph Hellwig xfs_iunpin(ip); 2487f392e631SChristoph Hellwig 2488f392e631SChristoph Hellwig do { 2489f392e631SChristoph Hellwig prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); 2490f392e631SChristoph Hellwig if (xfs_ipincount(ip)) 2491f392e631SChristoph Hellwig io_schedule(); 2492f392e631SChristoph Hellwig } while (xfs_ipincount(ip)); 2493f392e631SChristoph Hellwig finish_wait(wq, &wait.wait); 2494f392e631SChristoph Hellwig } 2495f392e631SChristoph Hellwig 2496777df5afSDave Chinner void 24971da177e4SLinus Torvalds xfs_iunpin_wait( 249860ec6783SChristoph Hellwig struct xfs_inode *ip) 24991da177e4SLinus Torvalds { 2500f392e631SChristoph Hellwig if (xfs_ipincount(ip)) 2501f392e631SChristoph Hellwig __xfs_iunpin_wait(ip); 25021da177e4SLinus Torvalds } 25031da177e4SLinus Torvalds 250427320369SDave Chinner /* 250527320369SDave Chinner * Removing an inode from the namespace involves removing the directory entry 250627320369SDave Chinner * and 
dropping the link count on the inode. Removing the directory entry can 250727320369SDave Chinner * result in locking an AGF (directory blocks were freed) and removing a link 250827320369SDave Chinner * count can result in placing the inode on an unlinked list which results in 250927320369SDave Chinner * locking an AGI. 251027320369SDave Chinner * 251127320369SDave Chinner * The big problem here is that we have an ordering constraint on AGF and AGI 251227320369SDave Chinner * locking - inode allocation locks the AGI, then can allocate a new extent for 251327320369SDave Chinner * new inodes, locking the AGF after the AGI. Similarly, freeing the inode 251427320369SDave Chinner * removes the inode from the unlinked list, requiring that we lock the AGI 251527320369SDave Chinner * first, and then freeing the inode can result in an inode chunk being freed 251627320369SDave Chinner * and hence freeing disk space requiring that we lock an AGF. 251727320369SDave Chinner * 251827320369SDave Chinner * Hence the ordering that is imposed by other parts of the code is AGI before 251927320369SDave Chinner * AGF. This means we cannot remove the directory entry before we drop the inode 252027320369SDave Chinner * reference count and put it on the unlinked list as this results in a lock 252127320369SDave Chinner * order of AGF then AGI, and this can deadlock against inode allocation and 252227320369SDave Chinner * freeing. Therefore we must drop the link counts before we remove the 252327320369SDave Chinner * directory entry. 252427320369SDave Chinner * 252527320369SDave Chinner * This is still safe from a transactional point of view - it is not until we 2526310a75a3SDarrick J. Wong * get to xfs_defer_finish() that we have the possibility of multiple 252727320369SDave Chinner * transactions in this operation. Hence as long as we remove the directory 252827320369SDave Chinner * entry and drop the link count in the first transaction of the remove 252927320369SDave Chinner * operation, there are no transactional constraints on the ordering here. 253027320369SDave Chinner */ 2531c24b5dfaSDave Chinner int 2532c24b5dfaSDave Chinner xfs_remove( 2533c24b5dfaSDave Chinner xfs_inode_t *dp, 2534c24b5dfaSDave Chinner struct xfs_name *name, 2535c24b5dfaSDave Chinner xfs_inode_t *ip) 2536c24b5dfaSDave Chinner { 2537c24b5dfaSDave Chinner xfs_mount_t *mp = dp->i_mount; 2538c24b5dfaSDave Chinner xfs_trans_t *tp = NULL; 2539c19b3b05SDave Chinner int is_dir = S_ISDIR(VFS_I(ip)->i_mode); 2540c24b5dfaSDave Chinner int error = 0; 25412c3234d1SDarrick J. Wong struct xfs_defer_ops dfops; 2542c24b5dfaSDave Chinner xfs_fsblock_t first_block; 2543c24b5dfaSDave Chinner uint resblks; 2544c24b5dfaSDave Chinner 2545c24b5dfaSDave Chinner trace_xfs_remove(dp, name); 2546c24b5dfaSDave Chinner 2547c24b5dfaSDave Chinner if (XFS_FORCED_SHUTDOWN(mp)) 25482451337dSDave Chinner return -EIO; 2549c24b5dfaSDave Chinner 2550c24b5dfaSDave Chinner error = xfs_qm_dqattach(dp, 0); 2551c24b5dfaSDave Chinner if (error) 2552c24b5dfaSDave Chinner goto std_return; 2553c24b5dfaSDave Chinner 2554c24b5dfaSDave Chinner error = xfs_qm_dqattach(ip, 0); 2555c24b5dfaSDave Chinner if (error) 2556c24b5dfaSDave Chinner goto std_return; 2557c24b5dfaSDave Chinner 2558c24b5dfaSDave Chinner /* 2559c24b5dfaSDave Chinner * We try to get the real space reservation first, 2560c24b5dfaSDave Chinner * allowing for directory btree deletion(s) implying 2561c24b5dfaSDave Chinner * possible bmap insert(s). 
If we can't get the space 2562c24b5dfaSDave Chinner * reservation then we use 0 instead, and avoid the bmap 2563c24b5dfaSDave Chinner * btree insert(s) in the directory code by, if the bmap 2564c24b5dfaSDave Chinner * insert tries to happen, instead trimming the LAST 2565c24b5dfaSDave Chinner * block from the directory. 2566c24b5dfaSDave Chinner */ 2567c24b5dfaSDave Chinner resblks = XFS_REMOVE_SPACE_RES(mp); 2568253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp); 25692451337dSDave Chinner if (error == -ENOSPC) { 2570c24b5dfaSDave Chinner resblks = 0; 2571253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0, 2572253f4911SChristoph Hellwig &tp); 2573c24b5dfaSDave Chinner } 2574c24b5dfaSDave Chinner if (error) { 25752451337dSDave Chinner ASSERT(error != -ENOSPC); 2576253f4911SChristoph Hellwig goto std_return; 2577c24b5dfaSDave Chinner } 2578c24b5dfaSDave Chinner 2579c24b5dfaSDave Chinner xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL); 2580c24b5dfaSDave Chinner 258165523218SChristoph Hellwig xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); 2582c24b5dfaSDave Chinner xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 2583c24b5dfaSDave Chinner 2584c24b5dfaSDave Chinner /* 2585c24b5dfaSDave Chinner * If we're removing a directory perform some additional validation. 2586c24b5dfaSDave Chinner */ 2587c24b5dfaSDave Chinner if (is_dir) { 258854d7b5c1SDave Chinner ASSERT(VFS_I(ip)->i_nlink >= 2); 258954d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink != 2) { 25902451337dSDave Chinner error = -ENOTEMPTY; 2591c24b5dfaSDave Chinner goto out_trans_cancel; 2592c24b5dfaSDave Chinner } 2593c24b5dfaSDave Chinner if (!xfs_dir_isempty(ip)) { 25942451337dSDave Chinner error = -ENOTEMPTY; 2595c24b5dfaSDave Chinner goto out_trans_cancel; 2596c24b5dfaSDave Chinner } 2597c24b5dfaSDave Chinner 259827320369SDave Chinner /* Drop the link from ip's "..". */ 2599c24b5dfaSDave Chinner error = xfs_droplink(tp, dp); 2600c24b5dfaSDave Chinner if (error) 260127320369SDave Chinner goto out_trans_cancel; 2602c24b5dfaSDave Chinner 260327320369SDave Chinner /* Drop the "." link from ip to self. */ 2604c24b5dfaSDave Chinner error = xfs_droplink(tp, ip); 2605c24b5dfaSDave Chinner if (error) 260627320369SDave Chinner goto out_trans_cancel; 2607c24b5dfaSDave Chinner } else { 2608c24b5dfaSDave Chinner /* 2609c24b5dfaSDave Chinner * When removing a non-directory we need to log the parent 2610c24b5dfaSDave Chinner * inode here. For a directory this is done implicitly 2611c24b5dfaSDave Chinner * by the xfs_droplink call for the ".." entry. 2612c24b5dfaSDave Chinner */ 2613c24b5dfaSDave Chinner xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 2614c24b5dfaSDave Chinner } 261527320369SDave Chinner xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2616c24b5dfaSDave Chinner 261727320369SDave Chinner /* Drop the link from dp to ip. */ 2618c24b5dfaSDave Chinner error = xfs_droplink(tp, ip); 2619c24b5dfaSDave Chinner if (error) 262027320369SDave Chinner goto out_trans_cancel; 2621c24b5dfaSDave Chinner 26222c3234d1SDarrick J. Wong xfs_defer_init(&dfops, &first_block); 262327320369SDave Chinner error = xfs_dir_removename(tp, dp, name, ip->i_ino, 26242c3234d1SDarrick J. 
Wong &first_block, &dfops, resblks); 262527320369SDave Chinner if (error) { 26262451337dSDave Chinner ASSERT(error != -ENOENT); 262727320369SDave Chinner goto out_bmap_cancel; 262827320369SDave Chinner } 262927320369SDave Chinner 2630c24b5dfaSDave Chinner /* 2631c24b5dfaSDave Chinner * If this is a synchronous mount, make sure that the 2632c24b5dfaSDave Chinner * remove transaction goes to disk before returning to 2633c24b5dfaSDave Chinner * the user. 2634c24b5dfaSDave Chinner */ 2635c24b5dfaSDave Chinner if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 2636c24b5dfaSDave Chinner xfs_trans_set_sync(tp); 2637c24b5dfaSDave Chinner 26382c3234d1SDarrick J. Wong error = xfs_defer_finish(&tp, &dfops, NULL); 2639c24b5dfaSDave Chinner if (error) 2640c24b5dfaSDave Chinner goto out_bmap_cancel; 2641c24b5dfaSDave Chinner 264270393313SChristoph Hellwig error = xfs_trans_commit(tp); 2643c24b5dfaSDave Chinner if (error) 2644c24b5dfaSDave Chinner goto std_return; 2645c24b5dfaSDave Chinner 26462cd2ef6aSChristoph Hellwig if (is_dir && xfs_inode_is_filestream(ip)) 2647c24b5dfaSDave Chinner xfs_filestream_deassociate(ip); 2648c24b5dfaSDave Chinner 2649c24b5dfaSDave Chinner return 0; 2650c24b5dfaSDave Chinner 2651c24b5dfaSDave Chinner out_bmap_cancel: 26522c3234d1SDarrick J. Wong xfs_defer_cancel(&dfops); 2653c24b5dfaSDave Chinner out_trans_cancel: 26544906e215SChristoph Hellwig xfs_trans_cancel(tp); 2655c24b5dfaSDave Chinner std_return: 2656c24b5dfaSDave Chinner return error; 2657c24b5dfaSDave Chinner } 2658c24b5dfaSDave Chinner 2659f6bba201SDave Chinner /* 2660f6bba201SDave Chinner * Enter all inodes for a rename transaction into a sorted array. 2661f6bba201SDave Chinner */ 266295afcf5cSDave Chinner #define __XFS_SORT_INODES 5 2663f6bba201SDave Chinner STATIC void 2664f6bba201SDave Chinner xfs_sort_for_rename( 266595afcf5cSDave Chinner struct xfs_inode *dp1, /* in: old (source) directory inode */ 266695afcf5cSDave Chinner struct xfs_inode *dp2, /* in: new (target) directory inode */ 266795afcf5cSDave Chinner struct xfs_inode *ip1, /* in: inode of old entry */ 266895afcf5cSDave Chinner struct xfs_inode *ip2, /* in: inode of new entry */ 266995afcf5cSDave Chinner struct xfs_inode *wip, /* in: whiteout inode */ 267095afcf5cSDave Chinner struct xfs_inode **i_tab,/* out: sorted array of inodes */ 267195afcf5cSDave Chinner int *num_inodes) /* in/out: inodes in array */ 2672f6bba201SDave Chinner { 2673f6bba201SDave Chinner int i, j; 2674f6bba201SDave Chinner 267595afcf5cSDave Chinner ASSERT(*num_inodes == __XFS_SORT_INODES); 267695afcf5cSDave Chinner memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *)); 267795afcf5cSDave Chinner 2678f6bba201SDave Chinner /* 2679f6bba201SDave Chinner * i_tab contains a list of pointers to inodes. We initialize 2680f6bba201SDave Chinner * the table here & we'll sort it. We will then use it to 2681f6bba201SDave Chinner * order the acquisition of the inode locks. 2682f6bba201SDave Chinner * 2683f6bba201SDave Chinner * Note that the table may contain duplicates. e.g., dp1 == dp2. 
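/*
 * Standalone sketch of the idea behind the sort below: always take the
 * inode locks in ascending inode-number order so that two concurrent
 * renames touching the same inodes cannot deadlock against each other.
 * Types and numbers here are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_inode {
	uint64_t i_ino;
};

static void sort_by_ino(struct fake_inode **tab, int n)
{
	/* bubble sort: n is at most a handful of entries */
	for (int i = 0; i < n; i++) {
		for (int j = 1; j < n; j++) {
			if (tab[j]->i_ino < tab[j - 1]->i_ino) {
				struct fake_inode *tmp = tab[j];

				tab[j] = tab[j - 1];
				tab[j - 1] = tmp;
			}
		}
	}
}

int main(void)
{
	struct fake_inode a = { 17 }, b = { 5 }, c = { 42 };
	struct fake_inode *tab[] = { &a, &b, &c };

	sort_by_ino(tab, 3);
	for (int i = 0; i < 3; i++)
		printf("lock inode %llu\n", (unsigned long long)tab[i]->i_ino);
	return 0;
}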
2684f6bba201SDave Chinner */ 268595afcf5cSDave Chinner i = 0; 268695afcf5cSDave Chinner i_tab[i++] = dp1; 268795afcf5cSDave Chinner i_tab[i++] = dp2; 268895afcf5cSDave Chinner i_tab[i++] = ip1; 268995afcf5cSDave Chinner if (ip2) 269095afcf5cSDave Chinner i_tab[i++] = ip2; 269195afcf5cSDave Chinner if (wip) 269295afcf5cSDave Chinner i_tab[i++] = wip; 269395afcf5cSDave Chinner *num_inodes = i; 2694f6bba201SDave Chinner 2695f6bba201SDave Chinner /* 2696f6bba201SDave Chinner * Sort the elements via bubble sort. (Remember, there are at 269795afcf5cSDave Chinner * most 5 elements to sort, so this is adequate.) 2698f6bba201SDave Chinner */ 2699f6bba201SDave Chinner for (i = 0; i < *num_inodes; i++) { 2700f6bba201SDave Chinner for (j = 1; j < *num_inodes; j++) { 2701f6bba201SDave Chinner if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { 270295afcf5cSDave Chinner struct xfs_inode *temp = i_tab[j]; 2703f6bba201SDave Chinner i_tab[j] = i_tab[j-1]; 2704f6bba201SDave Chinner i_tab[j-1] = temp; 2705f6bba201SDave Chinner } 2706f6bba201SDave Chinner } 2707f6bba201SDave Chinner } 2708f6bba201SDave Chinner } 2709f6bba201SDave Chinner 2710310606b0SDave Chinner static int 2711310606b0SDave Chinner xfs_finish_rename( 2712310606b0SDave Chinner struct xfs_trans *tp, 27132c3234d1SDarrick J. Wong struct xfs_defer_ops *dfops) 2714310606b0SDave Chinner { 2715310606b0SDave Chinner int error; 2716310606b0SDave Chinner 2717310606b0SDave Chinner /* 2718310606b0SDave Chinner * If this is a synchronous mount, make sure that the rename transaction 2719310606b0SDave Chinner * goes to disk before returning to the user. 2720310606b0SDave Chinner */ 2721310606b0SDave Chinner if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 2722310606b0SDave Chinner xfs_trans_set_sync(tp); 2723310606b0SDave Chinner 27242c3234d1SDarrick J. Wong error = xfs_defer_finish(&tp, dfops, NULL); 2725310606b0SDave Chinner if (error) { 27262c3234d1SDarrick J. Wong xfs_defer_cancel(dfops); 27274906e215SChristoph Hellwig xfs_trans_cancel(tp); 2728310606b0SDave Chinner return error; 2729310606b0SDave Chinner } 2730310606b0SDave Chinner 273170393313SChristoph Hellwig return xfs_trans_commit(tp); 2732310606b0SDave Chinner } 2733310606b0SDave Chinner 2734f6bba201SDave Chinner /* 2735d31a1825SCarlos Maiolino * xfs_cross_rename() 2736d31a1825SCarlos Maiolino * 2737d31a1825SCarlos Maiolino * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall 2738d31a1825SCarlos Maiolino */ 2739d31a1825SCarlos Maiolino STATIC int 2740d31a1825SCarlos Maiolino xfs_cross_rename( 2741d31a1825SCarlos Maiolino struct xfs_trans *tp, 2742d31a1825SCarlos Maiolino struct xfs_inode *dp1, 2743d31a1825SCarlos Maiolino struct xfs_name *name1, 2744d31a1825SCarlos Maiolino struct xfs_inode *ip1, 2745d31a1825SCarlos Maiolino struct xfs_inode *dp2, 2746d31a1825SCarlos Maiolino struct xfs_name *name2, 2747d31a1825SCarlos Maiolino struct xfs_inode *ip2, 27482c3234d1SDarrick J. Wong struct xfs_defer_ops *dfops, 2749d31a1825SCarlos Maiolino xfs_fsblock_t *first_block, 2750d31a1825SCarlos Maiolino int spaceres) 2751d31a1825SCarlos Maiolino { 2752d31a1825SCarlos Maiolino int error = 0; 2753d31a1825SCarlos Maiolino int ip1_flags = 0; 2754d31a1825SCarlos Maiolino int ip2_flags = 0; 2755d31a1825SCarlos Maiolino int dp2_flags = 0; 2756d31a1825SCarlos Maiolino 2757d31a1825SCarlos Maiolino /* Swap inode number for dirent in first parent */ 2758d31a1825SCarlos Maiolino error = xfs_dir_replace(tp, dp1, name1, 2759d31a1825SCarlos Maiolino ip2->i_ino, 27602c3234d1SDarrick J.
Wong first_block, dfops, spaceres); 2761d31a1825SCarlos Maiolino if (error) 2762eeacd321SDave Chinner goto out_trans_abort; 2763d31a1825SCarlos Maiolino 2764d31a1825SCarlos Maiolino /* Swap inode number for dirent in second parent */ 2765d31a1825SCarlos Maiolino error = xfs_dir_replace(tp, dp2, name2, 2766d31a1825SCarlos Maiolino ip1->i_ino, 27672c3234d1SDarrick J. Wong first_block, dfops, spaceres); 2768d31a1825SCarlos Maiolino if (error) 2769eeacd321SDave Chinner goto out_trans_abort; 2770d31a1825SCarlos Maiolino 2771d31a1825SCarlos Maiolino /* 2772d31a1825SCarlos Maiolino * If we're renaming one or more directories across different parents, 2773d31a1825SCarlos Maiolino * update the respective ".." entries (and link counts) to match the new 2774d31a1825SCarlos Maiolino * parents. 2775d31a1825SCarlos Maiolino */ 2776d31a1825SCarlos Maiolino if (dp1 != dp2) { 2777d31a1825SCarlos Maiolino dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 2778d31a1825SCarlos Maiolino 2779c19b3b05SDave Chinner if (S_ISDIR(VFS_I(ip2)->i_mode)) { 2780d31a1825SCarlos Maiolino error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot, 2781d31a1825SCarlos Maiolino dp1->i_ino, first_block, 27822c3234d1SDarrick J. Wong dfops, spaceres); 2783d31a1825SCarlos Maiolino if (error) 2784eeacd321SDave Chinner goto out_trans_abort; 2785d31a1825SCarlos Maiolino 2786d31a1825SCarlos Maiolino /* transfer ip2 ".." reference to dp1 */ 2787c19b3b05SDave Chinner if (!S_ISDIR(VFS_I(ip1)->i_mode)) { 2788d31a1825SCarlos Maiolino error = xfs_droplink(tp, dp2); 2789d31a1825SCarlos Maiolino if (error) 2790eeacd321SDave Chinner goto out_trans_abort; 2791d31a1825SCarlos Maiolino error = xfs_bumplink(tp, dp1); 2792d31a1825SCarlos Maiolino if (error) 2793eeacd321SDave Chinner goto out_trans_abort; 2794d31a1825SCarlos Maiolino } 2795d31a1825SCarlos Maiolino 2796d31a1825SCarlos Maiolino /* 2797d31a1825SCarlos Maiolino * Although ip1 isn't changed here, userspace needs 2798d31a1825SCarlos Maiolino * to be warned about the change, so that applications 2799d31a1825SCarlos Maiolino * relying on it (like backup ones), will properly 2800d31a1825SCarlos Maiolino * notify the change 2801d31a1825SCarlos Maiolino */ 2802d31a1825SCarlos Maiolino ip1_flags |= XFS_ICHGTIME_CHG; 2803d31a1825SCarlos Maiolino ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 2804d31a1825SCarlos Maiolino } 2805d31a1825SCarlos Maiolino 2806c19b3b05SDave Chinner if (S_ISDIR(VFS_I(ip1)->i_mode)) { 2807d31a1825SCarlos Maiolino error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot, 2808d31a1825SCarlos Maiolino dp2->i_ino, first_block, 28092c3234d1SDarrick J. Wong dfops, spaceres); 2810d31a1825SCarlos Maiolino if (error) 2811eeacd321SDave Chinner goto out_trans_abort; 2812d31a1825SCarlos Maiolino 2813d31a1825SCarlos Maiolino /* transfer ip1 ".." 
reference to dp2 */ 2814c19b3b05SDave Chinner if (!S_ISDIR(VFS_I(ip2)->i_mode)) { 2815d31a1825SCarlos Maiolino error = xfs_droplink(tp, dp1); 2816d31a1825SCarlos Maiolino if (error) 2817eeacd321SDave Chinner goto out_trans_abort; 2818d31a1825SCarlos Maiolino error = xfs_bumplink(tp, dp2); 2819d31a1825SCarlos Maiolino if (error) 2820eeacd321SDave Chinner goto out_trans_abort; 2821d31a1825SCarlos Maiolino } 2822d31a1825SCarlos Maiolino 2823d31a1825SCarlos Maiolino /* 2824d31a1825SCarlos Maiolino * Although ip2 isn't changed here, userspace needs 2825d31a1825SCarlos Maiolino * to be warned about the change, so that applications 2826d31a1825SCarlos Maiolino * relying on it (like backup ones), will properly 2827d31a1825SCarlos Maiolino * notify the change 2828d31a1825SCarlos Maiolino */ 2829d31a1825SCarlos Maiolino ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 2830d31a1825SCarlos Maiolino ip2_flags |= XFS_ICHGTIME_CHG; 2831d31a1825SCarlos Maiolino } 2832d31a1825SCarlos Maiolino } 2833d31a1825SCarlos Maiolino 2834d31a1825SCarlos Maiolino if (ip1_flags) { 2835d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, ip1, ip1_flags); 2836d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE); 2837d31a1825SCarlos Maiolino } 2838d31a1825SCarlos Maiolino if (ip2_flags) { 2839d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, ip2, ip2_flags); 2840d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE); 2841d31a1825SCarlos Maiolino } 2842d31a1825SCarlos Maiolino if (dp2_flags) { 2843d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, dp2, dp2_flags); 2844d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE); 2845d31a1825SCarlos Maiolino } 2846d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2847d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE); 28482c3234d1SDarrick J. Wong return xfs_finish_rename(tp, dfops); 2849eeacd321SDave Chinner 2850eeacd321SDave Chinner out_trans_abort: 28512c3234d1SDarrick J. Wong xfs_defer_cancel(dfops); 28524906e215SChristoph Hellwig xfs_trans_cancel(tp); 2853d31a1825SCarlos Maiolino return error; 2854d31a1825SCarlos Maiolino } 2855d31a1825SCarlos Maiolino 2856d31a1825SCarlos Maiolino /* 28577dcf5c3eSDave Chinner * xfs_rename_alloc_whiteout() 28587dcf5c3eSDave Chinner * 28597dcf5c3eSDave Chinner * Return a referenced, unlinked, unlocked inode that can be used as a 28607dcf5c3eSDave Chinner * whiteout in a rename transaction. We use a tmpfile inode here so that if we 28617dcf5c3eSDave Chinner * crash between allocating the inode and linking it into the rename transaction, 28627dcf5c3eSDave Chinner * recovery will free the inode and we won't leak it. 28637dcf5c3eSDave Chinner */ 28647dcf5c3eSDave Chinner static int 28657dcf5c3eSDave Chinner xfs_rename_alloc_whiteout( 28667dcf5c3eSDave Chinner struct xfs_inode *dp, 28677dcf5c3eSDave Chinner struct xfs_inode **wip) 28687dcf5c3eSDave Chinner { 28697dcf5c3eSDave Chinner struct xfs_inode *tmpfile; 28707dcf5c3eSDave Chinner int error; 28717dcf5c3eSDave Chinner 28727dcf5c3eSDave Chinner error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile); 28737dcf5c3eSDave Chinner if (error) 28747dcf5c3eSDave Chinner return error; 28757dcf5c3eSDave Chinner 287622419ac9SBrian Foster /* 287722419ac9SBrian Foster * Prepare the tmpfile inode as if it were created through the VFS. 287822419ac9SBrian Foster * Otherwise, the link increment paths will complain about nlink 0->1.
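/*
 * The whiteout inode allocated above relies on the same trick userspace
 * gets from O_TMPFILE: the inode starts life on the unlinked list, so a
 * crash before it is linked into the namespace leaves nothing to clean up.
 * A minimal standalone sketch of that pattern (Linux-specific; assumes
 * /tmp and procfs are available, paths are examples only):
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char procpath[64];
	int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);

	if (fd < 0) {
		perror("O_TMPFILE");
		return 1;
	}
	/* the file exists but has no name yet; a crash here leaks nothing */
	snprintf(procpath, sizeof(procpath), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, procpath, AT_FDCWD, "/tmp/now-visible",
		   AT_SYMLINK_FOLLOW) < 0)
		perror("linkat");
	close(fd);
	return 0;
}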
287922419ac9SBrian Foster * Drop the link count as done by d_tmpfile(), complete the inode setup 288022419ac9SBrian Foster * and flag it as linkable. 288122419ac9SBrian Foster */ 288222419ac9SBrian Foster drop_nlink(VFS_I(tmpfile)); 28832b3d1d41SChristoph Hellwig xfs_setup_iops(tmpfile); 28847dcf5c3eSDave Chinner xfs_finish_inode_setup(tmpfile); 28857dcf5c3eSDave Chinner VFS_I(tmpfile)->i_state |= I_LINKABLE; 28867dcf5c3eSDave Chinner 28877dcf5c3eSDave Chinner *wip = tmpfile; 28887dcf5c3eSDave Chinner return 0; 28897dcf5c3eSDave Chinner } 28907dcf5c3eSDave Chinner 28917dcf5c3eSDave Chinner /* 2892f6bba201SDave Chinner * xfs_rename 2893f6bba201SDave Chinner */ 2894f6bba201SDave Chinner int 2895f6bba201SDave Chinner xfs_rename( 28967dcf5c3eSDave Chinner struct xfs_inode *src_dp, 2897f6bba201SDave Chinner struct xfs_name *src_name, 28987dcf5c3eSDave Chinner struct xfs_inode *src_ip, 28997dcf5c3eSDave Chinner struct xfs_inode *target_dp, 2900f6bba201SDave Chinner struct xfs_name *target_name, 29017dcf5c3eSDave Chinner struct xfs_inode *target_ip, 2902d31a1825SCarlos Maiolino unsigned int flags) 2903f6bba201SDave Chinner { 29047dcf5c3eSDave Chinner struct xfs_mount *mp = src_dp->i_mount; 29057dcf5c3eSDave Chinner struct xfs_trans *tp; 29062c3234d1SDarrick J. Wong struct xfs_defer_ops dfops; 2907f6bba201SDave Chinner xfs_fsblock_t first_block; 29087dcf5c3eSDave Chinner struct xfs_inode *wip = NULL; /* whiteout inode */ 29097dcf5c3eSDave Chinner struct xfs_inode *inodes[__XFS_SORT_INODES]; 291095afcf5cSDave Chinner int num_inodes = __XFS_SORT_INODES; 29112b93681fSDave Chinner bool new_parent = (src_dp != target_dp); 2912c19b3b05SDave Chinner bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); 2913f6bba201SDave Chinner int spaceres; 29147dcf5c3eSDave Chinner int error; 2915f6bba201SDave Chinner 2916f6bba201SDave Chinner trace_xfs_rename(src_dp, target_dp, src_name, target_name); 2917f6bba201SDave Chinner 2918eeacd321SDave Chinner if ((flags & RENAME_EXCHANGE) && !target_ip) 2919eeacd321SDave Chinner return -EINVAL; 2920f6bba201SDave Chinner 29217dcf5c3eSDave Chinner /* 29227dcf5c3eSDave Chinner * If we are doing a whiteout operation, allocate the whiteout inode 29237dcf5c3eSDave Chinner * we will be placing at the target and ensure the type is set 29247dcf5c3eSDave Chinner * appropriately. 
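/*
 * The rename flags checked in this function are driven from userspace
 * through renameat2().  A minimal sketch of how a caller exercises them
 * (Linux-specific; assumes kernel headers that define SYS_renameat2, and
 * note that RENAME_WHITEOUT additionally needs privilege to create the
 * whiteout device node; paths are examples only):
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_EXCHANGE
# define RENAME_EXCHANGE	(1 << 1)
#endif
#ifndef RENAME_WHITEOUT
# define RENAME_WHITEOUT	(1 << 2)
#endif

static long my_renameat2(const char *from, const char *to, unsigned int flags)
{
	return syscall(SYS_renameat2, AT_FDCWD, from, AT_FDCWD, to, flags);
}

int main(void)
{
	/* atomically swap two existing paths */
	if (my_renameat2("a", "b", RENAME_EXCHANGE) < 0)
		perror("RENAME_EXCHANGE");

	/* move "a" to "c", leaving a whiteout behind at "a" */
	if (my_renameat2("a", "c", RENAME_WHITEOUT) < 0)
		perror("RENAME_WHITEOUT");
	return 0;
}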
29257dcf5c3eSDave Chinner */ 29267dcf5c3eSDave Chinner if (flags & RENAME_WHITEOUT) { 29277dcf5c3eSDave Chinner ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE))); 29287dcf5c3eSDave Chinner error = xfs_rename_alloc_whiteout(target_dp, &wip); 29297dcf5c3eSDave Chinner if (error) 29307dcf5c3eSDave Chinner return error; 2931f6bba201SDave Chinner 29327dcf5c3eSDave Chinner /* setup target dirent info as whiteout */ 29337dcf5c3eSDave Chinner src_name->type = XFS_DIR3_FT_CHRDEV; 29347dcf5c3eSDave Chinner } 29357dcf5c3eSDave Chinner 29367dcf5c3eSDave Chinner xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip, 2937f6bba201SDave Chinner inodes, &num_inodes); 2938f6bba201SDave Chinner 2939f6bba201SDave Chinner spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); 2940253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp); 29412451337dSDave Chinner if (error == -ENOSPC) { 2942f6bba201SDave Chinner spaceres = 0; 2943253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0, 2944253f4911SChristoph Hellwig &tp); 2945f6bba201SDave Chinner } 2946445883e8SDave Chinner if (error) 2947253f4911SChristoph Hellwig goto out_release_wip; 2948f6bba201SDave Chinner 2949f6bba201SDave Chinner /* 2950f6bba201SDave Chinner * Attach the dquots to the inodes 2951f6bba201SDave Chinner */ 2952f6bba201SDave Chinner error = xfs_qm_vop_rename_dqattach(inodes); 2953445883e8SDave Chinner if (error) 2954445883e8SDave Chinner goto out_trans_cancel; 2955f6bba201SDave Chinner 2956f6bba201SDave Chinner /* 2957f6bba201SDave Chinner * Lock all the participating inodes. Depending upon whether 2958f6bba201SDave Chinner * the target_name exists in the target directory, and 2959f6bba201SDave Chinner * whether the target directory is the same as the source 2960f6bba201SDave Chinner * directory, we can lock from 2 to 4 inodes. 2961f6bba201SDave Chinner */ 2962f6bba201SDave Chinner xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); 2963f6bba201SDave Chinner 2964f6bba201SDave Chinner /* 2965f6bba201SDave Chinner * Join all the inodes to the transaction. From this point on, 2966f6bba201SDave Chinner * we can rely on either trans_commit or trans_cancel to unlock 2967f6bba201SDave Chinner * them. 2968f6bba201SDave Chinner */ 296965523218SChristoph Hellwig xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); 2970f6bba201SDave Chinner if (new_parent) 297165523218SChristoph Hellwig xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); 2972f6bba201SDave Chinner xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); 2973f6bba201SDave Chinner if (target_ip) 2974f6bba201SDave Chinner xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); 29757dcf5c3eSDave Chinner if (wip) 29767dcf5c3eSDave Chinner xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL); 2977f6bba201SDave Chinner 2978f6bba201SDave Chinner /* 2979f6bba201SDave Chinner * If we are using project inheritance, we only allow renames 2980f6bba201SDave Chinner * into our tree when the project IDs are the same; else the 2981f6bba201SDave Chinner * tree quota mechanism would be circumvented. 2982f6bba201SDave Chinner */ 2983f6bba201SDave Chinner if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && 2984f6bba201SDave Chinner (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) { 29852451337dSDave Chinner error = -EXDEV; 2986445883e8SDave Chinner goto out_trans_cancel; 2987f6bba201SDave Chinner } 2988f6bba201SDave Chinner 29892c3234d1SDarrick J. 
Wong xfs_defer_init(&dfops, &first_block); 2990445883e8SDave Chinner 2991eeacd321SDave Chinner /* RENAME_EXCHANGE is unique from here on. */ 2992eeacd321SDave Chinner if (flags & RENAME_EXCHANGE) 2993eeacd321SDave Chinner return xfs_cross_rename(tp, src_dp, src_name, src_ip, 2994d31a1825SCarlos Maiolino target_dp, target_name, target_ip, 29952c3234d1SDarrick J. Wong &dfops, &first_block, spaceres); 2996d31a1825SCarlos Maiolino 2997d31a1825SCarlos Maiolino /* 2998f6bba201SDave Chinner * Set up the target. 2999f6bba201SDave Chinner */ 3000f6bba201SDave Chinner if (target_ip == NULL) { 3001f6bba201SDave Chinner /* 3002f6bba201SDave Chinner * If there's no space reservation, check the entry will 3003f6bba201SDave Chinner * fit before actually inserting it. 3004f6bba201SDave Chinner */ 300594f3cad5SEric Sandeen if (!spaceres) { 300694f3cad5SEric Sandeen error = xfs_dir_canenter(tp, target_dp, target_name); 3007f6bba201SDave Chinner if (error) 3008445883e8SDave Chinner goto out_trans_cancel; 300994f3cad5SEric Sandeen } 3010f6bba201SDave Chinner /* 3011f6bba201SDave Chinner * If target does not exist and the rename crosses 3012f6bba201SDave Chinner * directories, adjust the target directory link count 3013f6bba201SDave Chinner * to account for the ".." reference from the new entry. 3014f6bba201SDave Chinner */ 3015f6bba201SDave Chinner error = xfs_dir_createname(tp, target_dp, target_name, 3016f6bba201SDave Chinner src_ip->i_ino, &first_block, 30172c3234d1SDarrick J. Wong &dfops, spaceres); 3018f6bba201SDave Chinner if (error) 30194906e215SChristoph Hellwig goto out_bmap_cancel; 3020f6bba201SDave Chinner 3021f6bba201SDave Chinner xfs_trans_ichgtime(tp, target_dp, 3022f6bba201SDave Chinner XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3023f6bba201SDave Chinner 3024f6bba201SDave Chinner if (new_parent && src_is_directory) { 3025f6bba201SDave Chinner error = xfs_bumplink(tp, target_dp); 3026f6bba201SDave Chinner if (error) 30274906e215SChristoph Hellwig goto out_bmap_cancel; 3028f6bba201SDave Chinner } 3029f6bba201SDave Chinner } else { /* target_ip != NULL */ 3030f6bba201SDave Chinner /* 3031f6bba201SDave Chinner * If target exists and it's a directory, check that both 3032f6bba201SDave Chinner * target and source are directories and that target can be 3033f6bba201SDave Chinner * destroyed, or that neither is a directory. 3034f6bba201SDave Chinner */ 3035c19b3b05SDave Chinner if (S_ISDIR(VFS_I(target_ip)->i_mode)) { 3036f6bba201SDave Chinner /* 3037f6bba201SDave Chinner * Make sure target dir is empty. 3038f6bba201SDave Chinner */ 3039f6bba201SDave Chinner if (!(xfs_dir_isempty(target_ip)) || 304054d7b5c1SDave Chinner (VFS_I(target_ip)->i_nlink > 2)) { 30412451337dSDave Chinner error = -EEXIST; 3042445883e8SDave Chinner goto out_trans_cancel; 3043f6bba201SDave Chinner } 3044f6bba201SDave Chinner } 3045f6bba201SDave Chinner 3046f6bba201SDave Chinner /* 3047f6bba201SDave Chinner * Link the source inode under the target name. 3048f6bba201SDave Chinner * If the source inode is a directory and we are moving 3049f6bba201SDave Chinner * it across directories, its ".." entry will be 3050f6bba201SDave Chinner * inconsistent until we replace that down below. 3051f6bba201SDave Chinner * 3052f6bba201SDave Chinner * In case there is already an entry with the same 3053f6bba201SDave Chinner * name at the destination directory, remove it first. 3054f6bba201SDave Chinner */ 3055f6bba201SDave Chinner error = xfs_dir_replace(tp, target_dp, target_name, 3056f6bba201SDave Chinner src_ip->i_ino, 30572c3234d1SDarrick J. 
Wong &first_block, &dfops, spaceres); 3058f6bba201SDave Chinner if (error) 30594906e215SChristoph Hellwig goto out_bmap_cancel; 3060f6bba201SDave Chinner 3061f6bba201SDave Chinner xfs_trans_ichgtime(tp, target_dp, 3062f6bba201SDave Chinner XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3063f6bba201SDave Chinner 3064f6bba201SDave Chinner /* 3065f6bba201SDave Chinner * Decrement the link count on the target since the target 3066f6bba201SDave Chinner * dir no longer points to it. 3067f6bba201SDave Chinner */ 3068f6bba201SDave Chinner error = xfs_droplink(tp, target_ip); 3069f6bba201SDave Chinner if (error) 30704906e215SChristoph Hellwig goto out_bmap_cancel; 3071f6bba201SDave Chinner 3072f6bba201SDave Chinner if (src_is_directory) { 3073f6bba201SDave Chinner /* 3074f6bba201SDave Chinner * Drop the link from the old "." entry. 3075f6bba201SDave Chinner */ 3076f6bba201SDave Chinner error = xfs_droplink(tp, target_ip); 3077f6bba201SDave Chinner if (error) 30784906e215SChristoph Hellwig goto out_bmap_cancel; 3079f6bba201SDave Chinner } 3080f6bba201SDave Chinner } /* target_ip != NULL */ 3081f6bba201SDave Chinner 3082f6bba201SDave Chinner /* 3083f6bba201SDave Chinner * Remove the source. 3084f6bba201SDave Chinner */ 3085f6bba201SDave Chinner if (new_parent && src_is_directory) { 3086f6bba201SDave Chinner /* 3087f6bba201SDave Chinner * Rewrite the ".." entry to point to the new 3088f6bba201SDave Chinner * directory. 3089f6bba201SDave Chinner */ 3090f6bba201SDave Chinner error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, 3091f6bba201SDave Chinner target_dp->i_ino, 30922c3234d1SDarrick J. Wong &first_block, &dfops, spaceres); 30932451337dSDave Chinner ASSERT(error != -EEXIST); 3094f6bba201SDave Chinner if (error) 30954906e215SChristoph Hellwig goto out_bmap_cancel; 3096f6bba201SDave Chinner } 3097f6bba201SDave Chinner 3098f6bba201SDave Chinner /* 3099f6bba201SDave Chinner * We always want to hit the ctime on the source inode. 3100f6bba201SDave Chinner * 3101f6bba201SDave Chinner * This isn't strictly required by the standards since the source 3102f6bba201SDave Chinner * inode isn't really being changed, but old unix file systems did 3103f6bba201SDave Chinner * it and some incremental backup programs won't work without it. 3104f6bba201SDave Chinner */ 3105f6bba201SDave Chinner xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); 3106f6bba201SDave Chinner xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE); 3107f6bba201SDave Chinner 3108f6bba201SDave Chinner /* 3109f6bba201SDave Chinner * Adjust the link count on src_dp. This is necessary when 3110f6bba201SDave Chinner * renaming a directory, either within one parent when 3111f6bba201SDave Chinner * the target existed, or across two parent directories. 3112f6bba201SDave Chinner */ 3113f6bba201SDave Chinner if (src_is_directory && (new_parent || target_ip != NULL)) { 3114f6bba201SDave Chinner 3115f6bba201SDave Chinner /* 3116f6bba201SDave Chinner * Decrement link count on src_directory since the 3117f6bba201SDave Chinner * entry that's moved no longer points to it. 3118f6bba201SDave Chinner */ 3119f6bba201SDave Chinner error = xfs_droplink(tp, src_dp); 3120f6bba201SDave Chinner if (error) 31214906e215SChristoph Hellwig goto out_bmap_cancel; 3122f6bba201SDave Chinner } 3123f6bba201SDave Chinner 31247dcf5c3eSDave Chinner /* 31257dcf5c3eSDave Chinner * For whiteouts, we only need to update the source dirent with the 31267dcf5c3eSDave Chinner * inode number of the whiteout inode rather than removing it 31277dcf5c3eSDave Chinner * altogether. 
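/*
 * The droplink/bumplink calls above keep the parent directories' link
 * counts in step with their ".." back references.  A standalone userspace
 * demonstration of the underlying accounting (a directory's link count is
 * 2 plus its number of subdirectories); error handling is elided and the
 * paths are examples only:
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

static void show_nlink(const char *path)
{
	struct stat st;

	if (stat(path, &st) == 0)
		printf("%s: nlink=%lu\n", path, (unsigned long)st.st_nlink);
}

int main(void)
{
	mkdir("/tmp/parent", 0755);
	show_nlink("/tmp/parent");		/* "." plus the entry in /tmp: 2 */

	mkdir("/tmp/parent/child", 0755);
	show_nlink("/tmp/parent");		/* the child's ".." adds one: 3 */
	return 0;
}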
31287dcf5c3eSDave Chinner */ 31297dcf5c3eSDave Chinner if (wip) { 31307dcf5c3eSDave Chinner error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino, 31312c3234d1SDarrick J. Wong &first_block, &dfops, spaceres); 31327dcf5c3eSDave Chinner } else 3133f6bba201SDave Chinner error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, 31342c3234d1SDarrick J. Wong &first_block, &dfops, spaceres); 3135f6bba201SDave Chinner if (error) 31364906e215SChristoph Hellwig goto out_bmap_cancel; 3137f6bba201SDave Chinner 31387dcf5c3eSDave Chinner /* 31397dcf5c3eSDave Chinner * For whiteouts, we need to bump the link count on the whiteout inode. 31407dcf5c3eSDave Chinner * This means that failures all the way up to this point leave the inode 31417dcf5c3eSDave Chinner * on the unlinked list and so cleanup is a simple matter of dropping 31427dcf5c3eSDave Chinner * the remaining reference to it. If we fail here after bumping the link 31437dcf5c3eSDave Chinner * count, we're shutting down the filesystem so we'll never see the 31447dcf5c3eSDave Chinner * intermediate state on disk. 31457dcf5c3eSDave Chinner */ 31467dcf5c3eSDave Chinner if (wip) { 314754d7b5c1SDave Chinner ASSERT(VFS_I(wip)->i_nlink == 0); 31487dcf5c3eSDave Chinner error = xfs_bumplink(tp, wip); 31497dcf5c3eSDave Chinner if (error) 31504906e215SChristoph Hellwig goto out_bmap_cancel; 31517dcf5c3eSDave Chinner error = xfs_iunlink_remove(tp, wip); 31527dcf5c3eSDave Chinner if (error) 31534906e215SChristoph Hellwig goto out_bmap_cancel; 31547dcf5c3eSDave Chinner xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE); 31557dcf5c3eSDave Chinner 31567dcf5c3eSDave Chinner /* 31577dcf5c3eSDave Chinner * Now we have a real link, clear the "I'm a tmpfile" state 31587dcf5c3eSDave Chinner * flag from the inode so it doesn't accidentally get misused in 31597dcf5c3eSDave Chinner * future. 31607dcf5c3eSDave Chinner */ 31617dcf5c3eSDave Chinner VFS_I(wip)->i_state &= ~I_LINKABLE; 31627dcf5c3eSDave Chinner } 3163f6bba201SDave Chinner 3164f6bba201SDave Chinner xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3165f6bba201SDave Chinner xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); 3166f6bba201SDave Chinner if (new_parent) 3167f6bba201SDave Chinner xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); 3168f6bba201SDave Chinner 31692c3234d1SDarrick J. Wong error = xfs_finish_rename(tp, &dfops); 31707dcf5c3eSDave Chinner if (wip) 31717dcf5c3eSDave Chinner IRELE(wip); 31727dcf5c3eSDave Chinner return error; 3173f6bba201SDave Chinner 3174445883e8SDave Chinner out_bmap_cancel: 31752c3234d1SDarrick J. 
Wong xfs_defer_cancel(&dfops); 3176445883e8SDave Chinner out_trans_cancel: 31774906e215SChristoph Hellwig xfs_trans_cancel(tp); 3178253f4911SChristoph Hellwig out_release_wip: 31797dcf5c3eSDave Chinner if (wip) 31807dcf5c3eSDave Chinner IRELE(wip); 3181f6bba201SDave Chinner return error; 3182f6bba201SDave Chinner } 3183f6bba201SDave Chinner 3184bad55843SDavid Chinner STATIC int 3185bad55843SDavid Chinner xfs_iflush_cluster( 318619429363SDave Chinner struct xfs_inode *ip, 318719429363SDave Chinner struct xfs_buf *bp) 3188bad55843SDavid Chinner { 318919429363SDave Chinner struct xfs_mount *mp = ip->i_mount; 31905017e97dSDave Chinner struct xfs_perag *pag; 3191bad55843SDavid Chinner unsigned long first_index, mask; 3192c8f5f12eSDavid Chinner unsigned long inodes_per_cluster; 319319429363SDave Chinner int cilist_size; 319419429363SDave Chinner struct xfs_inode **cilist; 319519429363SDave Chinner struct xfs_inode *cip; 3196bad55843SDavid Chinner int nr_found; 3197bad55843SDavid Chinner int clcount = 0; 3198bad55843SDavid Chinner int bufwasdelwri; 3199bad55843SDavid Chinner int i; 3200bad55843SDavid Chinner 32015017e97dSDave Chinner pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 3202bad55843SDavid Chinner 32030f49efd8SJie Liu inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog; 320419429363SDave Chinner cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); 320519429363SDave Chinner cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS); 320619429363SDave Chinner if (!cilist) 320744b56e0aSDave Chinner goto out_put; 3208bad55843SDavid Chinner 32090f49efd8SJie Liu mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1); 3210bad55843SDavid Chinner first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; 32111a3e8f3dSDave Chinner rcu_read_lock(); 3212bad55843SDavid Chinner /* really need a gang lookup range call here */ 321319429363SDave Chinner nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist, 3214c8f5f12eSDavid Chinner first_index, inodes_per_cluster); 3215bad55843SDavid Chinner if (nr_found == 0) 3216bad55843SDavid Chinner goto out_free; 3217bad55843SDavid Chinner 3218bad55843SDavid Chinner for (i = 0; i < nr_found; i++) { 321919429363SDave Chinner cip = cilist[i]; 322019429363SDave Chinner if (cip == ip) 3221bad55843SDavid Chinner continue; 32221a3e8f3dSDave Chinner 32231a3e8f3dSDave Chinner /* 32241a3e8f3dSDave Chinner * because this is an RCU protected lookup, we could find a 32251a3e8f3dSDave Chinner * recently freed or even reallocated inode during the lookup. 32261a3e8f3dSDave Chinner * We need to check under the i_flags_lock for a valid inode 32271a3e8f3dSDave Chinner * here. Skip it if it is not valid or the wrong inode. 32281a3e8f3dSDave Chinner */ 322919429363SDave Chinner spin_lock(&cip->i_flags_lock); 323019429363SDave Chinner if (!cip->i_ino || 323119429363SDave Chinner __xfs_iflags_test(cip, XFS_ISTALE)) { 323219429363SDave Chinner spin_unlock(&cip->i_flags_lock); 32331a3e8f3dSDave Chinner continue; 32341a3e8f3dSDave Chinner } 32355a90e53eSDave Chinner 32365a90e53eSDave Chinner /* 32375a90e53eSDave Chinner * Once we fall off the end of the cluster, no point checking 32385a90e53eSDave Chinner * any more inodes in the list because they will also all be 32395a90e53eSDave Chinner * outside the cluster. 
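/*
 * Standalone sketch of the cluster masking arithmetic used above: with a
 * power-of-two number of inodes per cluster, masking the AG inode number
 * gives the first inode of its cluster, and any inode whose masked value
 * differs lies outside that cluster, which is exactly the membership check
 * that follows.  The numbers here are examples only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t inodes_per_cluster = 32;	/* must be a power of two */
	uint32_t mask = ~(inodes_per_cluster - 1);
	uint32_t agino = 1000;
	uint32_t first_index = agino & mask;	/* 992 */

	for (uint32_t i = 998; i <= 1026; i += 14) {
		int same = ((i & mask) == first_index);

		printf("agino %u is %s cluster [%u, %u]\n", i,
		       same ? "inside" : "outside",
		       first_index, first_index + inodes_per_cluster - 1);
	}
	return 0;
}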
32405a90e53eSDave Chinner */ 324119429363SDave Chinner if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) { 324219429363SDave Chinner spin_unlock(&cip->i_flags_lock); 32435a90e53eSDave Chinner break; 32445a90e53eSDave Chinner } 324519429363SDave Chinner spin_unlock(&cip->i_flags_lock); 32461a3e8f3dSDave Chinner 3247bad55843SDavid Chinner /* 3248bad55843SDavid Chinner * Do an un-protected check to see if the inode is dirty and 3249bad55843SDavid Chinner * is a candidate for flushing. These checks will be repeated 3250bad55843SDavid Chinner * later after the appropriate locks are acquired. 3251bad55843SDavid Chinner */ 325219429363SDave Chinner if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0) 3253bad55843SDavid Chinner continue; 3254bad55843SDavid Chinner 3255bad55843SDavid Chinner /* 3256bad55843SDavid Chinner * Try to get locks. If any are unavailable or it is pinned, 3257bad55843SDavid Chinner * then this inode cannot be flushed and is skipped. 3258bad55843SDavid Chinner */ 3259bad55843SDavid Chinner 326019429363SDave Chinner if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED)) 3261bad55843SDavid Chinner continue; 326219429363SDave Chinner if (!xfs_iflock_nowait(cip)) { 326319429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3264bad55843SDavid Chinner continue; 3265bad55843SDavid Chinner } 326619429363SDave Chinner if (xfs_ipincount(cip)) { 326719429363SDave Chinner xfs_ifunlock(cip); 326819429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3269bad55843SDavid Chinner continue; 3270bad55843SDavid Chinner } 3271bad55843SDavid Chinner 32728a17d7ddSDave Chinner 32738a17d7ddSDave Chinner /* 32748a17d7ddSDave Chinner * Check the inode number again, just to be certain we are not 32758a17d7ddSDave Chinner * racing with freeing in xfs_reclaim_inode(). See the comments 32768a17d7ddSDave Chinner * in that function for more information as to why the initial 32778a17d7ddSDave Chinner * check is not sufficient. 32788a17d7ddSDave Chinner */ 327919429363SDave Chinner if (!cip->i_ino) { 328019429363SDave Chinner xfs_ifunlock(cip); 328119429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3282bad55843SDavid Chinner continue; 3283bad55843SDavid Chinner } 3284bad55843SDavid Chinner 3285bad55843SDavid Chinner /* 3286bad55843SDavid Chinner * arriving here means that this inode can be flushed. First 3287bad55843SDavid Chinner * re-check that it's dirty before flushing. 
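/*
 * Sketch of the "cheap unlocked check, then recheck under the lock" pattern
 * noted above: the unlocked test only filters obviously uninteresting
 * objects, and the decision that matters is repeated once the lock is held.
 * Compile with -pthread; all names here are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct item {
	pthread_mutex_t lock;
	bool dirty;
};

static bool flush_if_dirty(struct item *it)
{
	/* unlocked fast path: skip items that already look clean */
	if (!it->dirty)
		return false;

	if (pthread_mutex_trylock(&it->lock) != 0)
		return false;		/* busy: skip it this time around */

	/* the state may have changed; recheck now that we hold the lock */
	bool flushed = false;
	if (it->dirty) {
		it->dirty = false;	/* stand-in for the real flush */
		flushed = true;
	}
	pthread_mutex_unlock(&it->lock);
	return flushed;
}

int main(void)
{
	struct item it = { .lock = PTHREAD_MUTEX_INITIALIZER, .dirty = true };

	printf("flushed: %d\n", flush_if_dirty(&it));
	return 0;
}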
3288bad55843SDavid Chinner */ 328919429363SDave Chinner if (!xfs_inode_clean(cip)) { 3290bad55843SDavid Chinner int error; 329119429363SDave Chinner error = xfs_iflush_int(cip, bp); 3292bad55843SDavid Chinner if (error) { 329319429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3294bad55843SDavid Chinner goto cluster_corrupt_out; 3295bad55843SDavid Chinner } 3296bad55843SDavid Chinner clcount++; 3297bad55843SDavid Chinner } else { 329819429363SDave Chinner xfs_ifunlock(cip); 3299bad55843SDavid Chinner } 330019429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3301bad55843SDavid Chinner } 3302bad55843SDavid Chinner 3303bad55843SDavid Chinner if (clcount) { 3304ff6d6af2SBill O'Donnell XFS_STATS_INC(mp, xs_icluster_flushcnt); 3305ff6d6af2SBill O'Donnell XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount); 3306bad55843SDavid Chinner } 3307bad55843SDavid Chinner 3308bad55843SDavid Chinner out_free: 33091a3e8f3dSDave Chinner rcu_read_unlock(); 331019429363SDave Chinner kmem_free(cilist); 331144b56e0aSDave Chinner out_put: 331244b56e0aSDave Chinner xfs_perag_put(pag); 3313bad55843SDavid Chinner return 0; 3314bad55843SDavid Chinner 3315bad55843SDavid Chinner 3316bad55843SDavid Chinner cluster_corrupt_out: 3317bad55843SDavid Chinner /* 3318bad55843SDavid Chinner * Corruption detected in the clustering loop. Invalidate the 3319bad55843SDavid Chinner * inode buffer and shut down the filesystem. 3320bad55843SDavid Chinner */ 33211a3e8f3dSDave Chinner rcu_read_unlock(); 3322bad55843SDavid Chinner /* 332343ff2122SChristoph Hellwig * Clean up the buffer. If it was delwri, just release it -- 3324bad55843SDavid Chinner * brelse can handle it with no problems. If not, shut down the 3325bad55843SDavid Chinner * filesystem before releasing the buffer. 3326bad55843SDavid Chinner */ 332743ff2122SChristoph Hellwig bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q); 3328bad55843SDavid Chinner if (bufwasdelwri) 3329bad55843SDavid Chinner xfs_buf_relse(bp); 3330bad55843SDavid Chinner 3331bad55843SDavid Chinner xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3332bad55843SDavid Chinner 3333bad55843SDavid Chinner if (!bufwasdelwri) { 3334bad55843SDavid Chinner /* 3335bad55843SDavid Chinner * Just like incore_relse: if we have b_iodone functions, 3336bad55843SDavid Chinner * mark the buffer as an error and call them. Otherwise 3337bad55843SDavid Chinner * mark it as stale and brelse. 3338bad55843SDavid Chinner */ 3339cb669ca5SChristoph Hellwig if (bp->b_iodone) { 3340b0388bf1SDave Chinner bp->b_flags &= ~XBF_DONE; 3341c867cb61SChristoph Hellwig xfs_buf_stale(bp); 33422451337dSDave Chinner xfs_buf_ioerror(bp, -EIO); 3343e8aaba9aSDave Chinner xfs_buf_ioend(bp); 3344bad55843SDavid Chinner } else { 3345c867cb61SChristoph Hellwig xfs_buf_stale(bp); 3346bad55843SDavid Chinner xfs_buf_relse(bp); 3347bad55843SDavid Chinner } 3348bad55843SDavid Chinner } 3349bad55843SDavid Chinner 3350bad55843SDavid Chinner /* 3351bad55843SDavid Chinner * Unlocks the flush lock 3352bad55843SDavid Chinner */ 335319429363SDave Chinner xfs_iflush_abort(cip, false); 335419429363SDave Chinner kmem_free(cilist); 335544b56e0aSDave Chinner xfs_perag_put(pag); 33562451337dSDave Chinner return -EFSCORRUPTED; 3357bad55843SDavid Chinner } 3358bad55843SDavid Chinner 33591da177e4SLinus Torvalds /* 33604c46819aSChristoph Hellwig * Flush dirty inode metadata into the backing buffer. 33614c46819aSChristoph Hellwig * 33624c46819aSChristoph Hellwig * The caller must have the inode lock and the inode flush lock held. 
The 33634c46819aSChristoph Hellwig * inode lock will still be held upon return to the caller, and the inode 33644c46819aSChristoph Hellwig * flush lock will be released after the inode has reached the disk. 33654c46819aSChristoph Hellwig * 33664c46819aSChristoph Hellwig * The caller must write out the buffer returned in *bpp and release it. 33671da177e4SLinus Torvalds */ 33681da177e4SLinus Torvalds int 33691da177e4SLinus Torvalds xfs_iflush( 33704c46819aSChristoph Hellwig struct xfs_inode *ip, 33714c46819aSChristoph Hellwig struct xfs_buf **bpp) 33721da177e4SLinus Torvalds { 33734c46819aSChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 3374b1438f47SDave Chinner struct xfs_buf *bp = NULL; 33754c46819aSChristoph Hellwig struct xfs_dinode *dip; 33761da177e4SLinus Torvalds int error; 33771da177e4SLinus Torvalds 3378ff6d6af2SBill O'Donnell XFS_STATS_INC(mp, xs_iflush_count); 33791da177e4SLinus Torvalds 3380579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3381474fce06SChristoph Hellwig ASSERT(xfs_isiflocked(ip)); 33821da177e4SLinus Torvalds ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 33838096b1ebSChristoph Hellwig ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); 33841da177e4SLinus Torvalds 33854c46819aSChristoph Hellwig *bpp = NULL; 33861da177e4SLinus Torvalds 33871da177e4SLinus Torvalds xfs_iunpin_wait(ip); 33881da177e4SLinus Torvalds 33891da177e4SLinus Torvalds /* 33904b6a4688SDave Chinner * For stale inodes we cannot rely on the backing buffer remaining 33914b6a4688SDave Chinner * stale in cache for the remaining life of the stale inode and so 3392475ee413SChristoph Hellwig * xfs_imap_to_bp() below may give us a buffer that no longer contains 33934b6a4688SDave Chinner * inodes below. We have to check this after ensuring the inode is 33944b6a4688SDave Chinner * unpinned so that it is safe to reclaim the stale inode after the 33954b6a4688SDave Chinner * flush call. 33964b6a4688SDave Chinner */ 33974b6a4688SDave Chinner if (xfs_iflags_test(ip, XFS_ISTALE)) { 33984b6a4688SDave Chinner xfs_ifunlock(ip); 33994b6a4688SDave Chinner return 0; 34004b6a4688SDave Chinner } 34014b6a4688SDave Chinner 34024b6a4688SDave Chinner /* 34031da177e4SLinus Torvalds * This may have been unpinned because the filesystem is shutting 34041da177e4SLinus Torvalds * down forcibly. If that's the case we must not write this inode 340532ce90a4SChristoph Hellwig * to disk, because the log record didn't make it to disk. 340632ce90a4SChristoph Hellwig * 340732ce90a4SChristoph Hellwig * We also have to remove the log item from the AIL in this case, 340832ce90a4SChristoph Hellwig * as we wait for an empty AIL as part of the unmount process. 34091da177e4SLinus Torvalds */ 34101da177e4SLinus Torvalds if (XFS_FORCED_SHUTDOWN(mp)) { 34112451337dSDave Chinner error = -EIO; 341232ce90a4SChristoph Hellwig goto abort_out; 34131da177e4SLinus Torvalds } 34141da177e4SLinus Torvalds 34151da177e4SLinus Torvalds /* 3416b1438f47SDave Chinner * Get the buffer containing the on-disk inode. We are doing a try-lock 3417b1438f47SDave Chinner * operation here, so we may get an EAGAIN error. In that case, we 3418b1438f47SDave Chinner * simply want to return with the inode still dirty. 3419b1438f47SDave Chinner * 3420b1438f47SDave Chinner * If we get any other error, we effectively have a corruption situation 3421b1438f47SDave Chinner * and we cannot flush the inode, so we treat it the same as failing 3422b1438f47SDave Chinner * xfs_iflush_int(). 
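/*
 * Sketch of the error-handling split described above: a trylock failure
 * (-EAGAIN) just means "leave the inode dirty and come back later", while
 * any other failure is treated as corruption and aborts the flush.  The
 * helper and its error values are illustrative stand-ins.
 */
#include <errno.h>
#include <stdio.h>

static int try_flush(int simulated_error)
{
	return simulated_error;		/* stand-in for the buffer lookup */
}

static const char *handle_flush(int simulated_error)
{
	int error = try_flush(simulated_error);

	if (error == -EAGAIN)
		return "busy: requeue for a later flush";
	if (error)
		return "hard error: abort and report corruption";
	return "flushed";
}

int main(void)
{
	printf("%s\n", handle_flush(0));
	printf("%s\n", handle_flush(-EAGAIN));
	printf("%s\n", handle_flush(-EIO));
	return 0;
}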
3423a3f74ffbSDavid Chinner */ 3424475ee413SChristoph Hellwig error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK, 3425475ee413SChristoph Hellwig 0); 3426b1438f47SDave Chinner if (error == -EAGAIN) { 3427a3f74ffbSDavid Chinner xfs_ifunlock(ip); 3428a3f74ffbSDavid Chinner return error; 3429a3f74ffbSDavid Chinner } 3430b1438f47SDave Chinner if (error) 3431b1438f47SDave Chinner goto corrupt_out; 3432a3f74ffbSDavid Chinner 3433a3f74ffbSDavid Chinner /* 34341da177e4SLinus Torvalds * First flush out the inode that xfs_iflush was called with. 34351da177e4SLinus Torvalds */ 34361da177e4SLinus Torvalds error = xfs_iflush_int(ip, bp); 3437bad55843SDavid Chinner if (error) 34381da177e4SLinus Torvalds goto corrupt_out; 34391da177e4SLinus Torvalds 34401da177e4SLinus Torvalds /* 3441a3f74ffbSDavid Chinner * If the buffer is pinned then push on the log now so we won't 3442a3f74ffbSDavid Chinner * get stuck waiting in the write for too long. 3443a3f74ffbSDavid Chinner */ 3444811e64c7SChandra Seetharaman if (xfs_buf_ispinned(bp)) 3445a14a348bSChristoph Hellwig xfs_log_force(mp, 0); 3446a3f74ffbSDavid Chinner 3447a3f74ffbSDavid Chinner /* 34481da177e4SLinus Torvalds * inode clustering: 34491da177e4SLinus Torvalds * see if other inodes can be gathered into this write 34501da177e4SLinus Torvalds */ 3451bad55843SDavid Chinner error = xfs_iflush_cluster(ip, bp); 3452bad55843SDavid Chinner if (error) 34531da177e4SLinus Torvalds goto cluster_corrupt_out; 34541da177e4SLinus Torvalds 34554c46819aSChristoph Hellwig *bpp = bp; 34564c46819aSChristoph Hellwig return 0; 34571da177e4SLinus Torvalds 34581da177e4SLinus Torvalds corrupt_out: 3459b1438f47SDave Chinner if (bp) 34601da177e4SLinus Torvalds xfs_buf_relse(bp); 34617d04a335SNathan Scott xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 34621da177e4SLinus Torvalds cluster_corrupt_out: 34632451337dSDave Chinner error = -EFSCORRUPTED; 346432ce90a4SChristoph Hellwig abort_out: 34651da177e4SLinus Torvalds /* 34661da177e4SLinus Torvalds * Unlocks the flush lock 34671da177e4SLinus Torvalds */ 346804913fddSDave Chinner xfs_iflush_abort(ip, false); 346932ce90a4SChristoph Hellwig return error; 34701da177e4SLinus Torvalds } 34711da177e4SLinus Torvalds 34721da177e4SLinus Torvalds STATIC int 34731da177e4SLinus Torvalds xfs_iflush_int( 347493848a99SChristoph Hellwig struct xfs_inode *ip, 347593848a99SChristoph Hellwig struct xfs_buf *bp) 34761da177e4SLinus Torvalds { 347793848a99SChristoph Hellwig struct xfs_inode_log_item *iip = ip->i_itemp; 347893848a99SChristoph Hellwig struct xfs_dinode *dip; 347993848a99SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 34801da177e4SLinus Torvalds 3481579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3482474fce06SChristoph Hellwig ASSERT(xfs_isiflocked(ip)); 34831da177e4SLinus Torvalds ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 34848096b1ebSChristoph Hellwig ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); 348593848a99SChristoph Hellwig ASSERT(iip != NULL && iip->ili_fields != 0); 3486263997a6SDave Chinner ASSERT(ip->i_d.di_version > 1); 34871da177e4SLinus Torvalds 34881da177e4SLinus Torvalds /* set *dip = inode's place in the buffer */ 348988ee2df7SChristoph Hellwig dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); 34901da177e4SLinus Torvalds 349169ef921bSChristoph Hellwig if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), 34921da177e4SLinus Torvalds mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 34936a19d939SDave Chinner xfs_alert_tag(mp, 
XFS_PTAG_IFLUSH, 34946a19d939SDave Chinner "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p", 34956a19d939SDave Chinner __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); 34961da177e4SLinus Torvalds goto corrupt_out; 34971da177e4SLinus Torvalds } 3498c19b3b05SDave Chinner if (S_ISREG(VFS_I(ip)->i_mode)) { 34991da177e4SLinus Torvalds if (XFS_TEST_ERROR( 35001da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 35011da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 35021da177e4SLinus Torvalds mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 35036a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 35046a19d939SDave Chinner "%s: Bad regular inode %Lu, ptr 0x%p", 35056a19d939SDave Chinner __func__, ip->i_ino, ip); 35061da177e4SLinus Torvalds goto corrupt_out; 35071da177e4SLinus Torvalds } 3508c19b3b05SDave Chinner } else if (S_ISDIR(VFS_I(ip)->i_mode)) { 35091da177e4SLinus Torvalds if (XFS_TEST_ERROR( 35101da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 35111da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 35121da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 35131da177e4SLinus Torvalds mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 35146a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 35156a19d939SDave Chinner "%s: Bad directory inode %Lu, ptr 0x%p", 35166a19d939SDave Chinner __func__, ip->i_ino, ip); 35171da177e4SLinus Torvalds goto corrupt_out; 35181da177e4SLinus Torvalds } 35191da177e4SLinus Torvalds } 35201da177e4SLinus Torvalds if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 35211da177e4SLinus Torvalds ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 35221da177e4SLinus Torvalds XFS_RANDOM_IFLUSH_5)) { 35236a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 35246a19d939SDave Chinner "%s: detected corrupt incore inode %Lu, " 35256a19d939SDave Chinner "total extents = %d, nblocks = %Ld, ptr 0x%p", 35266a19d939SDave Chinner __func__, ip->i_ino, 35271da177e4SLinus Torvalds ip->i_d.di_nextents + ip->i_d.di_anextents, 35286a19d939SDave Chinner ip->i_d.di_nblocks, ip); 35291da177e4SLinus Torvalds goto corrupt_out; 35301da177e4SLinus Torvalds } 35311da177e4SLinus Torvalds if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 35321da177e4SLinus Torvalds mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 35336a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 35346a19d939SDave Chinner "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 35356a19d939SDave Chinner __func__, ip->i_ino, ip->i_d.di_forkoff, ip); 35361da177e4SLinus Torvalds goto corrupt_out; 35371da177e4SLinus Torvalds } 3538e60896d8SDave Chinner 35391da177e4SLinus Torvalds /* 3540263997a6SDave Chinner * Inode item log recovery for v2 inodes is dependent on the 3541e60896d8SDave Chinner * di_flushiter count for correct sequencing. We bump the flush 3542e60896d8SDave Chinner * iteration count so we can detect flushes which postdate a log record 3543e60896d8SDave Chinner * during recovery. This is redundant as we now log every change and 3544e60896d8SDave Chinner * hence this can't happen but we still need to do it to ensure 3545e60896d8SDave Chinner * backwards compatibility with old kernels that predate logging all 3546e60896d8SDave Chinner * inode changes. 35471da177e4SLinus Torvalds */ 3548e60896d8SDave Chinner if (ip->i_d.di_version < 3) 35491da177e4SLinus Torvalds ip->i_d.di_flushiter++; 35501da177e4SLinus Torvalds 3551005c5db8SDarrick J. Wong /* Check the inline directory data.
*/ 3552005c5db8SDarrick J. Wong if (S_ISDIR(VFS_I(ip)->i_mode) && 3553005c5db8SDarrick J. Wong ip->i_d.di_format == XFS_DINODE_FMT_LOCAL && 3554005c5db8SDarrick J. Wong xfs_dir2_sf_verify(ip)) 3555005c5db8SDarrick J. Wong goto corrupt_out; 3556005c5db8SDarrick J. Wong 35571da177e4SLinus Torvalds /* 35583987848cSDave Chinner * Copy the dirty parts of the inode into the on-disk inode. We always 35593987848cSDave Chinner * copy out the core of the inode, because if the inode is dirty at all 35603987848cSDave Chinner * the core must be. 35611da177e4SLinus Torvalds */ 356293f958f9SDave Chinner xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn); 35631da177e4SLinus Torvalds 35641da177e4SLinus Torvalds /* Wrap, we never let the log put out DI_MAX_FLUSH */ 35651da177e4SLinus Torvalds if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 35661da177e4SLinus Torvalds ip->i_d.di_flushiter = 0; 35671da177e4SLinus Torvalds 3568005c5db8SDarrick J. Wong xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3569005c5db8SDarrick J. Wong if (XFS_IFORK_Q(ip)) 3570005c5db8SDarrick J. Wong xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); 35711da177e4SLinus Torvalds xfs_inobp_check(mp, bp); 35721da177e4SLinus Torvalds 35731da177e4SLinus Torvalds /* 3574f5d8d5c4SChristoph Hellwig * We've recorded everything logged in the inode, so we'd like to clear 3575f5d8d5c4SChristoph Hellwig * the ili_fields bits so we don't log and flush things unnecessarily. 3576f5d8d5c4SChristoph Hellwig * However, we can't stop logging all this information until the data 3577f5d8d5c4SChristoph Hellwig * we've copied into the disk buffer is written to disk. If we did we 3578f5d8d5c4SChristoph Hellwig * might overwrite the copy of the inode in the log with all the data 3579f5d8d5c4SChristoph Hellwig * after re-logging only part of it, and in the face of a crash we 3580f5d8d5c4SChristoph Hellwig * wouldn't have all the data we need to recover. 35811da177e4SLinus Torvalds * 3582f5d8d5c4SChristoph Hellwig * What we do is move the bits to the ili_last_fields field. When 3583f5d8d5c4SChristoph Hellwig * logging the inode, these bits are moved back to the ili_fields field. 3584f5d8d5c4SChristoph Hellwig * In the xfs_iflush_done() routine we clear ili_last_fields, since we 3585f5d8d5c4SChristoph Hellwig * know that the information those bits represent is permanently on 3586f5d8d5c4SChristoph Hellwig * disk. As long as the flush completes before the inode is logged 3587f5d8d5c4SChristoph Hellwig * again, then both ili_fields and ili_last_fields will be cleared. 35881da177e4SLinus Torvalds * 3589f5d8d5c4SChristoph Hellwig * We can play with the ili_fields bits here, because the inode lock 3590f5d8d5c4SChristoph Hellwig * must be held exclusively in order to set bits there and the flush 3591f5d8d5c4SChristoph Hellwig * lock protects the ili_last_fields bits. Set ili_logged so the flush 3592f5d8d5c4SChristoph Hellwig * done routine can tell whether or not to look in the AIL. Also, store 3593f5d8d5c4SChristoph Hellwig * the current LSN of the inode so that we can tell whether the item has 3594f5d8d5c4SChristoph Hellwig * moved in the AIL from xfs_iflush_done(). In order to read the lsn we 3595f5d8d5c4SChristoph Hellwig * need the AIL lock, because it is a 64 bit value that cannot be read 3596f5d8d5c4SChristoph Hellwig * atomically. 
35971da177e4SLinus Torvalds */ 3598f5d8d5c4SChristoph Hellwig iip->ili_last_fields = iip->ili_fields; 3599f5d8d5c4SChristoph Hellwig iip->ili_fields = 0; 3600fc0561ceSDave Chinner iip->ili_fsync_fields = 0; 36011da177e4SLinus Torvalds iip->ili_logged = 1; 36021da177e4SLinus Torvalds 36037b2e2a31SDavid Chinner xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 36047b2e2a31SDavid Chinner &iip->ili_item.li_lsn); 36051da177e4SLinus Torvalds 36061da177e4SLinus Torvalds /* 36071da177e4SLinus Torvalds * Attach the function xfs_iflush_done to the inode's 36081da177e4SLinus Torvalds * buffer. This will remove the inode from the AIL 36091da177e4SLinus Torvalds * and unlock the inode's flush lock when the inode is 36101da177e4SLinus Torvalds * completely written to disk. 36111da177e4SLinus Torvalds */ 3612ca30b2a7SChristoph Hellwig xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); 36131da177e4SLinus Torvalds 361493848a99SChristoph Hellwig /* generate the checksum. */ 361593848a99SChristoph Hellwig xfs_dinode_calc_crc(mp, dip); 361693848a99SChristoph Hellwig 3617adadbeefSChristoph Hellwig ASSERT(bp->b_fspriv != NULL); 3618cb669ca5SChristoph Hellwig ASSERT(bp->b_iodone != NULL); 36191da177e4SLinus Torvalds return 0; 36201da177e4SLinus Torvalds 36211da177e4SLinus Torvalds corrupt_out: 36222451337dSDave Chinner return -EFSCORRUPTED; 36231da177e4SLinus Torvalds } 3624
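/*
 * Illustrative sketch, not part of xfs_inode.c: a stand-alone model of the
 * ili_fields / ili_last_fields handshake that the comment block in
 * xfs_iflush_int() above describes.  Everything below -- struct demo_log_item,
 * demo_log(), demo_flush_start(), demo_flush_done() -- is hypothetical and
 * exists only to show the ordering: logging marks fields dirty, starting a
 * flush moves the dirty bits aside while the buffer is in flight, and flush
 * completion clears them once the copy has reached disk.  It makes no claims
 * about the real locking (ILOCK, flush lock, AIL lock) beyond what the
 * comment above states.
 */
#include <stdio.h>

struct demo_log_item {			/* hypothetical stand-in for xfs_inode_log_item */
	unsigned int	fields;		/* dirtied since the last flush started */
	unsigned int	last_fields;	/* dirty bits covered by the flush in flight */
	int		logged;		/* tells the flush-done routine to search the AIL */
};

/* A transaction logs part of the inode: just mark those fields dirty. */
static void demo_log(struct demo_log_item *iip, unsigned int flags)
{
	iip->fields |= flags;
}

/*
 * Start a flush: move the accumulated dirty bits to last_fields so that any
 * relogging which happens while the buffer is being written starts from a
 * clean fields mask.
 */
static void demo_flush_start(struct demo_log_item *iip)
{
	iip->last_fields = iip->fields;
	iip->fields = 0;
	iip->logged = 1;
}

/* Buffer I/O completed: the information in last_fields is now on disk. */
static void demo_flush_done(struct demo_log_item *iip)
{
	iip->last_fields = 0;
	iip->logged = 0;
}

int main(void)
{
	struct demo_log_item iip = { 0, 0, 0 };

	demo_log(&iip, 0x1);		/* core dirtied and logged */
	demo_flush_start(&iip);		/* copy written into the inode buffer */
	demo_log(&iip, 0x2);		/* relogged while the flush is in flight */
	demo_flush_done(&iip);		/* buffer reaches disk */

	/* fields still shows the relogged 0x2; last_fields is clear again. */
	printf("fields=0x%x last_fields=0x%x logged=%d\n",
	       iip.fields, iip.last_fields, iip.logged);
	return 0;
}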