10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0 21da177e4SLinus Torvalds /* 33e57ecf6SOlaf Weber * Copyright (c) 2000-2006 Silicon Graphics, Inc. 47b718769SNathan Scott * All Rights Reserved. 51da177e4SLinus Torvalds */ 640ebd81dSRobert P. J. Day #include <linux/log2.h> 7f0e28280SJeff Layton #include <linux/iversion.h> 840ebd81dSRobert P. J. Day 91da177e4SLinus Torvalds #include "xfs.h" 10a844f451SNathan Scott #include "xfs_fs.h" 1170a9883cSDave Chinner #include "xfs_shared.h" 12239880efSDave Chinner #include "xfs_format.h" 13239880efSDave Chinner #include "xfs_log_format.h" 14239880efSDave Chinner #include "xfs_trans_resv.h" 151da177e4SLinus Torvalds #include "xfs_sb.h" 161da177e4SLinus Torvalds #include "xfs_mount.h" 173ab78df2SDarrick J. Wong #include "xfs_defer.h" 18a4fbe6abSDave Chinner #include "xfs_inode.h" 1957062787SDave Chinner #include "xfs_da_format.h" 20c24b5dfaSDave Chinner #include "xfs_da_btree.h" 21c24b5dfaSDave Chinner #include "xfs_dir2.h" 22a844f451SNathan Scott #include "xfs_attr_sf.h" 23c24b5dfaSDave Chinner #include "xfs_attr.h" 24239880efSDave Chinner #include "xfs_trans_space.h" 25239880efSDave Chinner #include "xfs_trans.h" 261da177e4SLinus Torvalds #include "xfs_buf_item.h" 27a844f451SNathan Scott #include "xfs_inode_item.h" 28a844f451SNathan Scott #include "xfs_ialloc.h" 29a844f451SNathan Scott #include "xfs_bmap.h" 3068988114SDave Chinner #include "xfs_bmap_util.h" 31e9e899a2SDarrick J. Wong #include "xfs_errortag.h" 321da177e4SLinus Torvalds #include "xfs_error.h" 331da177e4SLinus Torvalds #include "xfs_quota.h" 342a82b8beSDavid Chinner #include "xfs_filestream.h" 3593848a99SChristoph Hellwig #include "xfs_cksum.h" 360b1b213fSChristoph Hellwig #include "xfs_trace.h" 3733479e05SDave Chinner #include "xfs_icache.h" 38c24b5dfaSDave Chinner #include "xfs_symlink.h" 39239880efSDave Chinner #include "xfs_trans_priv.h" 40239880efSDave Chinner #include "xfs_log.h" 41a4fbe6abSDave Chinner #include "xfs_bmap_btree.h" 42aa8968f2SDarrick J. Wong #include "xfs_reflink.h" 43005c5db8SDarrick J. Wong #include "xfs_dir2_priv.h" 441da177e4SLinus Torvalds 451da177e4SLinus Torvalds kmem_zone_t *xfs_inode_zone; 461da177e4SLinus Torvalds 471da177e4SLinus Torvalds /* 488f04c47aSChristoph Hellwig * Used in xfs_itruncate_extents(). This is the maximum number of extents 491da177e4SLinus Torvalds * freed from a file in a single transaction. 501da177e4SLinus Torvalds */ 511da177e4SLinus Torvalds #define XFS_ITRUNC_MAX_EXTENTS 2 521da177e4SLinus Torvalds 5354d7b5c1SDave Chinner STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *); 5454d7b5c1SDave Chinner STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *); 5554d7b5c1SDave Chinner STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *); 56ab297431SZhi Yong Wu 572a0ec1d9SDave Chinner /* 582a0ec1d9SDave Chinner * helper function to extract extent size hint from inode 592a0ec1d9SDave Chinner */ 602a0ec1d9SDave Chinner xfs_extlen_t 612a0ec1d9SDave Chinner xfs_get_extsz_hint( 622a0ec1d9SDave Chinner struct xfs_inode *ip) 632a0ec1d9SDave Chinner { 642a0ec1d9SDave Chinner if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize) 652a0ec1d9SDave Chinner return ip->i_d.di_extsize; 662a0ec1d9SDave Chinner if (XFS_IS_REALTIME_INODE(ip)) 672a0ec1d9SDave Chinner return ip->i_mount->m_sb.sb_rextsize; 682a0ec1d9SDave Chinner return 0; 692a0ec1d9SDave Chinner } 702a0ec1d9SDave Chinner 71fa96acadSDave Chinner /* 72f7ca3522SDarrick J. Wong * Helper function to extract CoW extent size hint from inode. 
73f7ca3522SDarrick J. Wong * Between the extent size hint and the CoW extent size hint, we
74e153aa79SDarrick J. Wong * return the greater of the two. If the value is zero (automatic),
75e153aa79SDarrick J. Wong * use the default size.
76f7ca3522SDarrick J. Wong */
77f7ca3522SDarrick J. Wong xfs_extlen_t
78f7ca3522SDarrick J. Wong xfs_get_cowextsz_hint(
79f7ca3522SDarrick J. Wong struct xfs_inode *ip)
80f7ca3522SDarrick J. Wong {
81f7ca3522SDarrick J. Wong xfs_extlen_t a, b;
82f7ca3522SDarrick J. Wong
83f7ca3522SDarrick J. Wong a = 0;
84f7ca3522SDarrick J. Wong if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
85f7ca3522SDarrick J. Wong a = ip->i_d.di_cowextsize;
86f7ca3522SDarrick J. Wong b = xfs_get_extsz_hint(ip);
87f7ca3522SDarrick J. Wong
88e153aa79SDarrick J. Wong a = max(a, b);
89e153aa79SDarrick J. Wong if (a == 0)
90e153aa79SDarrick J. Wong return XFS_DEFAULT_COWEXTSZ_HINT;
91f7ca3522SDarrick J. Wong return a;
92f7ca3522SDarrick J. Wong }
93f7ca3522SDarrick J. Wong
94f7ca3522SDarrick J. Wong /*
95efa70be1SChristoph Hellwig * These two are wrapper routines around the xfs_ilock() routine used to
96efa70be1SChristoph Hellwig * centralize some grungy code. They are used in places that wish to lock the
97efa70be1SChristoph Hellwig * inode solely for reading the extents. The reason these places can't just
98efa70be1SChristoph Hellwig * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
99efa70be1SChristoph Hellwig * reading in of the extents from disk for a file in b-tree format. If the
100efa70be1SChristoph Hellwig * inode is in b-tree format, then we need to lock the inode exclusively until
101efa70be1SChristoph Hellwig * the extents are read in. Locking it exclusively all the time would limit
102efa70be1SChristoph Hellwig * our parallelism unnecessarily, though. What we do instead is check to see
103efa70be1SChristoph Hellwig * if the extents have been read in yet, and only lock the inode exclusively
104efa70be1SChristoph Hellwig * if they have not.
105fa96acadSDave Chinner *
106efa70be1SChristoph Hellwig * The functions return a value which should be given to the corresponding
10701f4f327SChristoph Hellwig * xfs_iunlock() call.
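 *
 * As an illustrative sketch of that pairing (the caller shown here is
 * hypothetical, not taken from this file):
 *
 *	uint lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	... read the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);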
108fa96acadSDave Chinner */
109fa96acadSDave Chinner uint
110309ecac8SChristoph Hellwig xfs_ilock_data_map_shared(
111309ecac8SChristoph Hellwig struct xfs_inode *ip)
112fa96acadSDave Chinner {
113309ecac8SChristoph Hellwig uint lock_mode = XFS_ILOCK_SHARED;
114fa96acadSDave Chinner
115309ecac8SChristoph Hellwig if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
116309ecac8SChristoph Hellwig (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
117fa96acadSDave Chinner lock_mode = XFS_ILOCK_EXCL;
118fa96acadSDave Chinner xfs_ilock(ip, lock_mode);
119fa96acadSDave Chinner return lock_mode;
120fa96acadSDave Chinner }
121fa96acadSDave Chinner
122efa70be1SChristoph Hellwig uint
123efa70be1SChristoph Hellwig xfs_ilock_attr_map_shared(
124efa70be1SChristoph Hellwig struct xfs_inode *ip)
125fa96acadSDave Chinner {
126efa70be1SChristoph Hellwig uint lock_mode = XFS_ILOCK_SHARED;
127efa70be1SChristoph Hellwig
128efa70be1SChristoph Hellwig if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
129efa70be1SChristoph Hellwig (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
130efa70be1SChristoph Hellwig lock_mode = XFS_ILOCK_EXCL;
131efa70be1SChristoph Hellwig xfs_ilock(ip, lock_mode);
132efa70be1SChristoph Hellwig return lock_mode;
133fa96acadSDave Chinner }
134fa96acadSDave Chinner
135fa96acadSDave Chinner /*
13665523218SChristoph Hellwig * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
13765523218SChristoph Hellwig * multi-reader locks: i_mmap_lock and the i_lock. This routine allows
13865523218SChristoph Hellwig * various combinations of the locks to be obtained.
139fa96acadSDave Chinner *
140653c60b6SDave Chinner * The 3 locks should always be ordered so that the IO lock is obtained first,
141653c60b6SDave Chinner * the mmap lock second and the ilock last in order to prevent deadlock.
142fa96acadSDave Chinner *
143653c60b6SDave Chinner * Basic locking order:
144653c60b6SDave Chinner *
14565523218SChristoph Hellwig * i_rwsem -> i_mmap_lock -> page_lock -> i_lock
146653c60b6SDave Chinner *
147653c60b6SDave Chinner * mmap_sem locking order:
148653c60b6SDave Chinner *
14965523218SChristoph Hellwig * i_rwsem -> page_lock -> mmap_sem
150653c60b6SDave Chinner * mmap_sem -> i_mmap_lock -> page_lock
151653c60b6SDave Chinner *
152653c60b6SDave Chinner * The difference in mmap_sem locking order means that we cannot hold the
153653c60b6SDave Chinner * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
154653c60b6SDave Chinner * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
155653c60b6SDave Chinner * in get_user_pages() to map the user pages into the kernel address space for
15665523218SChristoph Hellwig * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
157653c60b6SDave Chinner * page faults already hold the mmap_sem.
158653c60b6SDave Chinner *
159653c60b6SDave Chinner * Hence to serialise fully against both syscall and mmap based IO, we need to
16065523218SChristoph Hellwig * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
161653c60b6SDave Chinner * taken in places where we need to invalidate the page cache in a
162653c60b6SDave Chinner * race-free manner (e.g. truncate, hole punch and other extent manipulation
163653c60b6SDave Chinner * functions).
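 *
 * For example, a truncate- or hole-punch-style caller would take the
 * locks in this order before invalidating the page cache (illustrative
 * sketch only, not a quote of any one function):
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 *	... invalidate the page cache and manipulate extents ...
 *	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL);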
164fa96acadSDave Chinner */
165fa96acadSDave Chinner void
166fa96acadSDave Chinner xfs_ilock(
167fa96acadSDave Chinner xfs_inode_t *ip,
168fa96acadSDave Chinner uint lock_flags)
169fa96acadSDave Chinner {
170fa96acadSDave Chinner trace_xfs_ilock(ip, lock_flags, _RET_IP_);
171fa96acadSDave Chinner
172fa96acadSDave Chinner /*
173fa96acadSDave Chinner * You can't set both SHARED and EXCL for the same lock,
174fa96acadSDave Chinner * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
175fa96acadSDave Chinner * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
176fa96acadSDave Chinner */
177fa96acadSDave Chinner ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
178fa96acadSDave Chinner (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
179653c60b6SDave Chinner ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
180653c60b6SDave Chinner (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
181fa96acadSDave Chinner ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
182fa96acadSDave Chinner (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
1830952c818SDave Chinner ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
184fa96acadSDave Chinner
18565523218SChristoph Hellwig if (lock_flags & XFS_IOLOCK_EXCL) {
18665523218SChristoph Hellwig down_write_nested(&VFS_I(ip)->i_rwsem,
18765523218SChristoph Hellwig XFS_IOLOCK_DEP(lock_flags));
18865523218SChristoph Hellwig } else if (lock_flags & XFS_IOLOCK_SHARED) {
18965523218SChristoph Hellwig down_read_nested(&VFS_I(ip)->i_rwsem,
19065523218SChristoph Hellwig XFS_IOLOCK_DEP(lock_flags));
19165523218SChristoph Hellwig }
192fa96acadSDave Chinner
193653c60b6SDave Chinner if (lock_flags & XFS_MMAPLOCK_EXCL)
194653c60b6SDave Chinner mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
195653c60b6SDave Chinner else if (lock_flags & XFS_MMAPLOCK_SHARED)
196653c60b6SDave Chinner mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
197653c60b6SDave Chinner
198fa96acadSDave Chinner if (lock_flags & XFS_ILOCK_EXCL)
199fa96acadSDave Chinner mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
200fa96acadSDave Chinner else if (lock_flags & XFS_ILOCK_SHARED)
201fa96acadSDave Chinner mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
202fa96acadSDave Chinner }
203fa96acadSDave Chinner
204fa96acadSDave Chinner /*
205fa96acadSDave Chinner * This is just like xfs_ilock(), except that the caller
206fa96acadSDave Chinner * is guaranteed not to sleep. It returns 1 if it gets
207fa96acadSDave Chinner * the requested locks and 0 otherwise. If the IO lock is
208fa96acadSDave Chinner * obtained but the inode lock cannot be, then the IO lock
209fa96acadSDave Chinner * is dropped before returning.
210fa96acadSDave Chinner *
211fa96acadSDave Chinner * ip -- the inode being locked
212fa96acadSDave Chinner * lock_flags -- this parameter indicates the inode's locks to be
213fa96acadSDave Chinner * locked. See the comment for xfs_ilock() for a list
214fa96acadSDave Chinner * of valid values.
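 *
 * A sketch of the usual trylock-with-fallback pattern (the fallback
 * shown is hypothetical; callers choose their own recovery):
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		... back off, drop other locks, or block ...
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	}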
215fa96acadSDave Chinner */ 216fa96acadSDave Chinner int 217fa96acadSDave Chinner xfs_ilock_nowait( 218fa96acadSDave Chinner xfs_inode_t *ip, 219fa96acadSDave Chinner uint lock_flags) 220fa96acadSDave Chinner { 221fa96acadSDave Chinner trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_); 222fa96acadSDave Chinner 223fa96acadSDave Chinner /* 224fa96acadSDave Chinner * You can't set both SHARED and EXCL for the same lock, 225fa96acadSDave Chinner * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, 226fa96acadSDave Chinner * and XFS_ILOCK_EXCL are valid values to set in lock_flags. 227fa96acadSDave Chinner */ 228fa96acadSDave Chinner ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != 229fa96acadSDave Chinner (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); 230653c60b6SDave Chinner ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) != 231653c60b6SDave Chinner (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)); 232fa96acadSDave Chinner ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != 233fa96acadSDave Chinner (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); 2340952c818SDave Chinner ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0); 235fa96acadSDave Chinner 236fa96acadSDave Chinner if (lock_flags & XFS_IOLOCK_EXCL) { 23765523218SChristoph Hellwig if (!down_write_trylock(&VFS_I(ip)->i_rwsem)) 238fa96acadSDave Chinner goto out; 239fa96acadSDave Chinner } else if (lock_flags & XFS_IOLOCK_SHARED) { 24065523218SChristoph Hellwig if (!down_read_trylock(&VFS_I(ip)->i_rwsem)) 241fa96acadSDave Chinner goto out; 242fa96acadSDave Chinner } 243653c60b6SDave Chinner 244653c60b6SDave Chinner if (lock_flags & XFS_MMAPLOCK_EXCL) { 245653c60b6SDave Chinner if (!mrtryupdate(&ip->i_mmaplock)) 246653c60b6SDave Chinner goto out_undo_iolock; 247653c60b6SDave Chinner } else if (lock_flags & XFS_MMAPLOCK_SHARED) { 248653c60b6SDave Chinner if (!mrtryaccess(&ip->i_mmaplock)) 249653c60b6SDave Chinner goto out_undo_iolock; 250653c60b6SDave Chinner } 251653c60b6SDave Chinner 252fa96acadSDave Chinner if (lock_flags & XFS_ILOCK_EXCL) { 253fa96acadSDave Chinner if (!mrtryupdate(&ip->i_lock)) 254653c60b6SDave Chinner goto out_undo_mmaplock; 255fa96acadSDave Chinner } else if (lock_flags & XFS_ILOCK_SHARED) { 256fa96acadSDave Chinner if (!mrtryaccess(&ip->i_lock)) 257653c60b6SDave Chinner goto out_undo_mmaplock; 258fa96acadSDave Chinner } 259fa96acadSDave Chinner return 1; 260fa96acadSDave Chinner 261653c60b6SDave Chinner out_undo_mmaplock: 262653c60b6SDave Chinner if (lock_flags & XFS_MMAPLOCK_EXCL) 263653c60b6SDave Chinner mrunlock_excl(&ip->i_mmaplock); 264653c60b6SDave Chinner else if (lock_flags & XFS_MMAPLOCK_SHARED) 265653c60b6SDave Chinner mrunlock_shared(&ip->i_mmaplock); 266fa96acadSDave Chinner out_undo_iolock: 267fa96acadSDave Chinner if (lock_flags & XFS_IOLOCK_EXCL) 26865523218SChristoph Hellwig up_write(&VFS_I(ip)->i_rwsem); 269fa96acadSDave Chinner else if (lock_flags & XFS_IOLOCK_SHARED) 27065523218SChristoph Hellwig up_read(&VFS_I(ip)->i_rwsem); 271fa96acadSDave Chinner out: 272fa96acadSDave Chinner return 0; 273fa96acadSDave Chinner } 274fa96acadSDave Chinner 275fa96acadSDave Chinner /* 276fa96acadSDave Chinner * xfs_iunlock() is used to drop the inode locks acquired with 277fa96acadSDave Chinner * xfs_ilock() and xfs_ilock_nowait(). The caller must pass 278fa96acadSDave Chinner * in the flags given to xfs_ilock() or xfs_ilock_nowait() so 279fa96acadSDave Chinner * that we know which locks to drop. 
280fa96acadSDave Chinner *
281fa96acadSDave Chinner * ip -- the inode being unlocked
282fa96acadSDave Chinner * lock_flags -- this parameter indicates the inode's locks to be
283fa96acadSDave Chinner * unlocked. See the comment for xfs_ilock() for a list
284fa96acadSDave Chinner * of valid values for this parameter.
285fa96acadSDave Chinner *
286fa96acadSDave Chinner */
287fa96acadSDave Chinner void
288fa96acadSDave Chinner xfs_iunlock(
289fa96acadSDave Chinner xfs_inode_t *ip,
290fa96acadSDave Chinner uint lock_flags)
291fa96acadSDave Chinner {
292fa96acadSDave Chinner /*
293fa96acadSDave Chinner * You can't set both SHARED and EXCL for the same lock,
294fa96acadSDave Chinner * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
295fa96acadSDave Chinner * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
296fa96acadSDave Chinner */
297fa96acadSDave Chinner ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
298fa96acadSDave Chinner (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
299653c60b6SDave Chinner ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
300653c60b6SDave Chinner (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
301fa96acadSDave Chinner ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
302fa96acadSDave Chinner (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
3030952c818SDave Chinner ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
304fa96acadSDave Chinner ASSERT(lock_flags != 0);
305fa96acadSDave Chinner
306fa96acadSDave Chinner if (lock_flags & XFS_IOLOCK_EXCL)
30765523218SChristoph Hellwig up_write(&VFS_I(ip)->i_rwsem);
308fa96acadSDave Chinner else if (lock_flags & XFS_IOLOCK_SHARED)
30965523218SChristoph Hellwig up_read(&VFS_I(ip)->i_rwsem);
310fa96acadSDave Chinner
311653c60b6SDave Chinner if (lock_flags & XFS_MMAPLOCK_EXCL)
312653c60b6SDave Chinner mrunlock_excl(&ip->i_mmaplock);
313653c60b6SDave Chinner else if (lock_flags & XFS_MMAPLOCK_SHARED)
314653c60b6SDave Chinner mrunlock_shared(&ip->i_mmaplock);
315653c60b6SDave Chinner
316fa96acadSDave Chinner if (lock_flags & XFS_ILOCK_EXCL)
317fa96acadSDave Chinner mrunlock_excl(&ip->i_lock);
318fa96acadSDave Chinner else if (lock_flags & XFS_ILOCK_SHARED)
319fa96acadSDave Chinner mrunlock_shared(&ip->i_lock);
320fa96acadSDave Chinner
321fa96acadSDave Chinner trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
322fa96acadSDave Chinner }
323fa96acadSDave Chinner
324fa96acadSDave Chinner /*
325fa96acadSDave Chinner * Give up write locks. The I/O lock cannot be held nested
326fa96acadSDave Chinner * if it is being demoted.
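 *
 * For example (hypothetical caller, sketch only): take the ilock
 * exclusively for an update, then demote it so readers can proceed
 * while we finish up under the shared lock:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... modify inode state ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... read under the shared lock ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);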
327fa96acadSDave Chinner */ 328fa96acadSDave Chinner void 329fa96acadSDave Chinner xfs_ilock_demote( 330fa96acadSDave Chinner xfs_inode_t *ip, 331fa96acadSDave Chinner uint lock_flags) 332fa96acadSDave Chinner { 333653c60b6SDave Chinner ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)); 334653c60b6SDave Chinner ASSERT((lock_flags & 335653c60b6SDave Chinner ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0); 336fa96acadSDave Chinner 337fa96acadSDave Chinner if (lock_flags & XFS_ILOCK_EXCL) 338fa96acadSDave Chinner mrdemote(&ip->i_lock); 339653c60b6SDave Chinner if (lock_flags & XFS_MMAPLOCK_EXCL) 340653c60b6SDave Chinner mrdemote(&ip->i_mmaplock); 341fa96acadSDave Chinner if (lock_flags & XFS_IOLOCK_EXCL) 34265523218SChristoph Hellwig downgrade_write(&VFS_I(ip)->i_rwsem); 343fa96acadSDave Chinner 344fa96acadSDave Chinner trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_); 345fa96acadSDave Chinner } 346fa96acadSDave Chinner 347742ae1e3SDave Chinner #if defined(DEBUG) || defined(XFS_WARN) 348fa96acadSDave Chinner int 349fa96acadSDave Chinner xfs_isilocked( 350fa96acadSDave Chinner xfs_inode_t *ip, 351fa96acadSDave Chinner uint lock_flags) 352fa96acadSDave Chinner { 353fa96acadSDave Chinner if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) { 354fa96acadSDave Chinner if (!(lock_flags & XFS_ILOCK_SHARED)) 355fa96acadSDave Chinner return !!ip->i_lock.mr_writer; 356fa96acadSDave Chinner return rwsem_is_locked(&ip->i_lock.mr_lock); 357fa96acadSDave Chinner } 358fa96acadSDave Chinner 359653c60b6SDave Chinner if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) { 360653c60b6SDave Chinner if (!(lock_flags & XFS_MMAPLOCK_SHARED)) 361653c60b6SDave Chinner return !!ip->i_mmaplock.mr_writer; 362653c60b6SDave Chinner return rwsem_is_locked(&ip->i_mmaplock.mr_lock); 363653c60b6SDave Chinner } 364653c60b6SDave Chinner 365fa96acadSDave Chinner if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) { 366fa96acadSDave Chinner if (!(lock_flags & XFS_IOLOCK_SHARED)) 36765523218SChristoph Hellwig return !debug_locks || 36865523218SChristoph Hellwig lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0); 36965523218SChristoph Hellwig return rwsem_is_locked(&VFS_I(ip)->i_rwsem); 370fa96acadSDave Chinner } 371fa96acadSDave Chinner 372fa96acadSDave Chinner ASSERT(0); 373fa96acadSDave Chinner return 0; 374fa96acadSDave Chinner } 375fa96acadSDave Chinner #endif 376fa96acadSDave Chinner 377b6a9947eSDave Chinner /* 378b6a9947eSDave Chinner * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when 379b6a9947eSDave Chinner * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined 380b6a9947eSDave Chinner * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build 381b6a9947eSDave Chinner * errors and warnings. 382b6a9947eSDave Chinner */ 383b6a9947eSDave Chinner #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP) 3843403ccc0SDave Chinner static bool 3853403ccc0SDave Chinner xfs_lockdep_subclass_ok( 3863403ccc0SDave Chinner int subclass) 3873403ccc0SDave Chinner { 3883403ccc0SDave Chinner return subclass < MAX_LOCKDEP_SUBCLASSES; 3893403ccc0SDave Chinner } 3903403ccc0SDave Chinner #else 3913403ccc0SDave Chinner #define xfs_lockdep_subclass_ok(subclass) (true) 3923403ccc0SDave Chinner #endif 3933403ccc0SDave Chinner 394c24b5dfaSDave Chinner /* 395653c60b6SDave Chinner * Bump the subclass so xfs_lock_inodes() acquires each lock with a different 3960952c818SDave Chinner * value. 
This can be called for any type of inode lock combination, including
3970952c818SDave Chinner * parent locking. Care must be taken to ensure we don't overrun the subclass
3980952c818SDave Chinner * storage fields in the class mask we build.
399c24b5dfaSDave Chinner */
400c24b5dfaSDave Chinner static inline int
401c24b5dfaSDave Chinner xfs_lock_inumorder(int lock_mode, int subclass)
402c24b5dfaSDave Chinner {
4030952c818SDave Chinner int class = 0;
4040952c818SDave Chinner
4050952c818SDave Chinner ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
4060952c818SDave Chinner XFS_ILOCK_RTSUM)));
4073403ccc0SDave Chinner ASSERT(xfs_lockdep_subclass_ok(subclass));
4080952c818SDave Chinner
409653c60b6SDave Chinner if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
4100952c818SDave Chinner ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
4110952c818SDave Chinner class += subclass << XFS_IOLOCK_SHIFT;
412653c60b6SDave Chinner }
413653c60b6SDave Chinner
414653c60b6SDave Chinner if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
4150952c818SDave Chinner ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
4160952c818SDave Chinner class += subclass << XFS_MMAPLOCK_SHIFT;
417653c60b6SDave Chinner }
418653c60b6SDave Chinner
4190952c818SDave Chinner if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
4200952c818SDave Chinner ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
4210952c818SDave Chinner class += subclass << XFS_ILOCK_SHIFT;
4220952c818SDave Chinner }
423c24b5dfaSDave Chinner
4240952c818SDave Chinner return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
425c24b5dfaSDave Chinner }
426c24b5dfaSDave Chinner
427c24b5dfaSDave Chinner /*
42895afcf5cSDave Chinner * The following routine will lock n inodes in exclusive mode. We assume the
42995afcf5cSDave Chinner * caller calls us with the inodes in i_ino order.
430c24b5dfaSDave Chinner *
43195afcf5cSDave Chinner * We need to detect deadlock where an inode that we lock is in the AIL and we
43295afcf5cSDave Chinner * start waiting for another inode that is locked by a thread in a long running
43395afcf5cSDave Chinner * transaction (such as truncate). This can result in deadlock since the long
43495afcf5cSDave Chinner * running trans might need to wait for the inode we just locked in order to
43595afcf5cSDave Chinner * push the tail and free space in the log.
4360952c818SDave Chinner *
4370952c818SDave Chinner * xfs_lock_inodes() can only be used to lock one type of lock at a time -
4380952c818SDave Chinner * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
4390952c818SDave Chinner * lock more than one at a time, lockdep will report false positives saying we
4400952c818SDave Chinner * have violated locking orders.
441c24b5dfaSDave Chinner */
4420d5a75e9SEric Sandeen static void
443c24b5dfaSDave Chinner xfs_lock_inodes(
444c24b5dfaSDave Chinner xfs_inode_t **ips,
445c24b5dfaSDave Chinner int inodes,
446c24b5dfaSDave Chinner uint lock_mode)
447c24b5dfaSDave Chinner {
448c24b5dfaSDave Chinner int attempts = 0, i, j, try_lock;
449c24b5dfaSDave Chinner xfs_log_item_t *lp;
450c24b5dfaSDave Chinner
4510952c818SDave Chinner /*
4520952c818SDave Chinner * Currently supports between 2 and 5 inodes with exclusive locking. We
4530952c818SDave Chinner * support an arbitrary depth of locking here, but absolute limits on
4540952c818SDave Chinner * inodes depend on the type of locking and the limits placed by
4550952c818SDave Chinner * lockdep annotations in xfs_lock_inumorder.
These are all checked by 4560952c818SDave Chinner * the asserts. 4570952c818SDave Chinner */ 45895afcf5cSDave Chinner ASSERT(ips && inodes >= 2 && inodes <= 5); 4590952c818SDave Chinner ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | 4600952c818SDave Chinner XFS_ILOCK_EXCL)); 4610952c818SDave Chinner ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED | 4620952c818SDave Chinner XFS_ILOCK_SHARED))); 4630952c818SDave Chinner ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) || 4640952c818SDave Chinner inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1); 4650952c818SDave Chinner ASSERT(!(lock_mode & XFS_ILOCK_EXCL) || 4660952c818SDave Chinner inodes <= XFS_ILOCK_MAX_SUBCLASS + 1); 4670952c818SDave Chinner 4680952c818SDave Chinner if (lock_mode & XFS_IOLOCK_EXCL) { 4690952c818SDave Chinner ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL))); 4700952c818SDave Chinner } else if (lock_mode & XFS_MMAPLOCK_EXCL) 4710952c818SDave Chinner ASSERT(!(lock_mode & XFS_ILOCK_EXCL)); 472c24b5dfaSDave Chinner 473c24b5dfaSDave Chinner try_lock = 0; 474c24b5dfaSDave Chinner i = 0; 475c24b5dfaSDave Chinner again: 476c24b5dfaSDave Chinner for (; i < inodes; i++) { 477c24b5dfaSDave Chinner ASSERT(ips[i]); 478c24b5dfaSDave Chinner 479c24b5dfaSDave Chinner if (i && (ips[i] == ips[i - 1])) /* Already locked */ 480c24b5dfaSDave Chinner continue; 481c24b5dfaSDave Chinner 482c24b5dfaSDave Chinner /* 48395afcf5cSDave Chinner * If try_lock is not set yet, make sure all locked inodes are 48495afcf5cSDave Chinner * not in the AIL. If any are, set try_lock to be used later. 485c24b5dfaSDave Chinner */ 486c24b5dfaSDave Chinner if (!try_lock) { 487c24b5dfaSDave Chinner for (j = (i - 1); j >= 0 && !try_lock; j--) { 488*b3b14aacSChristoph Hellwig lp = &ips[j]->i_itemp->ili_item; 48922525c17SDave Chinner if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) 490c24b5dfaSDave Chinner try_lock++; 491c24b5dfaSDave Chinner } 492c24b5dfaSDave Chinner } 493c24b5dfaSDave Chinner 494c24b5dfaSDave Chinner /* 495c24b5dfaSDave Chinner * If any of the previous locks we have locked is in the AIL, 496c24b5dfaSDave Chinner * we must TRY to get the second and subsequent locks. If 497c24b5dfaSDave Chinner * we can't get any, we must release all we have 498c24b5dfaSDave Chinner * and try again. 499c24b5dfaSDave Chinner */ 50095afcf5cSDave Chinner if (!try_lock) { 50195afcf5cSDave Chinner xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i)); 50295afcf5cSDave Chinner continue; 50395afcf5cSDave Chinner } 504c24b5dfaSDave Chinner 50595afcf5cSDave Chinner /* try_lock means we have an inode locked that is in the AIL. */ 506c24b5dfaSDave Chinner ASSERT(i != 0); 50795afcf5cSDave Chinner if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) 50895afcf5cSDave Chinner continue; 50995afcf5cSDave Chinner 51095afcf5cSDave Chinner /* 51195afcf5cSDave Chinner * Unlock all previous guys and try again. xfs_iunlock will try 51295afcf5cSDave Chinner * to push the tail if the inode is in the AIL. 51395afcf5cSDave Chinner */ 514c24b5dfaSDave Chinner attempts++; 515c24b5dfaSDave Chinner for (j = i - 1; j >= 0; j--) { 516c24b5dfaSDave Chinner /* 51795afcf5cSDave Chinner * Check to see if we've already unlocked this one. Not 51895afcf5cSDave Chinner * the first one going back, and the inode ptr is the 51995afcf5cSDave Chinner * same. 
520c24b5dfaSDave Chinner */ 52195afcf5cSDave Chinner if (j != (i - 1) && ips[j] == ips[j + 1]) 522c24b5dfaSDave Chinner continue; 523c24b5dfaSDave Chinner 524c24b5dfaSDave Chinner xfs_iunlock(ips[j], lock_mode); 525c24b5dfaSDave Chinner } 526c24b5dfaSDave Chinner 527c24b5dfaSDave Chinner if ((attempts % 5) == 0) { 528c24b5dfaSDave Chinner delay(1); /* Don't just spin the CPU */ 529c24b5dfaSDave Chinner } 530c24b5dfaSDave Chinner i = 0; 531c24b5dfaSDave Chinner try_lock = 0; 532c24b5dfaSDave Chinner goto again; 533c24b5dfaSDave Chinner } 534c24b5dfaSDave Chinner } 535c24b5dfaSDave Chinner 536c24b5dfaSDave Chinner /* 537653c60b6SDave Chinner * xfs_lock_two_inodes() can only be used to lock one type of lock at a time - 5387c2d238aSDarrick J. Wong * the mmaplock or the ilock, but not more than one type at a time. If we lock 5397c2d238aSDarrick J. Wong * more than one at a time, lockdep will report false positives saying we have 5407c2d238aSDarrick J. Wong * violated locking orders. The iolock must be double-locked separately since 5417c2d238aSDarrick J. Wong * we use i_rwsem for that. We now support taking one lock EXCL and the other 5427c2d238aSDarrick J. Wong * SHARED. 543c24b5dfaSDave Chinner */ 544c24b5dfaSDave Chinner void 545c24b5dfaSDave Chinner xfs_lock_two_inodes( 5467c2d238aSDarrick J. Wong struct xfs_inode *ip0, 5477c2d238aSDarrick J. Wong uint ip0_mode, 5487c2d238aSDarrick J. Wong struct xfs_inode *ip1, 5497c2d238aSDarrick J. Wong uint ip1_mode) 550c24b5dfaSDave Chinner { 5517c2d238aSDarrick J. Wong struct xfs_inode *temp; 5527c2d238aSDarrick J. Wong uint mode_temp; 553c24b5dfaSDave Chinner int attempts = 0; 554c24b5dfaSDave Chinner xfs_log_item_t *lp; 555c24b5dfaSDave Chinner 5567c2d238aSDarrick J. Wong ASSERT(hweight32(ip0_mode) == 1); 5577c2d238aSDarrick J. Wong ASSERT(hweight32(ip1_mode) == 1); 5587c2d238aSDarrick J. Wong ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))); 5597c2d238aSDarrick J. Wong ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))); 5607c2d238aSDarrick J. Wong ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) || 5617c2d238aSDarrick J. Wong !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))); 5627c2d238aSDarrick J. Wong ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) || 5637c2d238aSDarrick J. Wong !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))); 5647c2d238aSDarrick J. Wong ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) || 5657c2d238aSDarrick J. Wong !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))); 5667c2d238aSDarrick J. Wong ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) || 5677c2d238aSDarrick J. Wong !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))); 568653c60b6SDave Chinner 569c24b5dfaSDave Chinner ASSERT(ip0->i_ino != ip1->i_ino); 570c24b5dfaSDave Chinner 571c24b5dfaSDave Chinner if (ip0->i_ino > ip1->i_ino) { 572c24b5dfaSDave Chinner temp = ip0; 573c24b5dfaSDave Chinner ip0 = ip1; 574c24b5dfaSDave Chinner ip1 = temp; 5757c2d238aSDarrick J. Wong mode_temp = ip0_mode; 5767c2d238aSDarrick J. Wong ip0_mode = ip1_mode; 5777c2d238aSDarrick J. Wong ip1_mode = mode_temp; 578c24b5dfaSDave Chinner } 579c24b5dfaSDave Chinner 580c24b5dfaSDave Chinner again: 5817c2d238aSDarrick J. Wong xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0)); 582c24b5dfaSDave Chinner 583c24b5dfaSDave Chinner /* 584c24b5dfaSDave Chinner * If the first lock we have locked is in the AIL, we must TRY to get 585c24b5dfaSDave Chinner * the second lock. 
If we can't get it, we must release the first one 586c24b5dfaSDave Chinner * and try again. 587c24b5dfaSDave Chinner */ 588*b3b14aacSChristoph Hellwig lp = &ip0->i_itemp->ili_item; 58922525c17SDave Chinner if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) { 5907c2d238aSDarrick J. Wong if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) { 5917c2d238aSDarrick J. Wong xfs_iunlock(ip0, ip0_mode); 592c24b5dfaSDave Chinner if ((++attempts % 5) == 0) 593c24b5dfaSDave Chinner delay(1); /* Don't just spin the CPU */ 594c24b5dfaSDave Chinner goto again; 595c24b5dfaSDave Chinner } 596c24b5dfaSDave Chinner } else { 5977c2d238aSDarrick J. Wong xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1)); 598c24b5dfaSDave Chinner } 599c24b5dfaSDave Chinner } 600c24b5dfaSDave Chinner 601fa96acadSDave Chinner void 602fa96acadSDave Chinner __xfs_iflock( 603fa96acadSDave Chinner struct xfs_inode *ip) 604fa96acadSDave Chinner { 605fa96acadSDave Chinner wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT); 606fa96acadSDave Chinner DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT); 607fa96acadSDave Chinner 608fa96acadSDave Chinner do { 60921417136SIngo Molnar prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 610fa96acadSDave Chinner if (xfs_isiflocked(ip)) 611fa96acadSDave Chinner io_schedule(); 612fa96acadSDave Chinner } while (!xfs_iflock_nowait(ip)); 613fa96acadSDave Chinner 61421417136SIngo Molnar finish_wait(wq, &wait.wq_entry); 615fa96acadSDave Chinner } 616fa96acadSDave Chinner 6171da177e4SLinus Torvalds STATIC uint 6181da177e4SLinus Torvalds _xfs_dic2xflags( 619c8ce540dSDarrick J. Wong uint16_t di_flags, 62058f88ca2SDave Chinner uint64_t di_flags2, 62158f88ca2SDave Chinner bool has_attr) 6221da177e4SLinus Torvalds { 6231da177e4SLinus Torvalds uint flags = 0; 6241da177e4SLinus Torvalds 6251da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_ANY) { 6261da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_REALTIME) 627e7b89481SDave Chinner flags |= FS_XFLAG_REALTIME; 6281da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_PREALLOC) 629e7b89481SDave Chinner flags |= FS_XFLAG_PREALLOC; 6301da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_IMMUTABLE) 631e7b89481SDave Chinner flags |= FS_XFLAG_IMMUTABLE; 6321da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_APPEND) 633e7b89481SDave Chinner flags |= FS_XFLAG_APPEND; 6341da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_SYNC) 635e7b89481SDave Chinner flags |= FS_XFLAG_SYNC; 6361da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_NOATIME) 637e7b89481SDave Chinner flags |= FS_XFLAG_NOATIME; 6381da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_NODUMP) 639e7b89481SDave Chinner flags |= FS_XFLAG_NODUMP; 6401da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_RTINHERIT) 641e7b89481SDave Chinner flags |= FS_XFLAG_RTINHERIT; 6421da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_PROJINHERIT) 643e7b89481SDave Chinner flags |= FS_XFLAG_PROJINHERIT; 6441da177e4SLinus Torvalds if (di_flags & XFS_DIFLAG_NOSYMLINKS) 645e7b89481SDave Chinner flags |= FS_XFLAG_NOSYMLINKS; 646dd9f438eSNathan Scott if (di_flags & XFS_DIFLAG_EXTSIZE) 647e7b89481SDave Chinner flags |= FS_XFLAG_EXTSIZE; 648dd9f438eSNathan Scott if (di_flags & XFS_DIFLAG_EXTSZINHERIT) 649e7b89481SDave Chinner flags |= FS_XFLAG_EXTSZINHERIT; 650d3446eacSBarry Naujok if (di_flags & XFS_DIFLAG_NODEFRAG) 651e7b89481SDave Chinner flags |= FS_XFLAG_NODEFRAG; 6522a82b8beSDavid Chinner if (di_flags & XFS_DIFLAG_FILESTREAM) 653e7b89481SDave Chinner flags |= FS_XFLAG_FILESTREAM; 6541da177e4SLinus Torvalds } 
6551da177e4SLinus Torvalds
65658f88ca2SDave Chinner if (di_flags2 & XFS_DIFLAG2_ANY) {
65758f88ca2SDave Chinner if (di_flags2 & XFS_DIFLAG2_DAX)
65858f88ca2SDave Chinner flags |= FS_XFLAG_DAX;
659f7ca3522SDarrick J. Wong if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
660f7ca3522SDarrick J. Wong flags |= FS_XFLAG_COWEXTSIZE;
66158f88ca2SDave Chinner }
66258f88ca2SDave Chinner
66358f88ca2SDave Chinner if (has_attr)
66458f88ca2SDave Chinner flags |= FS_XFLAG_HASATTR;
66558f88ca2SDave Chinner
6661da177e4SLinus Torvalds return flags;
6671da177e4SLinus Torvalds }
6681da177e4SLinus Torvalds
6691da177e4SLinus Torvalds uint
6701da177e4SLinus Torvalds xfs_ip2xflags(
67158f88ca2SDave Chinner struct xfs_inode *ip)
6721da177e4SLinus Torvalds {
67358f88ca2SDave Chinner struct xfs_icdinode *dic = &ip->i_d;
6741da177e4SLinus Torvalds
67558f88ca2SDave Chinner return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
6761da177e4SLinus Torvalds }
6771da177e4SLinus Torvalds
6781da177e4SLinus Torvalds /*
679c24b5dfaSDave Chinner * Looks up an inode from "name". If ci_name is not NULL, then a CI match
680c24b5dfaSDave Chinner * is allowed, otherwise it has to be an exact match. If a CI match is found,
681c24b5dfaSDave Chinner * ci_name->name will point to the actual name (caller must free) or
682c24b5dfaSDave Chinner * will be set to NULL if an exact match is found.
683c24b5dfaSDave Chinner */
684c24b5dfaSDave Chinner int
685c24b5dfaSDave Chinner xfs_lookup(
686c24b5dfaSDave Chinner xfs_inode_t *dp,
687c24b5dfaSDave Chinner struct xfs_name *name,
688c24b5dfaSDave Chinner xfs_inode_t **ipp,
689c24b5dfaSDave Chinner struct xfs_name *ci_name)
690c24b5dfaSDave Chinner {
691c24b5dfaSDave Chinner xfs_ino_t inum;
692c24b5dfaSDave Chinner int error;
693c24b5dfaSDave Chinner
694c24b5dfaSDave Chinner trace_xfs_lookup(dp, name);
695c24b5dfaSDave Chinner
696c24b5dfaSDave Chinner if (XFS_FORCED_SHUTDOWN(dp->i_mount))
6972451337dSDave Chinner return -EIO;
698c24b5dfaSDave Chinner
699c24b5dfaSDave Chinner error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
700c24b5dfaSDave Chinner if (error)
701dbad7c99SDave Chinner goto out_unlock;
702c24b5dfaSDave Chinner
703c24b5dfaSDave Chinner error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
704c24b5dfaSDave Chinner if (error)
705c24b5dfaSDave Chinner goto out_free_name;
706c24b5dfaSDave Chinner
707c24b5dfaSDave Chinner return 0;
708c24b5dfaSDave Chinner
709c24b5dfaSDave Chinner out_free_name:
710c24b5dfaSDave Chinner if (ci_name)
711c24b5dfaSDave Chinner kmem_free(ci_name->name);
712dbad7c99SDave Chinner out_unlock:
713c24b5dfaSDave Chinner *ipp = NULL;
714c24b5dfaSDave Chinner return error;
715c24b5dfaSDave Chinner }
716c24b5dfaSDave Chinner
717c24b5dfaSDave Chinner /*
7181da177e4SLinus Torvalds * Allocate an inode on disk and return a copy of its in-core version.
7191da177e4SLinus Torvalds * The in-core inode is locked exclusively. Set mode, nlink, and rdev
7201da177e4SLinus Torvalds * appropriately within the inode. The uid and gid for the inode are
7211da177e4SLinus Torvalds * set according to the contents of the given cred structure.
7221da177e4SLinus Torvalds *
7231da177e4SLinus Torvalds * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
724cd856db6SCarlos Maiolino * has a free inode available, call xfs_iget() to obtain the in-core
725cd856db6SCarlos Maiolino * version of the allocated inode. Finally, fill in the inode and
726cd856db6SCarlos Maiolino * log its initial contents.
In this case, ialloc_context would be 727cd856db6SCarlos Maiolino * set to NULL. 7281da177e4SLinus Torvalds * 729cd856db6SCarlos Maiolino * If xfs_dialloc() does not have an available inode, it will replenish 730cd856db6SCarlos Maiolino * its supply by doing an allocation. Since we can only do one 731cd856db6SCarlos Maiolino * allocation within a transaction without deadlocks, we must commit 732cd856db6SCarlos Maiolino * the current transaction before returning the inode itself. 733cd856db6SCarlos Maiolino * In this case, therefore, we will set ialloc_context and return. 7341da177e4SLinus Torvalds * The caller should then commit the current transaction, start a new 7351da177e4SLinus Torvalds * transaction, and call xfs_ialloc() again to actually get the inode. 7361da177e4SLinus Torvalds * 7371da177e4SLinus Torvalds * To ensure that some other process does not grab the inode that 7381da177e4SLinus Torvalds * was allocated during the first call to xfs_ialloc(), this routine 7391da177e4SLinus Torvalds * also returns the [locked] bp pointing to the head of the freelist 7401da177e4SLinus Torvalds * as ialloc_context. The caller should hold this buffer across 7411da177e4SLinus Torvalds * the commit and pass it back into this routine on the second call. 742b11f94d5SDavid Chinner * 743b11f94d5SDavid Chinner * If we are allocating quota inodes, we do not have a parent inode 744b11f94d5SDavid Chinner * to attach to or associate with (i.e. pip == NULL) because they 745b11f94d5SDavid Chinner * are not linked into the directory structure - they are attached 746b11f94d5SDavid Chinner * directly to the superblock - and so have no parent. 7471da177e4SLinus Torvalds */ 7480d5a75e9SEric Sandeen static int 7491da177e4SLinus Torvalds xfs_ialloc( 7501da177e4SLinus Torvalds xfs_trans_t *tp, 7511da177e4SLinus Torvalds xfs_inode_t *pip, 752576b1d67SAl Viro umode_t mode, 75331b084aeSNathan Scott xfs_nlink_t nlink, 75466f36464SChristoph Hellwig dev_t rdev, 7556743099cSArkadiusz Mi?kiewicz prid_t prid, 7561da177e4SLinus Torvalds xfs_buf_t **ialloc_context, 7571da177e4SLinus Torvalds xfs_inode_t **ipp) 7581da177e4SLinus Torvalds { 75993848a99SChristoph Hellwig struct xfs_mount *mp = tp->t_mountp; 7601da177e4SLinus Torvalds xfs_ino_t ino; 7611da177e4SLinus Torvalds xfs_inode_t *ip; 7621da177e4SLinus Torvalds uint flags; 7631da177e4SLinus Torvalds int error; 76495582b00SDeepa Dinamani struct timespec64 tv; 7653987848cSDave Chinner struct inode *inode; 7661da177e4SLinus Torvalds 7671da177e4SLinus Torvalds /* 7681da177e4SLinus Torvalds * Call the space management code to pick 7691da177e4SLinus Torvalds * the on-disk inode to be allocated. 7701da177e4SLinus Torvalds */ 771f59cf5c2SChristoph Hellwig error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, 77208358906SChristoph Hellwig ialloc_context, &ino); 773bf904248SDavid Chinner if (error) 7741da177e4SLinus Torvalds return error; 77508358906SChristoph Hellwig if (*ialloc_context || ino == NULLFSINO) { 7761da177e4SLinus Torvalds *ipp = NULL; 7771da177e4SLinus Torvalds return 0; 7781da177e4SLinus Torvalds } 7791da177e4SLinus Torvalds ASSERT(*ialloc_context == NULL); 7801da177e4SLinus Torvalds 7811da177e4SLinus Torvalds /* 7828b26984dSDave Chinner * Protect against obviously corrupt allocation btree records. Later 7838b26984dSDave Chinner * xfs_iget checks will catch re-allocation of other active in-memory 7848b26984dSDave Chinner * and on-disk inodes. 
If we don't catch reallocating the parent inode 7858b26984dSDave Chinner * here we will deadlock in xfs_iget() so we have to do these checks 7868b26984dSDave Chinner * first. 7878b26984dSDave Chinner */ 7888b26984dSDave Chinner if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) { 7898b26984dSDave Chinner xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino); 7908b26984dSDave Chinner return -EFSCORRUPTED; 7918b26984dSDave Chinner } 7928b26984dSDave Chinner 7938b26984dSDave Chinner /* 7941da177e4SLinus Torvalds * Get the in-core inode with the lock held exclusively. 7951da177e4SLinus Torvalds * This is because we're setting fields here we need 7961da177e4SLinus Torvalds * to prevent others from looking at until we're done. 7971da177e4SLinus Torvalds */ 79893848a99SChristoph Hellwig error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, 799ec3ba85fSChristoph Hellwig XFS_ILOCK_EXCL, &ip); 800bf904248SDavid Chinner if (error) 8011da177e4SLinus Torvalds return error; 8021da177e4SLinus Torvalds ASSERT(ip != NULL); 8033987848cSDave Chinner inode = VFS_I(ip); 8041da177e4SLinus Torvalds 805263997a6SDave Chinner /* 806263997a6SDave Chinner * We always convert v1 inodes to v2 now - we only support filesystems 807263997a6SDave Chinner * with >= v2 inode capability, so there is no reason for ever leaving 808263997a6SDave Chinner * an inode in v1 format. 809263997a6SDave Chinner */ 810263997a6SDave Chinner if (ip->i_d.di_version == 1) 811263997a6SDave Chinner ip->i_d.di_version = 2; 812263997a6SDave Chinner 813c19b3b05SDave Chinner inode->i_mode = mode; 81454d7b5c1SDave Chinner set_nlink(inode, nlink); 8157aab1b28SDwight Engen ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid()); 8167aab1b28SDwight Engen ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid()); 81766f36464SChristoph Hellwig inode->i_rdev = rdev; 8186743099cSArkadiusz Mi?kiewicz xfs_set_projid(ip, prid); 8191da177e4SLinus Torvalds 820bd186aa9SChristoph Hellwig if (pip && XFS_INHERIT_GID(pip)) { 8211da177e4SLinus Torvalds ip->i_d.di_gid = pip->i_d.di_gid; 822c19b3b05SDave Chinner if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode)) 823c19b3b05SDave Chinner inode->i_mode |= S_ISGID; 8241da177e4SLinus Torvalds } 8251da177e4SLinus Torvalds 8261da177e4SLinus Torvalds /* 8271da177e4SLinus Torvalds * If the group ID of the new file does not match the effective group 8281da177e4SLinus Torvalds * ID or one of the supplementary group IDs, the S_ISGID bit is cleared 8291da177e4SLinus Torvalds * (and only if the irix_sgid_inherit compatibility variable is set). 
8301da177e4SLinus Torvalds */ 8311da177e4SLinus Torvalds if ((irix_sgid_inherit) && 832c19b3b05SDave Chinner (inode->i_mode & S_ISGID) && 833c19b3b05SDave Chinner (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) 834c19b3b05SDave Chinner inode->i_mode &= ~S_ISGID; 8351da177e4SLinus Torvalds 8361da177e4SLinus Torvalds ip->i_d.di_size = 0; 8371da177e4SLinus Torvalds ip->i_d.di_nextents = 0; 8381da177e4SLinus Torvalds ASSERT(ip->i_d.di_nblocks == 0); 839dff35fd4SChristoph Hellwig 840c2050a45SDeepa Dinamani tv = current_time(inode); 8413987848cSDave Chinner inode->i_mtime = tv; 8423987848cSDave Chinner inode->i_atime = tv; 8433987848cSDave Chinner inode->i_ctime = tv; 844dff35fd4SChristoph Hellwig 8451da177e4SLinus Torvalds ip->i_d.di_extsize = 0; 8461da177e4SLinus Torvalds ip->i_d.di_dmevmask = 0; 8471da177e4SLinus Torvalds ip->i_d.di_dmstate = 0; 8481da177e4SLinus Torvalds ip->i_d.di_flags = 0; 84993848a99SChristoph Hellwig 85093848a99SChristoph Hellwig if (ip->i_d.di_version == 3) { 851f0e28280SJeff Layton inode_set_iversion(inode, 1); 85293848a99SChristoph Hellwig ip->i_d.di_flags2 = 0; 853f7ca3522SDarrick J. Wong ip->i_d.di_cowextsize = 0; 854c8ce540dSDarrick J. Wong ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec; 855c8ce540dSDarrick J. Wong ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec; 85693848a99SChristoph Hellwig } 85793848a99SChristoph Hellwig 85893848a99SChristoph Hellwig 8591da177e4SLinus Torvalds flags = XFS_ILOG_CORE; 8601da177e4SLinus Torvalds switch (mode & S_IFMT) { 8611da177e4SLinus Torvalds case S_IFIFO: 8621da177e4SLinus Torvalds case S_IFCHR: 8631da177e4SLinus Torvalds case S_IFBLK: 8641da177e4SLinus Torvalds case S_IFSOCK: 8651da177e4SLinus Torvalds ip->i_d.di_format = XFS_DINODE_FMT_DEV; 8661da177e4SLinus Torvalds ip->i_df.if_flags = 0; 8671da177e4SLinus Torvalds flags |= XFS_ILOG_DEV; 8681da177e4SLinus Torvalds break; 8691da177e4SLinus Torvalds case S_IFREG: 8701da177e4SLinus Torvalds case S_IFDIR: 871b11f94d5SDavid Chinner if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 872365ca83dSNathan Scott uint di_flags = 0; 873365ca83dSNathan Scott 874abbede1bSAl Viro if (S_ISDIR(mode)) { 875365ca83dSNathan Scott if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 876365ca83dSNathan Scott di_flags |= XFS_DIFLAG_RTINHERIT; 877dd9f438eSNathan Scott if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 878dd9f438eSNathan Scott di_flags |= XFS_DIFLAG_EXTSZINHERIT; 879dd9f438eSNathan Scott ip->i_d.di_extsize = pip->i_d.di_extsize; 880dd9f438eSNathan Scott } 8819336e3a7SDave Chinner if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) 8829336e3a7SDave Chinner di_flags |= XFS_DIFLAG_PROJINHERIT; 883abbede1bSAl Viro } else if (S_ISREG(mode)) { 884613d7043SChristoph Hellwig if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 885365ca83dSNathan Scott di_flags |= XFS_DIFLAG_REALTIME; 886dd9f438eSNathan Scott if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 887dd9f438eSNathan Scott di_flags |= XFS_DIFLAG_EXTSIZE; 888dd9f438eSNathan Scott ip->i_d.di_extsize = pip->i_d.di_extsize; 889dd9f438eSNathan Scott } 8901da177e4SLinus Torvalds } 8911da177e4SLinus Torvalds if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) && 8921da177e4SLinus Torvalds xfs_inherit_noatime) 893365ca83dSNathan Scott di_flags |= XFS_DIFLAG_NOATIME; 8941da177e4SLinus Torvalds if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) && 8951da177e4SLinus Torvalds xfs_inherit_nodump) 896365ca83dSNathan Scott di_flags |= XFS_DIFLAG_NODUMP; 8971da177e4SLinus Torvalds if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) && 8981da177e4SLinus Torvalds xfs_inherit_sync) 
899365ca83dSNathan Scott di_flags |= XFS_DIFLAG_SYNC;
9001da177e4SLinus Torvalds if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
9011da177e4SLinus Torvalds xfs_inherit_nosymlinks)
902365ca83dSNathan Scott di_flags |= XFS_DIFLAG_NOSYMLINKS;
903d3446eacSBarry Naujok if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
904d3446eacSBarry Naujok xfs_inherit_nodefrag)
905d3446eacSBarry Naujok di_flags |= XFS_DIFLAG_NODEFRAG;
9062a82b8beSDavid Chinner if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
9072a82b8beSDavid Chinner di_flags |= XFS_DIFLAG_FILESTREAM;
90858f88ca2SDave Chinner
909365ca83dSNathan Scott ip->i_d.di_flags |= di_flags;
9101da177e4SLinus Torvalds }
911f7ca3522SDarrick J. Wong if (pip &&
912f7ca3522SDarrick J. Wong (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
913f7ca3522SDarrick J. Wong pip->i_d.di_version == 3 &&
914f7ca3522SDarrick J. Wong ip->i_d.di_version == 3) {
91556bdf855SLukas Czerner uint64_t di_flags2 = 0;
91656bdf855SLukas Czerner
917f7ca3522SDarrick J. Wong if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
91856bdf855SLukas Czerner di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
919f7ca3522SDarrick J. Wong ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
920f7ca3522SDarrick J. Wong }
92156bdf855SLukas Czerner if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
92256bdf855SLukas Czerner di_flags2 |= XFS_DIFLAG2_DAX;
92356bdf855SLukas Czerner
92456bdf855SLukas Czerner ip->i_d.di_flags2 |= di_flags2;
925f7ca3522SDarrick J. Wong }
9261da177e4SLinus Torvalds /* FALLTHROUGH */
9271da177e4SLinus Torvalds case S_IFLNK:
9281da177e4SLinus Torvalds ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
9291da177e4SLinus Torvalds ip->i_df.if_flags = XFS_IFEXTENTS;
930fcacbc3fSChristoph Hellwig ip->i_df.if_bytes = 0;
9316bdcf26aSChristoph Hellwig ip->i_df.if_u1.if_root = NULL;
9321da177e4SLinus Torvalds break;
9331da177e4SLinus Torvalds default:
9341da177e4SLinus Torvalds ASSERT(0);
9351da177e4SLinus Torvalds }
9361da177e4SLinus Torvalds /*
9371da177e4SLinus Torvalds * Attribute fork settings for new inode.
9381da177e4SLinus Torvalds */
9391da177e4SLinus Torvalds ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
9401da177e4SLinus Torvalds ip->i_d.di_anextents = 0;
9411da177e4SLinus Torvalds
9421da177e4SLinus Torvalds /*
9431da177e4SLinus Torvalds * Log the new values stuffed into the inode.
9441da177e4SLinus Torvalds */
945ddc3415aSChristoph Hellwig xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
9461da177e4SLinus Torvalds xfs_trans_log_inode(tp, ip, flags);
9471da177e4SLinus Torvalds
94858c90473SDave Chinner /* now that we have an i_mode we can setup the inode structure */
94941be8bedSChristoph Hellwig xfs_setup_inode(ip);
9501da177e4SLinus Torvalds
9511da177e4SLinus Torvalds *ipp = ip;
9521da177e4SLinus Torvalds return 0;
9531da177e4SLinus Torvalds }
9541da177e4SLinus Torvalds
955e546cb79SDave Chinner /*
956e546cb79SDave Chinner * Allocates a new inode from disk and returns a pointer to the
957e546cb79SDave Chinner * incore copy. This routine will internally commit the current
958e546cb79SDave Chinner * transaction and allocate a new one if the Space Manager needed
959e546cb79SDave Chinner * to do an allocation to replenish the inode free-list.
960e546cb79SDave Chinner *
961e546cb79SDave Chinner * This routine is designed to be called from xfs_create and
962e546cb79SDave Chinner * xfs_create_dir.
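 *
 * A caller sketch, mirroring the call made from xfs_create() below
 * (the error label is hypothetical):
 *
 *	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
 *	if (error)
 *		goto out_trans_cancel;
 *	... tp may now point to a new transaction; ip is returned locked ...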
963e546cb79SDave Chinner *
964e546cb79SDave Chinner */
965e546cb79SDave Chinner int
966e546cb79SDave Chinner xfs_dir_ialloc(
967e546cb79SDave Chinner xfs_trans_t **tpp, /* input: current transaction;
968e546cb79SDave Chinner output: may be a new transaction. */
969e546cb79SDave Chinner xfs_inode_t *dp, /* directory within which to
970e546cb79SDave Chinner allocate the inode. */
971e546cb79SDave Chinner umode_t mode,
972e546cb79SDave Chinner xfs_nlink_t nlink,
97366f36464SChristoph Hellwig dev_t rdev,
974e546cb79SDave Chinner prid_t prid, /* project id */
975c959025eSChandan Rajendra xfs_inode_t **ipp) /* pointer to inode; it will be
976e546cb79SDave Chinner locked. */
977e546cb79SDave Chinner {
978e546cb79SDave Chinner xfs_trans_t *tp;
979e546cb79SDave Chinner xfs_inode_t *ip;
980e546cb79SDave Chinner xfs_buf_t *ialloc_context = NULL;
981e546cb79SDave Chinner int code;
982e546cb79SDave Chinner void *dqinfo;
983e546cb79SDave Chinner uint tflags;
984e546cb79SDave Chinner
985e546cb79SDave Chinner tp = *tpp;
986e546cb79SDave Chinner ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
987e546cb79SDave Chinner
988e546cb79SDave Chinner /*
989e546cb79SDave Chinner * xfs_ialloc will return a pointer to an incore inode if
990e546cb79SDave Chinner * the Space Manager has an available inode on the free
991e546cb79SDave Chinner * list. Otherwise, it will do an allocation and replenish
992e546cb79SDave Chinner * the freelist. Since we can only do one allocation per
993e546cb79SDave Chinner * transaction without deadlocks, we will need to commit the
994e546cb79SDave Chinner * current transaction and start a new one. We will then
995e546cb79SDave Chinner * need to call xfs_ialloc again to get the inode.
996e546cb79SDave Chinner *
997e546cb79SDave Chinner * If xfs_ialloc did an allocation to replenish the freelist,
998e546cb79SDave Chinner * it returns the bp containing the head of the freelist as
999e546cb79SDave Chinner * ialloc_context. We will hold a lock on it across the
1000e546cb79SDave Chinner * transaction commit so that no other process can steal
1001e546cb79SDave Chinner * the inode(s) that we've just allocated.
1002e546cb79SDave Chinner */
1003f59cf5c2SChristoph Hellwig code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
1004f59cf5c2SChristoph Hellwig &ip);
1005e546cb79SDave Chinner
1006e546cb79SDave Chinner /*
1007e546cb79SDave Chinner * Return an error if we were unable to allocate a new inode.
1008e546cb79SDave Chinner * This should only happen if we run out of space on disk or
1009e546cb79SDave Chinner * encounter a disk error.
1010e546cb79SDave Chinner */
1011e546cb79SDave Chinner if (code) {
1012e546cb79SDave Chinner *ipp = NULL;
1013e546cb79SDave Chinner return code;
1014e546cb79SDave Chinner }
1015e546cb79SDave Chinner if (!ialloc_context && !ip) {
1016e546cb79SDave Chinner *ipp = NULL;
10172451337dSDave Chinner return -ENOSPC;
1018e546cb79SDave Chinner }
1019e546cb79SDave Chinner
1020e546cb79SDave Chinner /*
1021e546cb79SDave Chinner * If the AGI buffer is non-NULL, then we were unable to get an
1022e546cb79SDave Chinner * inode in one operation. We need to commit the current
1023e546cb79SDave Chinner * transaction and call xfs_ialloc() again. It is guaranteed
1024e546cb79SDave Chinner * to succeed the second time.
1025e546cb79SDave Chinner */
1026e546cb79SDave Chinner if (ialloc_context) {
1027e546cb79SDave Chinner /*
1028e546cb79SDave Chinner * Normally, xfs_trans_commit releases all the locks.
1029e546cb79SDave Chinner * We call bhold to hang on to the ialloc_context across 1030e546cb79SDave Chinner * the commit. Holding this buffer prevents any other 1031e546cb79SDave Chinner * processes from doing any allocations in this 1032e546cb79SDave Chinner * allocation group. 1033e546cb79SDave Chinner */ 1034e546cb79SDave Chinner xfs_trans_bhold(tp, ialloc_context); 1035e546cb79SDave Chinner 1036e546cb79SDave Chinner /* 1037e546cb79SDave Chinner * We want the quota changes to be associated with the next 1038e546cb79SDave Chinner * transaction, NOT this one. So, detach the dqinfo from this 1039e546cb79SDave Chinner * and attach it to the next transaction. 1040e546cb79SDave Chinner */ 1041e546cb79SDave Chinner dqinfo = NULL; 1042e546cb79SDave Chinner tflags = 0; 1043e546cb79SDave Chinner if (tp->t_dqinfo) { 1044e546cb79SDave Chinner dqinfo = (void *)tp->t_dqinfo; 1045e546cb79SDave Chinner tp->t_dqinfo = NULL; 1046e546cb79SDave Chinner tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY; 1047e546cb79SDave Chinner tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY); 1048e546cb79SDave Chinner } 1049e546cb79SDave Chinner 1050411350dfSChristoph Hellwig code = xfs_trans_roll(&tp); 10513d3c8b52SJie Liu 1052e546cb79SDave Chinner /* 1053e546cb79SDave Chinner * Re-attach the quota info that we detached from prev trx. 1054e546cb79SDave Chinner */ 1055e546cb79SDave Chinner if (dqinfo) { 1056e546cb79SDave Chinner tp->t_dqinfo = dqinfo; 1057e546cb79SDave Chinner tp->t_flags |= tflags; 1058e546cb79SDave Chinner } 1059e546cb79SDave Chinner 1060e546cb79SDave Chinner if (code) { 1061e546cb79SDave Chinner xfs_buf_relse(ialloc_context); 10622e6db6c4SChristoph Hellwig *tpp = tp; 1063e546cb79SDave Chinner *ipp = NULL; 1064e546cb79SDave Chinner return code; 1065e546cb79SDave Chinner } 1066e546cb79SDave Chinner xfs_trans_bjoin(tp, ialloc_context); 1067e546cb79SDave Chinner 1068e546cb79SDave Chinner /* 1069e546cb79SDave Chinner * Call ialloc again. Since we've locked out all 1070e546cb79SDave Chinner * other allocations in this allocation group, 1071e546cb79SDave Chinner * this call should always succeed. 1072e546cb79SDave Chinner */ 1073e546cb79SDave Chinner code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, 1074f59cf5c2SChristoph Hellwig &ialloc_context, &ip); 1075e546cb79SDave Chinner 1076e546cb79SDave Chinner /* 1077e546cb79SDave Chinner * If we get an error at this point, return to the caller 1078e546cb79SDave Chinner * so that the current transaction can be aborted. 1079e546cb79SDave Chinner */ 1080e546cb79SDave Chinner if (code) { 1081e546cb79SDave Chinner *tpp = tp; 1082e546cb79SDave Chinner *ipp = NULL; 1083e546cb79SDave Chinner return code; 1084e546cb79SDave Chinner } 1085e546cb79SDave Chinner ASSERT(!ialloc_context && ip); 1086e546cb79SDave Chinner 1087e546cb79SDave Chinner } 1088e546cb79SDave Chinner 1089e546cb79SDave Chinner *ipp = ip; 1090e546cb79SDave Chinner *tpp = tp; 1091e546cb79SDave Chinner 1092e546cb79SDave Chinner return 0; 1093e546cb79SDave Chinner } 1094e546cb79SDave Chinner 1095e546cb79SDave Chinner /* 109654d7b5c1SDave Chinner * Decrement the link count on an inode & log the change. If this causes the 109754d7b5c1SDave Chinner * link count to go to zero, move the inode to AGI unlinked list so that it can 109854d7b5c1SDave Chinner * be freed when the last active reference goes away via xfs_inactive(). 
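 *
 * An unlink-style caller uses it roughly as follows (illustrative
 * sketch, not a quote of any one call site; the error label is
 * hypothetical):
 *
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	error = xfs_droplink(tp, ip);
 *	if (error)
 *		goto out_trans_cancel;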
1099e546cb79SDave Chinner */ 11000d5a75e9SEric Sandeen static int /* error */ 1101e546cb79SDave Chinner xfs_droplink( 1102e546cb79SDave Chinner xfs_trans_t *tp, 1103e546cb79SDave Chinner xfs_inode_t *ip) 1104e546cb79SDave Chinner { 1105e546cb79SDave Chinner xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); 1106e546cb79SDave Chinner 1107e546cb79SDave Chinner drop_nlink(VFS_I(ip)); 1108e546cb79SDave Chinner xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1109e546cb79SDave Chinner 111054d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink) 111154d7b5c1SDave Chinner return 0; 111254d7b5c1SDave Chinner 111354d7b5c1SDave Chinner return xfs_iunlink(tp, ip); 1114e546cb79SDave Chinner } 1115e546cb79SDave Chinner 1116e546cb79SDave Chinner /* 1117e546cb79SDave Chinner * Increment the link count on an inode & log the change. 1118e546cb79SDave Chinner */ 111991083269SEric Sandeen static void 1120e546cb79SDave Chinner xfs_bumplink( 1121e546cb79SDave Chinner xfs_trans_t *tp, 1122e546cb79SDave Chinner xfs_inode_t *ip) 1123e546cb79SDave Chinner { 1124e546cb79SDave Chinner xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); 1125e546cb79SDave Chinner 1126263997a6SDave Chinner ASSERT(ip->i_d.di_version > 1); 1127e546cb79SDave Chinner inc_nlink(VFS_I(ip)); 1128e546cb79SDave Chinner xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1129e546cb79SDave Chinner } 1130e546cb79SDave Chinner 1131c24b5dfaSDave Chinner int 1132c24b5dfaSDave Chinner xfs_create( 1133c24b5dfaSDave Chinner xfs_inode_t *dp, 1134c24b5dfaSDave Chinner struct xfs_name *name, 1135c24b5dfaSDave Chinner umode_t mode, 113666f36464SChristoph Hellwig dev_t rdev, 1137c24b5dfaSDave Chinner xfs_inode_t **ipp) 1138c24b5dfaSDave Chinner { 1139c24b5dfaSDave Chinner int is_dir = S_ISDIR(mode); 1140c24b5dfaSDave Chinner struct xfs_mount *mp = dp->i_mount; 1141c24b5dfaSDave Chinner struct xfs_inode *ip = NULL; 1142c24b5dfaSDave Chinner struct xfs_trans *tp = NULL; 1143c24b5dfaSDave Chinner int error; 1144c24b5dfaSDave Chinner bool unlock_dp_on_error = false; 1145c24b5dfaSDave Chinner prid_t prid; 1146c24b5dfaSDave Chinner struct xfs_dquot *udqp = NULL; 1147c24b5dfaSDave Chinner struct xfs_dquot *gdqp = NULL; 1148c24b5dfaSDave Chinner struct xfs_dquot *pdqp = NULL; 1149062647a8SBrian Foster struct xfs_trans_res *tres; 1150c24b5dfaSDave Chinner uint resblks; 1151c24b5dfaSDave Chinner 1152c24b5dfaSDave Chinner trace_xfs_create(dp, name); 1153c24b5dfaSDave Chinner 1154c24b5dfaSDave Chinner if (XFS_FORCED_SHUTDOWN(mp)) 11552451337dSDave Chinner return -EIO; 1156c24b5dfaSDave Chinner 1157163467d3SZhi Yong Wu prid = xfs_get_initial_prid(dp); 1158c24b5dfaSDave Chinner 1159c24b5dfaSDave Chinner /* 1160c24b5dfaSDave Chinner * Make sure that we have allocated dquot(s) on disk. 
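 * (For orientation, an illustrative summary of the quota flow in this
 * function: xfs_qm_vop_dqalloc() below hands back the user, group and
 * project dquots -- udqp, gdqp and pdqp -- which are then reserved
 * against with xfs_trans_reserve_quota() and finally attached to the
 * new inode by xfs_qm_vop_create_dqattach().)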
1161c24b5dfaSDave Chinner */ 11627aab1b28SDwight Engen error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()), 11637aab1b28SDwight Engen xfs_kgid_to_gid(current_fsgid()), prid, 1164c24b5dfaSDave Chinner XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 1165c24b5dfaSDave Chinner &udqp, &gdqp, &pdqp); 1166c24b5dfaSDave Chinner if (error) 1167c24b5dfaSDave Chinner return error; 1168c24b5dfaSDave Chinner 1169c24b5dfaSDave Chinner if (is_dir) { 1170c24b5dfaSDave Chinner resblks = XFS_MKDIR_SPACE_RES(mp, name->len); 1171062647a8SBrian Foster tres = &M_RES(mp)->tr_mkdir; 1172c24b5dfaSDave Chinner } else { 1173c24b5dfaSDave Chinner resblks = XFS_CREATE_SPACE_RES(mp, name->len); 1174062647a8SBrian Foster tres = &M_RES(mp)->tr_create; 1175c24b5dfaSDave Chinner } 1176c24b5dfaSDave Chinner 1177c24b5dfaSDave Chinner /* 1178c24b5dfaSDave Chinner * Initially assume that the file does not exist and 1179c24b5dfaSDave Chinner * reserve the resources for that case. If that is not 1180c24b5dfaSDave Chinner * the case we'll drop the one we have and get a more 1181c24b5dfaSDave Chinner * appropriate transaction later. 1182c24b5dfaSDave Chinner */ 1183253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 11842451337dSDave Chinner if (error == -ENOSPC) { 1185c24b5dfaSDave Chinner /* flush outstanding delalloc blocks and retry */ 1186c24b5dfaSDave Chinner xfs_flush_inodes(mp); 1187253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 1188c24b5dfaSDave Chinner } 11894906e215SChristoph Hellwig if (error) 1190253f4911SChristoph Hellwig goto out_release_inode; 1191c24b5dfaSDave Chinner 119265523218SChristoph Hellwig xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); 1193c24b5dfaSDave Chinner unlock_dp_on_error = true; 1194c24b5dfaSDave Chinner 1195c24b5dfaSDave Chinner /* 1196c24b5dfaSDave Chinner * Reserve disk quota and the inode. 1197c24b5dfaSDave Chinner */ 1198c24b5dfaSDave Chinner error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, 1199c24b5dfaSDave Chinner pdqp, resblks, 1, 0); 1200c24b5dfaSDave Chinner if (error) 1201c24b5dfaSDave Chinner goto out_trans_cancel; 1202c24b5dfaSDave Chinner 1203c24b5dfaSDave Chinner /* 1204c24b5dfaSDave Chinner * A newly created regular or special file just has one directory 1205c24b5dfaSDave Chinner * entry pointing to it, but a directory also has the "." entry 1206c24b5dfaSDave Chinner * pointing to itself. 1207c24b5dfaSDave Chinner */ 1208c959025eSChandan Rajendra error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip); 1209d6077aa3SJan Kara if (error) 1210c24b5dfaSDave Chinner goto out_trans_cancel; 1211c24b5dfaSDave Chinner 1212c24b5dfaSDave Chinner /* 1213c24b5dfaSDave Chinner * Now we join the directory inode to the transaction. We do not do it 1214c24b5dfaSDave Chinner * earlier because xfs_dir_ialloc might commit the previous transaction 1215c24b5dfaSDave Chinner * (and release all the locks). An error from here on will result in 1216c24b5dfaSDave Chinner * the transaction cancel unlocking dp so don't do it explicitly in the 1217c24b5dfaSDave Chinner * error path. 1218c24b5dfaSDave Chinner */ 121965523218SChristoph Hellwig xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); 1220c24b5dfaSDave Chinner unlock_dp_on_error = false; 1221c24b5dfaSDave Chinner 1222381eee69SBrian Foster error = xfs_dir_createname(tp, dp, name, ip->i_ino, 1223c9cfdb38SBrian Foster resblks ?
1224c24b5dfaSDave Chinner resblks - XFS_IALLOC_SPACE_RES(mp) : 0); 1225c24b5dfaSDave Chinner if (error) { 12262451337dSDave Chinner ASSERT(error != -ENOSPC); 12274906e215SChristoph Hellwig goto out_trans_cancel; 1228c24b5dfaSDave Chinner } 1229c24b5dfaSDave Chinner xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1230c24b5dfaSDave Chinner xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 1231c24b5dfaSDave Chinner 1232c24b5dfaSDave Chinner if (is_dir) { 1233c24b5dfaSDave Chinner error = xfs_dir_init(tp, ip, dp); 1234c24b5dfaSDave Chinner if (error) 1235c8eac49eSBrian Foster goto out_trans_cancel; 1236c24b5dfaSDave Chinner 123791083269SEric Sandeen xfs_bumplink(tp, dp); 1238c24b5dfaSDave Chinner } 1239c24b5dfaSDave Chinner 1240c24b5dfaSDave Chinner /* 1241c24b5dfaSDave Chinner * If this is a synchronous mount, make sure that the 1242c24b5dfaSDave Chinner * create transaction goes to disk before returning to 1243c24b5dfaSDave Chinner * the user. 1244c24b5dfaSDave Chinner */ 1245c24b5dfaSDave Chinner if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 1246c24b5dfaSDave Chinner xfs_trans_set_sync(tp); 1247c24b5dfaSDave Chinner 1248c24b5dfaSDave Chinner /* 1249c24b5dfaSDave Chinner * Attach the dquot(s) to the inodes and modify them incore. 1250c24b5dfaSDave Chinner * These ids of the inode couldn't have changed since the new 1251c24b5dfaSDave Chinner * inode has been locked ever since it was created. 1252c24b5dfaSDave Chinner */ 1253c24b5dfaSDave Chinner xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp); 1254c24b5dfaSDave Chinner 125570393313SChristoph Hellwig error = xfs_trans_commit(tp); 1256c24b5dfaSDave Chinner if (error) 1257c24b5dfaSDave Chinner goto out_release_inode; 1258c24b5dfaSDave Chinner 1259c24b5dfaSDave Chinner xfs_qm_dqrele(udqp); 1260c24b5dfaSDave Chinner xfs_qm_dqrele(gdqp); 1261c24b5dfaSDave Chinner xfs_qm_dqrele(pdqp); 1262c24b5dfaSDave Chinner 1263c24b5dfaSDave Chinner *ipp = ip; 1264c24b5dfaSDave Chinner return 0; 1265c24b5dfaSDave Chinner 1266c24b5dfaSDave Chinner out_trans_cancel: 12674906e215SChristoph Hellwig xfs_trans_cancel(tp); 1268c24b5dfaSDave Chinner out_release_inode: 1269c24b5dfaSDave Chinner /* 127058c90473SDave Chinner * Wait until after the current transaction is aborted to finish the 127158c90473SDave Chinner * setup of the inode and release the inode. This prevents recursive 127258c90473SDave Chinner * transactions and deadlocks from xfs_inactive. 1273c24b5dfaSDave Chinner */ 127458c90473SDave Chinner if (ip) { 127558c90473SDave Chinner xfs_finish_inode_setup(ip); 127644a8736bSDarrick J. 
Wong xfs_irele(ip); 127758c90473SDave Chinner } 1278c24b5dfaSDave Chinner 1279c24b5dfaSDave Chinner xfs_qm_dqrele(udqp); 1280c24b5dfaSDave Chinner xfs_qm_dqrele(gdqp); 1281c24b5dfaSDave Chinner xfs_qm_dqrele(pdqp); 1282c24b5dfaSDave Chinner 1283c24b5dfaSDave Chinner if (unlock_dp_on_error) 128465523218SChristoph Hellwig xfs_iunlock(dp, XFS_ILOCK_EXCL); 1285c24b5dfaSDave Chinner return error; 1286c24b5dfaSDave Chinner } 1287c24b5dfaSDave Chinner 1288c24b5dfaSDave Chinner int 128999b6436bSZhi Yong Wu xfs_create_tmpfile( 129099b6436bSZhi Yong Wu struct xfs_inode *dp, 1291330033d6SBrian Foster umode_t mode, 1292330033d6SBrian Foster struct xfs_inode **ipp) 129399b6436bSZhi Yong Wu { 129499b6436bSZhi Yong Wu struct xfs_mount *mp = dp->i_mount; 129599b6436bSZhi Yong Wu struct xfs_inode *ip = NULL; 129699b6436bSZhi Yong Wu struct xfs_trans *tp = NULL; 129799b6436bSZhi Yong Wu int error; 129899b6436bSZhi Yong Wu prid_t prid; 129999b6436bSZhi Yong Wu struct xfs_dquot *udqp = NULL; 130099b6436bSZhi Yong Wu struct xfs_dquot *gdqp = NULL; 130199b6436bSZhi Yong Wu struct xfs_dquot *pdqp = NULL; 130299b6436bSZhi Yong Wu struct xfs_trans_res *tres; 130399b6436bSZhi Yong Wu uint resblks; 130499b6436bSZhi Yong Wu 130599b6436bSZhi Yong Wu if (XFS_FORCED_SHUTDOWN(mp)) 13062451337dSDave Chinner return -EIO; 130799b6436bSZhi Yong Wu 130899b6436bSZhi Yong Wu prid = xfs_get_initial_prid(dp); 130999b6436bSZhi Yong Wu 131099b6436bSZhi Yong Wu /* 131199b6436bSZhi Yong Wu * Make sure that we have allocated dquot(s) on disk. 131299b6436bSZhi Yong Wu */ 131399b6436bSZhi Yong Wu error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()), 131499b6436bSZhi Yong Wu xfs_kgid_to_gid(current_fsgid()), prid, 131599b6436bSZhi Yong Wu XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 131699b6436bSZhi Yong Wu &udqp, &gdqp, &pdqp); 131799b6436bSZhi Yong Wu if (error) 131899b6436bSZhi Yong Wu return error; 131999b6436bSZhi Yong Wu 132099b6436bSZhi Yong Wu resblks = XFS_IALLOC_SPACE_RES(mp); 132199b6436bSZhi Yong Wu tres = &M_RES(mp)->tr_create_tmpfile; 1322253f4911SChristoph Hellwig 1323253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 13244906e215SChristoph Hellwig if (error) 1325253f4911SChristoph Hellwig goto out_release_inode; 132699b6436bSZhi Yong Wu 132799b6436bSZhi Yong Wu error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, 132899b6436bSZhi Yong Wu pdqp, resblks, 1, 0); 132999b6436bSZhi Yong Wu if (error) 133099b6436bSZhi Yong Wu goto out_trans_cancel; 133199b6436bSZhi Yong Wu 1332c4a6bf7fSDarrick J. Wong error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip); 1333d6077aa3SJan Kara if (error) 133499b6436bSZhi Yong Wu goto out_trans_cancel; 133599b6436bSZhi Yong Wu 133699b6436bSZhi Yong Wu if (mp->m_flags & XFS_MOUNT_WSYNC) 133799b6436bSZhi Yong Wu xfs_trans_set_sync(tp); 133899b6436bSZhi Yong Wu 133999b6436bSZhi Yong Wu /* 134099b6436bSZhi Yong Wu * Attach the dquot(s) to the inodes and modify them incore. 134199b6436bSZhi Yong Wu * These ids of the inode couldn't have changed since the new 134299b6436bSZhi Yong Wu * inode has been locked ever since it was created. 
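 * (Note the contrast with xfs_create(): no directory entry is created
 * for a tmpfile. Instead the new inode goes straight onto the AGI
 * unlinked list via the xfs_iunlink() call below, which is what gives
 * O_TMPFILE its unlinked-from-birth semantics.)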
134399b6436bSZhi Yong Wu */ 134499b6436bSZhi Yong Wu xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp); 134599b6436bSZhi Yong Wu 134699b6436bSZhi Yong Wu error = xfs_iunlink(tp, ip); 134799b6436bSZhi Yong Wu if (error) 13484906e215SChristoph Hellwig goto out_trans_cancel; 134999b6436bSZhi Yong Wu 135070393313SChristoph Hellwig error = xfs_trans_commit(tp); 135199b6436bSZhi Yong Wu if (error) 135299b6436bSZhi Yong Wu goto out_release_inode; 135399b6436bSZhi Yong Wu 135499b6436bSZhi Yong Wu xfs_qm_dqrele(udqp); 135599b6436bSZhi Yong Wu xfs_qm_dqrele(gdqp); 135699b6436bSZhi Yong Wu xfs_qm_dqrele(pdqp); 135799b6436bSZhi Yong Wu 1358330033d6SBrian Foster *ipp = ip; 135999b6436bSZhi Yong Wu return 0; 136099b6436bSZhi Yong Wu 136199b6436bSZhi Yong Wu out_trans_cancel: 13624906e215SChristoph Hellwig xfs_trans_cancel(tp); 136399b6436bSZhi Yong Wu out_release_inode: 136499b6436bSZhi Yong Wu /* 136558c90473SDave Chinner * Wait until after the current transaction is aborted to finish the 136658c90473SDave Chinner * setup of the inode and release the inode. This prevents recursive 136758c90473SDave Chinner * transactions and deadlocks from xfs_inactive. 136899b6436bSZhi Yong Wu */ 136958c90473SDave Chinner if (ip) { 137058c90473SDave Chinner xfs_finish_inode_setup(ip); 137144a8736bSDarrick J. Wong xfs_irele(ip); 137258c90473SDave Chinner } 137399b6436bSZhi Yong Wu 137499b6436bSZhi Yong Wu xfs_qm_dqrele(udqp); 137599b6436bSZhi Yong Wu xfs_qm_dqrele(gdqp); 137699b6436bSZhi Yong Wu xfs_qm_dqrele(pdqp); 137799b6436bSZhi Yong Wu 137899b6436bSZhi Yong Wu return error; 137999b6436bSZhi Yong Wu } 138099b6436bSZhi Yong Wu 138199b6436bSZhi Yong Wu int 1382c24b5dfaSDave Chinner xfs_link( 1383c24b5dfaSDave Chinner xfs_inode_t *tdp, 1384c24b5dfaSDave Chinner xfs_inode_t *sip, 1385c24b5dfaSDave Chinner struct xfs_name *target_name) 1386c24b5dfaSDave Chinner { 1387c24b5dfaSDave Chinner xfs_mount_t *mp = tdp->i_mount; 1388c24b5dfaSDave Chinner xfs_trans_t *tp; 1389c24b5dfaSDave Chinner int error; 1390c24b5dfaSDave Chinner int resblks; 1391c24b5dfaSDave Chinner 1392c24b5dfaSDave Chinner trace_xfs_link(tdp, target_name); 1393c24b5dfaSDave Chinner 1394c19b3b05SDave Chinner ASSERT(!S_ISDIR(VFS_I(sip)->i_mode)); 1395c24b5dfaSDave Chinner 1396c24b5dfaSDave Chinner if (XFS_FORCED_SHUTDOWN(mp)) 13972451337dSDave Chinner return -EIO; 1398c24b5dfaSDave Chinner 1399c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(sip); 1400c24b5dfaSDave Chinner if (error) 1401c24b5dfaSDave Chinner goto std_return; 1402c24b5dfaSDave Chinner 1403c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(tdp); 1404c24b5dfaSDave Chinner if (error) 1405c24b5dfaSDave Chinner goto std_return; 1406c24b5dfaSDave Chinner 1407c24b5dfaSDave Chinner resblks = XFS_LINK_SPACE_RES(mp, target_name->len); 1408253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp); 14092451337dSDave Chinner if (error == -ENOSPC) { 1410c24b5dfaSDave Chinner resblks = 0; 1411253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp); 1412c24b5dfaSDave Chinner } 14134906e215SChristoph Hellwig if (error) 1414253f4911SChristoph Hellwig goto std_return; 1415c24b5dfaSDave Chinner 14167c2d238aSDarrick J. 
Wong xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL); 1417c24b5dfaSDave Chinner 1418c24b5dfaSDave Chinner xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL); 141965523218SChristoph Hellwig xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL); 1420c24b5dfaSDave Chinner 1421c24b5dfaSDave Chinner /* 1422c24b5dfaSDave Chinner * If we are using project inheritance, we only allow hard link 1423c24b5dfaSDave Chinner * creation in our tree when the project IDs are the same; else 1424c24b5dfaSDave Chinner * the tree quota mechanism could be circumvented. 1425c24b5dfaSDave Chinner */ 1426c24b5dfaSDave Chinner if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && 1427c24b5dfaSDave Chinner (xfs_get_projid(tdp) != xfs_get_projid(sip)))) { 14282451337dSDave Chinner error = -EXDEV; 1429c24b5dfaSDave Chinner goto error_return; 1430c24b5dfaSDave Chinner } 1431c24b5dfaSDave Chinner 143294f3cad5SEric Sandeen if (!resblks) { 143394f3cad5SEric Sandeen error = xfs_dir_canenter(tp, tdp, target_name); 1434c24b5dfaSDave Chinner if (error) 1435c24b5dfaSDave Chinner goto error_return; 143694f3cad5SEric Sandeen } 1437c24b5dfaSDave Chinner 143854d7b5c1SDave Chinner /* 143954d7b5c1SDave Chinner * Handle initial link state of O_TMPFILE inode 144054d7b5c1SDave Chinner */ 144154d7b5c1SDave Chinner if (VFS_I(sip)->i_nlink == 0) { 1442ab297431SZhi Yong Wu error = xfs_iunlink_remove(tp, sip); 1443ab297431SZhi Yong Wu if (error) 14444906e215SChristoph Hellwig goto error_return; 1445ab297431SZhi Yong Wu } 1446ab297431SZhi Yong Wu 1447c24b5dfaSDave Chinner error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino, 1448381eee69SBrian Foster resblks); 1449c24b5dfaSDave Chinner if (error) 14504906e215SChristoph Hellwig goto error_return; 1451c24b5dfaSDave Chinner xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1452c24b5dfaSDave Chinner xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); 1453c24b5dfaSDave Chinner 145491083269SEric Sandeen xfs_bumplink(tp, sip); 1455c24b5dfaSDave Chinner 1456c24b5dfaSDave Chinner /* 1457c24b5dfaSDave Chinner * If this is a synchronous mount, make sure that the 1458c24b5dfaSDave Chinner * link transaction goes to disk before returning to 1459c24b5dfaSDave Chinner * the user. 1460c24b5dfaSDave Chinner */ 1461f6106efaSEric Sandeen if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 1462c24b5dfaSDave Chinner xfs_trans_set_sync(tp); 1463c24b5dfaSDave Chinner 146470393313SChristoph Hellwig return xfs_trans_commit(tp); 1465c24b5dfaSDave Chinner 1466c24b5dfaSDave Chinner error_return: 14674906e215SChristoph Hellwig xfs_trans_cancel(tp); 1468c24b5dfaSDave Chinner std_return: 1469c24b5dfaSDave Chinner return error; 1470c24b5dfaSDave Chinner } 1471c24b5dfaSDave Chinner 1472363e59baSDarrick J. Wong /* Clear the reflink flag and the cowblocks tag if possible. */ 1473363e59baSDarrick J. Wong static void 1474363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags( 1475363e59baSDarrick J. Wong struct xfs_inode *ip) 1476363e59baSDarrick J. Wong { 1477363e59baSDarrick J. Wong struct xfs_ifork *dfork; 1478363e59baSDarrick J. Wong struct xfs_ifork *cfork; 1479363e59baSDarrick J. Wong 1480363e59baSDarrick J. Wong if (!xfs_is_reflink_inode(ip)) 1481363e59baSDarrick J. Wong return; 1482363e59baSDarrick J. Wong dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 1483363e59baSDarrick J. Wong cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK); 1484363e59baSDarrick J. Wong if (dfork->if_bytes == 0 && cfork->if_bytes == 0) 1485363e59baSDarrick J. Wong ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; 1486363e59baSDarrick J. 
Wong if (cfork->if_bytes == 0) 1487363e59baSDarrick J. Wong xfs_inode_clear_cowblocks_tag(ip); 1488363e59baSDarrick J. Wong } 1489363e59baSDarrick J. Wong 14901da177e4SLinus Torvalds /* 14918f04c47aSChristoph Hellwig * Free up the underlying blocks past new_size. The new size must be smaller 14928f04c47aSChristoph Hellwig * than the current size. This routine can be used both for the attribute and 14938f04c47aSChristoph Hellwig * data fork, and does not modify the inode size, which is left to the caller. 14941da177e4SLinus Torvalds * 1495f6485057SDavid Chinner * The transaction passed to this routine must have made a permanent log 1496f6485057SDavid Chinner * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the 1497f6485057SDavid Chinner * given transaction and start new ones, so make sure everything involved in 1498f6485057SDavid Chinner * the transaction is tidy before calling here. Some transaction will be 1499f6485057SDavid Chinner * returned to the caller to be committed. The incoming transaction must 1500f6485057SDavid Chinner * already include the inode, and both inode locks must be held exclusively. 1501f6485057SDavid Chinner * The inode must also be "held" within the transaction. On return the inode 1502f6485057SDavid Chinner * will be "held" within the returned transaction. This routine does NOT 1503f6485057SDavid Chinner * require any disk space to be reserved for it within the transaction. 15041da177e4SLinus Torvalds * 1505f6485057SDavid Chinner * If we get an error, we must return with the inode locked and linked into the 1506f6485057SDavid Chinner * current transaction. This keeps things simple for the higher level code, 1507f6485057SDavid Chinner * because it always knows that the inode is locked and held in the transaction 1508f6485057SDavid Chinner * that returns to it whether errors occur or not. We don't mark the inode 1509f6485057SDavid Chinner * dirty on error so that transactions can be easily aborted if possible. 
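 *
 * A minimal caller sketch, for illustration only (xfs_inactive_truncate()
 * below follows this pattern for the data fork; new_size stands in for a
 * caller-supplied value):
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	...
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	...
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);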
15101da177e4SLinus Torvalds */ 15111da177e4SLinus Torvalds int 15124e529339SBrian Foster xfs_itruncate_extents_flags( 15138f04c47aSChristoph Hellwig struct xfs_trans **tpp, 15148f04c47aSChristoph Hellwig struct xfs_inode *ip, 15158f04c47aSChristoph Hellwig int whichfork, 151613b86fc3SBrian Foster xfs_fsize_t new_size, 15174e529339SBrian Foster int flags) 15181da177e4SLinus Torvalds { 15198f04c47aSChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 15208f04c47aSChristoph Hellwig struct xfs_trans *tp = *tpp; 15211da177e4SLinus Torvalds xfs_fileoff_t first_unmap_block; 15221da177e4SLinus Torvalds xfs_fileoff_t last_block; 15238f04c47aSChristoph Hellwig xfs_filblks_t unmap_len; 15248f04c47aSChristoph Hellwig int error = 0; 15258f04c47aSChristoph Hellwig int done = 0; 15261da177e4SLinus Torvalds 15270b56185bSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 15280b56185bSChristoph Hellwig ASSERT(!atomic_read(&VFS_I(ip)->i_count) || 15290b56185bSChristoph Hellwig xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 1530ce7ae151SChristoph Hellwig ASSERT(new_size <= XFS_ISIZE(ip)); 15318f04c47aSChristoph Hellwig ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 15321da177e4SLinus Torvalds ASSERT(ip->i_itemp != NULL); 1533898621d5SChristoph Hellwig ASSERT(ip->i_itemp->ili_lock_flags == 0); 15341da177e4SLinus Torvalds ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); 15351da177e4SLinus Torvalds 1536673e8e59SChristoph Hellwig trace_xfs_itruncate_extents_start(ip, new_size); 1537673e8e59SChristoph Hellwig 15384e529339SBrian Foster flags |= xfs_bmapi_aflag(whichfork); 153913b86fc3SBrian Foster 15401da177e4SLinus Torvalds /* 15411da177e4SLinus Torvalds * Since it is possible for space to become allocated beyond 15421da177e4SLinus Torvalds * the end of the file (in a crash where the space is allocated 15431da177e4SLinus Torvalds * but the inode size is not yet updated), simply remove any 15441da177e4SLinus Torvalds * blocks which show up between the new EOF and the maximum 15451da177e4SLinus Torvalds * possible file size. If the first block to be removed is 15461da177e4SLinus Torvalds * beyond the maximum file size (ie it is the same as last_block), 15471da177e4SLinus Torvalds * then there is nothing to do. 15481da177e4SLinus Torvalds */ 15498f04c47aSChristoph Hellwig first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); 155032972383SDave Chinner last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); 15518f04c47aSChristoph Hellwig if (first_unmap_block == last_block) 15528f04c47aSChristoph Hellwig return 0; 15538f04c47aSChristoph Hellwig 15548f04c47aSChristoph Hellwig ASSERT(first_unmap_block < last_block); 15551da177e4SLinus Torvalds unmap_len = last_block - first_unmap_block + 1; 15561da177e4SLinus Torvalds while (!done) { 155702dff7bfSBrian Foster ASSERT(tp->t_firstblock == NULLFSBLOCK); 155813b86fc3SBrian Foster error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags, 15592af52842SBrian Foster XFS_ITRUNC_MAX_EXTENTS, &done); 15608f04c47aSChristoph Hellwig if (error) 1561d5a2e289SBrian Foster goto out; 15621da177e4SLinus Torvalds 15631da177e4SLinus Torvalds /* 15641da177e4SLinus Torvalds * Duplicate the transaction that has the permanent 15651da177e4SLinus Torvalds * reservation and commit the old transaction. 
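 * (Concretely, in the code below: xfs_defer_finish() completes any
 * deferred ops accumulated by xfs_bunmapi(), rolling the transaction as
 * it goes, and xfs_trans_roll_inode() then commits and re-joins the
 * inode to the follow-up transaction so the permanent reservation
 * carries over.)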
15661da177e4SLinus Torvalds */ 15679e28a242SBrian Foster error = xfs_defer_finish(&tp); 15688f04c47aSChristoph Hellwig if (error) 15699b1f4e98SBrian Foster goto out; 15701da177e4SLinus Torvalds 1571411350dfSChristoph Hellwig error = xfs_trans_roll_inode(&tp, ip); 15721da177e4SLinus Torvalds if (error) 15738f04c47aSChristoph Hellwig goto out; 15741da177e4SLinus Torvalds } 15758f04c47aSChristoph Hellwig 15764919d42aSDarrick J. Wong if (whichfork == XFS_DATA_FORK) { 1577aa8968f2SDarrick J. Wong /* Remove all pending CoW reservations. */ 15784919d42aSDarrick J. Wong error = xfs_reflink_cancel_cow_blocks(ip, &tp, 15794919d42aSDarrick J. Wong first_unmap_block, last_block, true); 1580aa8968f2SDarrick J. Wong if (error) 1581aa8968f2SDarrick J. Wong goto out; 1582aa8968f2SDarrick J. Wong 1583363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags(ip); 15844919d42aSDarrick J. Wong } 1585aa8968f2SDarrick J. Wong 1586673e8e59SChristoph Hellwig /* 1587673e8e59SChristoph Hellwig * Always re-log the inode so that our permanent transaction can keep 1588673e8e59SChristoph Hellwig * on rolling it forward in the log. 1589673e8e59SChristoph Hellwig */ 1590673e8e59SChristoph Hellwig xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1591673e8e59SChristoph Hellwig 1592673e8e59SChristoph Hellwig trace_xfs_itruncate_extents_end(ip, new_size); 1593673e8e59SChristoph Hellwig 15948f04c47aSChristoph Hellwig out: 15958f04c47aSChristoph Hellwig *tpp = tp; 15968f04c47aSChristoph Hellwig return error; 15978f04c47aSChristoph Hellwig } 15988f04c47aSChristoph Hellwig 1599c24b5dfaSDave Chinner int 1600c24b5dfaSDave Chinner xfs_release( 1601c24b5dfaSDave Chinner xfs_inode_t *ip) 1602c24b5dfaSDave Chinner { 1603c24b5dfaSDave Chinner xfs_mount_t *mp = ip->i_mount; 1604c24b5dfaSDave Chinner int error; 1605c24b5dfaSDave Chinner 1606c19b3b05SDave Chinner if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0)) 1607c24b5dfaSDave Chinner return 0; 1608c24b5dfaSDave Chinner 1609c24b5dfaSDave Chinner /* If this is a read-only mount, don't do this (would generate I/O) */ 1610c24b5dfaSDave Chinner if (mp->m_flags & XFS_MOUNT_RDONLY) 1611c24b5dfaSDave Chinner return 0; 1612c24b5dfaSDave Chinner 1613c24b5dfaSDave Chinner if (!XFS_FORCED_SHUTDOWN(mp)) { 1614c24b5dfaSDave Chinner int truncated; 1615c24b5dfaSDave Chinner 1616c24b5dfaSDave Chinner /* 1617c24b5dfaSDave Chinner * If we previously truncated this file and removed old data 1618c24b5dfaSDave Chinner * in the process, we want to initiate "early" writeout on 1619c24b5dfaSDave Chinner * the last close. This is an attempt to combat the notorious 1620c24b5dfaSDave Chinner * NULL files problem which is particularly noticeable from a 1621c24b5dfaSDave Chinner * truncate down, buffered (re-)write (delalloc), followed by 1622c24b5dfaSDave Chinner * a crash. What we are effectively doing here is 1623c24b5dfaSDave Chinner * significantly reducing the time window where we'd otherwise 1624c24b5dfaSDave Chinner * be exposed to that problem. 
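 * (Concretely: a truncate down followed by a buffered rewrite leaves
 * delalloc blocks behind; crashing before they are converted can leave
 * the file full of NULLs. The filemap_flush() below starts writeback on
 * such a close to shrink that window.)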
1625c24b5dfaSDave Chinner */ 1626c24b5dfaSDave Chinner truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); 1627c24b5dfaSDave Chinner if (truncated) { 1628c24b5dfaSDave Chinner xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); 1629eac152b4SDave Chinner if (ip->i_delayed_blks > 0) { 16302451337dSDave Chinner error = filemap_flush(VFS_I(ip)->i_mapping); 1631c24b5dfaSDave Chinner if (error) 1632c24b5dfaSDave Chinner return error; 1633c24b5dfaSDave Chinner } 1634c24b5dfaSDave Chinner } 1635c24b5dfaSDave Chinner } 1636c24b5dfaSDave Chinner 163754d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink == 0) 1638c24b5dfaSDave Chinner return 0; 1639c24b5dfaSDave Chinner 1640c24b5dfaSDave Chinner if (xfs_can_free_eofblocks(ip, false)) { 1641c24b5dfaSDave Chinner 1642c24b5dfaSDave Chinner /* 1643a36b9261SBrian Foster * If the inode is being opened, written and closed 1644a36b9261SBrian Foster * frequently and we have delayed allocation blocks outstanding 1645a36b9261SBrian Foster * (e.g. streaming writes from the NFS server), truncating the 1646a36b9261SBrian Foster * blocks past EOF will cause fragmentation to occur. 1647a36b9261SBrian Foster * 1648a36b9261SBrian Foster * In this case don't do the truncation, but we have to be 1649a36b9261SBrian Foster * careful how we detect this case. Blocks beyond EOF show up as 1650a36b9261SBrian Foster * i_delayed_blks even when the inode is clean, so we need to 1651a36b9261SBrian Foster * truncate them away first before checking for a dirty release. 1652a36b9261SBrian Foster * Hence on the first dirty close we will still remove the 1653a36b9261SBrian Foster * speculative allocation, but after that we will leave it in 1654a36b9261SBrian Foster * place. 1655a36b9261SBrian Foster */ 1656a36b9261SBrian Foster if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE)) 1657a36b9261SBrian Foster return 0; 1658a36b9261SBrian Foster /* 1659c24b5dfaSDave Chinner * If we can't get the iolock just skip truncating the blocks 1660c24b5dfaSDave Chinner * past EOF because we could deadlock with the mmap_sem 1661c24b5dfaSDave Chinner * otherwise. We'll get another chance to drop them once the 1662c24b5dfaSDave Chinner * last reference to the inode is dropped, so we'll never leak 1663c24b5dfaSDave Chinner * blocks permanently. 1664c24b5dfaSDave Chinner */ 1665a36b9261SBrian Foster if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { 1666a36b9261SBrian Foster error = xfs_free_eofblocks(ip); 1667a36b9261SBrian Foster xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1668a36b9261SBrian Foster if (error) 1669c24b5dfaSDave Chinner return error; 1670a36b9261SBrian Foster } 1671c24b5dfaSDave Chinner 1672c24b5dfaSDave Chinner /* delalloc blocks after truncation means it really is dirty */ 1673c24b5dfaSDave Chinner if (ip->i_delayed_blks) 1674c24b5dfaSDave Chinner xfs_iflags_set(ip, XFS_IDIRTY_RELEASE); 1675c24b5dfaSDave Chinner } 1676c24b5dfaSDave Chinner return 0; 1677c24b5dfaSDave Chinner } 1678c24b5dfaSDave Chinner 1679c24b5dfaSDave Chinner /* 1680f7be2d7fSBrian Foster * xfs_inactive_truncate 1681f7be2d7fSBrian Foster * 1682f7be2d7fSBrian Foster * Called to perform a truncate when an inode becomes unlinked.
1683f7be2d7fSBrian Foster */ 1684f7be2d7fSBrian Foster STATIC int 1685f7be2d7fSBrian Foster xfs_inactive_truncate( 1686f7be2d7fSBrian Foster struct xfs_inode *ip) 1687f7be2d7fSBrian Foster { 1688f7be2d7fSBrian Foster struct xfs_mount *mp = ip->i_mount; 1689f7be2d7fSBrian Foster struct xfs_trans *tp; 1690f7be2d7fSBrian Foster int error; 1691f7be2d7fSBrian Foster 1692253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); 1693f7be2d7fSBrian Foster if (error) { 1694f7be2d7fSBrian Foster ASSERT(XFS_FORCED_SHUTDOWN(mp)); 1695f7be2d7fSBrian Foster return error; 1696f7be2d7fSBrian Foster } 1697f7be2d7fSBrian Foster xfs_ilock(ip, XFS_ILOCK_EXCL); 1698f7be2d7fSBrian Foster xfs_trans_ijoin(tp, ip, 0); 1699f7be2d7fSBrian Foster 1700f7be2d7fSBrian Foster /* 1701f7be2d7fSBrian Foster * Log the inode size first to prevent stale data exposure in the event 1702f7be2d7fSBrian Foster * of a system crash before the truncate completes. See the related 170369bca807SJan Kara * comment in xfs_vn_setattr_size() for details. 1704f7be2d7fSBrian Foster */ 1705f7be2d7fSBrian Foster ip->i_d.di_size = 0; 1706f7be2d7fSBrian Foster xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1707f7be2d7fSBrian Foster 1708f7be2d7fSBrian Foster error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); 1709f7be2d7fSBrian Foster if (error) 1710f7be2d7fSBrian Foster goto error_trans_cancel; 1711f7be2d7fSBrian Foster 1712f7be2d7fSBrian Foster ASSERT(ip->i_d.di_nextents == 0); 1713f7be2d7fSBrian Foster 171470393313SChristoph Hellwig error = xfs_trans_commit(tp); 1715f7be2d7fSBrian Foster if (error) 1716f7be2d7fSBrian Foster goto error_unlock; 1717f7be2d7fSBrian Foster 1718f7be2d7fSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 1719f7be2d7fSBrian Foster return 0; 1720f7be2d7fSBrian Foster 1721f7be2d7fSBrian Foster error_trans_cancel: 17224906e215SChristoph Hellwig xfs_trans_cancel(tp); 1723f7be2d7fSBrian Foster error_unlock: 1724f7be2d7fSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 1725f7be2d7fSBrian Foster return error; 1726f7be2d7fSBrian Foster } 1727f7be2d7fSBrian Foster 1728f7be2d7fSBrian Foster /* 172988877d2bSBrian Foster * xfs_inactive_ifree() 173088877d2bSBrian Foster * 173188877d2bSBrian Foster * Perform the inode free when an inode is unlinked. 173288877d2bSBrian Foster */ 173388877d2bSBrian Foster STATIC int 173488877d2bSBrian Foster xfs_inactive_ifree( 173588877d2bSBrian Foster struct xfs_inode *ip) 173688877d2bSBrian Foster { 173788877d2bSBrian Foster struct xfs_mount *mp = ip->i_mount; 173888877d2bSBrian Foster struct xfs_trans *tp; 173988877d2bSBrian Foster int error; 174088877d2bSBrian Foster 17419d43b180SBrian Foster /* 174276d771b4SChristoph Hellwig * We try to use a per-AG reservation for any block needed by the finobt 174376d771b4SChristoph Hellwig * tree, but as the finobt feature predates the per-AG reservation 174476d771b4SChristoph Hellwig * support a degraded file system might not have enough space for the 174576d771b4SChristoph Hellwig * reservation at mount time. In that case try to dip into the reserved 174676d771b4SChristoph Hellwig * pool and pray. 17479d43b180SBrian Foster * 17489d43b180SBrian Foster * Send a warning if the reservation does happen to fail, as the inode 17499d43b180SBrian Foster * now remains allocated and sits on the unlinked list until the fs is 17509d43b180SBrian Foster * repaired. 17519d43b180SBrian Foster */ 1752e1f6ca11SDarrick J. 
Wong if (unlikely(mp->m_finobt_nores)) { 1753253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 175476d771b4SChristoph Hellwig XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, 175576d771b4SChristoph Hellwig &tp); 175676d771b4SChristoph Hellwig } else { 175776d771b4SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp); 175876d771b4SChristoph Hellwig } 175988877d2bSBrian Foster if (error) { 17602451337dSDave Chinner if (error == -ENOSPC) { 17619d43b180SBrian Foster xfs_warn_ratelimited(mp, 17629d43b180SBrian Foster "Failed to remove inode(s) from unlinked list. " 17639d43b180SBrian Foster "Please free space, unmount and run xfs_repair."); 17649d43b180SBrian Foster } else { 176588877d2bSBrian Foster ASSERT(XFS_FORCED_SHUTDOWN(mp)); 17669d43b180SBrian Foster } 176788877d2bSBrian Foster return error; 176888877d2bSBrian Foster } 176988877d2bSBrian Foster 177088877d2bSBrian Foster xfs_ilock(ip, XFS_ILOCK_EXCL); 177188877d2bSBrian Foster xfs_trans_ijoin(tp, ip, 0); 177288877d2bSBrian Foster 17730e0417f3SBrian Foster error = xfs_ifree(tp, ip); 177488877d2bSBrian Foster if (error) { 177588877d2bSBrian Foster /* 177688877d2bSBrian Foster * If we fail to free the inode, shut down. The cancel 177788877d2bSBrian Foster * might do that, we need to make sure. Otherwise the 177888877d2bSBrian Foster * inode might be lost for a long time or forever. 177988877d2bSBrian Foster */ 178088877d2bSBrian Foster if (!XFS_FORCED_SHUTDOWN(mp)) { 178188877d2bSBrian Foster xfs_notice(mp, "%s: xfs_ifree returned error %d", 178288877d2bSBrian Foster __func__, error); 178388877d2bSBrian Foster xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); 178488877d2bSBrian Foster } 17854906e215SChristoph Hellwig xfs_trans_cancel(tp); 178688877d2bSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 178788877d2bSBrian Foster return error; 178888877d2bSBrian Foster } 178988877d2bSBrian Foster 179088877d2bSBrian Foster /* 179188877d2bSBrian Foster * Credit the quota account(s). The inode is gone. 179288877d2bSBrian Foster */ 179388877d2bSBrian Foster xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1); 179488877d2bSBrian Foster 179588877d2bSBrian Foster /* 1796d4a97a04SBrian Foster * Just ignore errors at this point. There is nothing we can do except 1797d4a97a04SBrian Foster * to try to keep going. Make sure it's not a silent error. 179888877d2bSBrian Foster */ 179970393313SChristoph Hellwig error = xfs_trans_commit(tp); 180088877d2bSBrian Foster if (error) 180188877d2bSBrian Foster xfs_notice(mp, "%s: xfs_trans_commit returned error %d", 180288877d2bSBrian Foster __func__, error); 180388877d2bSBrian Foster 180488877d2bSBrian Foster xfs_iunlock(ip, XFS_ILOCK_EXCL); 180588877d2bSBrian Foster return 0; 180688877d2bSBrian Foster } 180788877d2bSBrian Foster 180888877d2bSBrian Foster /* 1809c24b5dfaSDave Chinner * xfs_inactive 1810c24b5dfaSDave Chinner * 1811c24b5dfaSDave Chinner * This is called when the reference count for the vnode 1812c24b5dfaSDave Chinner * goes to zero. If the file has been unlinked, then it must 1813c24b5dfaSDave Chinner * now be truncated. Also, we clear all of the read-ahead state 1814c24b5dfaSDave Chinner * kept for the inode here since the file is now closed.
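 *
 * In outline (a summary of the code that follows): an already-free
 * inode or a read-only mount bails out early; a still-linked inode only
 * has its speculative post-EOF preallocation trimmed; an unlinked inode
 * is truncated (or has its symlink freed), has any attribute fork torn
 * down via xfs_attr_inactive(), and is finally freed and its dquots
 * released via xfs_inactive_ifree() and xfs_qm_dqdetach().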
1815c24b5dfaSDave Chinner */ 181674564fb4SBrian Foster void 1817c24b5dfaSDave Chinner xfs_inactive( 1818c24b5dfaSDave Chinner xfs_inode_t *ip) 1819c24b5dfaSDave Chinner { 18203d3c8b52SJie Liu struct xfs_mount *mp; 1821c24b5dfaSDave Chinner int error; 1822c24b5dfaSDave Chinner int truncate = 0; 1823c24b5dfaSDave Chinner 1824c24b5dfaSDave Chinner /* 1825c24b5dfaSDave Chinner * If the inode is already free, then there can be nothing 1826c24b5dfaSDave Chinner * to clean up here. 1827c24b5dfaSDave Chinner */ 1828c19b3b05SDave Chinner if (VFS_I(ip)->i_mode == 0) { 1829c24b5dfaSDave Chinner ASSERT(ip->i_df.if_broot_bytes == 0); 183074564fb4SBrian Foster return; 1831c24b5dfaSDave Chinner } 1832c24b5dfaSDave Chinner 1833c24b5dfaSDave Chinner mp = ip->i_mount; 183417c12bcdSDarrick J. Wong ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY)); 1835c24b5dfaSDave Chinner 1836c24b5dfaSDave Chinner /* If this is a read-only mount, don't do this (would generate I/O) */ 1837c24b5dfaSDave Chinner if (mp->m_flags & XFS_MOUNT_RDONLY) 183874564fb4SBrian Foster return; 1839c24b5dfaSDave Chinner 18406231848cSDarrick J. Wong /* Try to clean out the cow blocks if there are any. */ 184151d62690SChristoph Hellwig if (xfs_inode_has_cow_data(ip)) 18426231848cSDarrick J. Wong xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); 18436231848cSDarrick J. Wong 184454d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink != 0) { 1845c24b5dfaSDave Chinner /* 1846c24b5dfaSDave Chinner * force is true because we are evicting an inode from the 1847c24b5dfaSDave Chinner * cache. Post-eof blocks must be freed, lest we end up with 1848c24b5dfaSDave Chinner * broken free space accounting. 18493b4683c2SBrian Foster * 18503b4683c2SBrian Foster * Note: don't bother with iolock here since lockdep complains 18513b4683c2SBrian Foster * about acquiring it in reclaim context. We have the only 18523b4683c2SBrian Foster * reference to the inode at this point anyways. 1853c24b5dfaSDave Chinner */ 18543b4683c2SBrian Foster if (xfs_can_free_eofblocks(ip, true)) 1855a36b9261SBrian Foster xfs_free_eofblocks(ip); 185674564fb4SBrian Foster 185774564fb4SBrian Foster return; 1858c24b5dfaSDave Chinner } 1859c24b5dfaSDave Chinner 1860c19b3b05SDave Chinner if (S_ISREG(VFS_I(ip)->i_mode) && 1861c24b5dfaSDave Chinner (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 || 1862c24b5dfaSDave Chinner ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0)) 1863c24b5dfaSDave Chinner truncate = 1; 1864c24b5dfaSDave Chinner 1865c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(ip); 1866c24b5dfaSDave Chinner if (error) 186774564fb4SBrian Foster return; 1868c24b5dfaSDave Chinner 1869c19b3b05SDave Chinner if (S_ISLNK(VFS_I(ip)->i_mode)) 187036b21ddeSBrian Foster error = xfs_inactive_symlink(ip); 1871f7be2d7fSBrian Foster else if (truncate) 1872f7be2d7fSBrian Foster error = xfs_inactive_truncate(ip); 187336b21ddeSBrian Foster if (error) 187474564fb4SBrian Foster return; 1875c24b5dfaSDave Chinner 1876c24b5dfaSDave Chinner /* 1877c24b5dfaSDave Chinner * If there are attributes associated with the file then blow them away 1878c24b5dfaSDave Chinner * now. The code calls a routine that recursively deconstructs the 18796dfe5a04SDave Chinner * attribute fork. It also blows away the in-core attribute fork.
1880c24b5dfaSDave Chinner */ 18816dfe5a04SDave Chinner if (XFS_IFORK_Q(ip)) { 1882c24b5dfaSDave Chinner error = xfs_attr_inactive(ip); 1883c24b5dfaSDave Chinner if (error) 188474564fb4SBrian Foster return; 1885c24b5dfaSDave Chinner } 1886c24b5dfaSDave Chinner 18876dfe5a04SDave Chinner ASSERT(!ip->i_afp); 1888c24b5dfaSDave Chinner ASSERT(ip->i_d.di_anextents == 0); 18896dfe5a04SDave Chinner ASSERT(ip->i_d.di_forkoff == 0); 1890c24b5dfaSDave Chinner 1891c24b5dfaSDave Chinner /* 1892c24b5dfaSDave Chinner * Free the inode. 1893c24b5dfaSDave Chinner */ 189488877d2bSBrian Foster error = xfs_inactive_ifree(ip); 1895c24b5dfaSDave Chinner if (error) 189674564fb4SBrian Foster return; 1897c24b5dfaSDave Chinner 1898c24b5dfaSDave Chinner /* 1899c24b5dfaSDave Chinner * Release the dquots held by inode, if any. 1900c24b5dfaSDave Chinner */ 1901c24b5dfaSDave Chinner xfs_qm_dqdetach(ip); 1902c24b5dfaSDave Chinner } 1903c24b5dfaSDave Chinner 19041da177e4SLinus Torvalds /* 19059b247179SDarrick J. Wong * In-Core Unlinked List Lookups 19069b247179SDarrick J. Wong * ============================= 19079b247179SDarrick J. Wong * 19089b247179SDarrick J. Wong * Every inode is supposed to be reachable from some other piece of metadata 19099b247179SDarrick J. Wong * with the exception of the root directory. Inodes with a connection to a 19109b247179SDarrick J. Wong * file descriptor but not linked from anywhere in the on-disk directory tree 19119b247179SDarrick J. Wong * are collectively known as unlinked inodes, though the filesystem itself 19129b247179SDarrick J. Wong * maintains links to these inodes so that on-disk metadata are consistent. 19139b247179SDarrick J. Wong * 19149b247179SDarrick J. Wong * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI 19159b247179SDarrick J. Wong * header contains a number of buckets that point to an inode, and each inode 19169b247179SDarrick J. Wong * record has a pointer to the next inode in the hash chain. This 19179b247179SDarrick J. Wong * singly-linked list causes scaling problems in the iunlink remove function 19189b247179SDarrick J. Wong * because we must walk that list to find the inode that points to the inode 19199b247179SDarrick J. Wong * being removed from the unlinked hash bucket list. 19209b247179SDarrick J. Wong * 19219b247179SDarrick J. Wong * What if we modelled the unlinked list as a collection of records capturing 19229b247179SDarrick J. Wong * "X.next_unlinked = Y" relations? If we indexed those records on Y, we'd 19239b247179SDarrick J. Wong * have a fast way to look up unlinked list predecessors, which avoids the 19249b247179SDarrick J. Wong * slow list walk. That's exactly what we do here (in-core) with a per-AG 19259b247179SDarrick J. Wong * rhashtable. 19269b247179SDarrick J. Wong * 19279b247179SDarrick J. Wong * Because this is a backref cache, we ignore operational failures since the 19289b247179SDarrick J. Wong * iunlink code can fall back to the slow bucket walk. The only errors that 19299b247179SDarrick J. Wong * should bubble out are for obviously incorrect situations. 19309b247179SDarrick J. Wong * 19319b247179SDarrick J. Wong * All users of the backref cache MUST hold the AGI buffer lock to serialize 19329b247179SDarrick J. Wong * access or have otherwise provided for concurrency control. 19339b247179SDarrick J. Wong */ 19349b247179SDarrick J. Wong 19359b247179SDarrick J. Wong /* Capture a "X.next_unlinked = Y" relationship. */ 19369b247179SDarrick J. Wong struct xfs_iunlink { 19379b247179SDarrick J. 
Wong struct rhash_head iu_rhash_head; 19389b247179SDarrick J. Wong xfs_agino_t iu_agino; /* X */ 19399b247179SDarrick J. Wong xfs_agino_t iu_next_unlinked; /* Y */ 19409b247179SDarrick J. Wong }; 19419b247179SDarrick J. Wong 19429b247179SDarrick J. Wong /* Unlinked list predecessor lookup hashtable construction */ 19439b247179SDarrick J. Wong static int 19449b247179SDarrick J. Wong xfs_iunlink_obj_cmpfn( 19459b247179SDarrick J. Wong struct rhashtable_compare_arg *arg, 19469b247179SDarrick J. Wong const void *obj) 19479b247179SDarrick J. Wong { 19489b247179SDarrick J. Wong const xfs_agino_t *key = arg->key; 19499b247179SDarrick J. Wong const struct xfs_iunlink *iu = obj; 19509b247179SDarrick J. Wong 19519b247179SDarrick J. Wong if (iu->iu_next_unlinked != *key) 19529b247179SDarrick J. Wong return 1; 19539b247179SDarrick J. Wong return 0; 19549b247179SDarrick J. Wong } 19559b247179SDarrick J. Wong 19569b247179SDarrick J. Wong static const struct rhashtable_params xfs_iunlink_hash_params = { 19579b247179SDarrick J. Wong .min_size = XFS_AGI_UNLINKED_BUCKETS, 19589b247179SDarrick J. Wong .key_len = sizeof(xfs_agino_t), 19599b247179SDarrick J. Wong .key_offset = offsetof(struct xfs_iunlink, 19609b247179SDarrick J. Wong iu_next_unlinked), 19619b247179SDarrick J. Wong .head_offset = offsetof(struct xfs_iunlink, iu_rhash_head), 19629b247179SDarrick J. Wong .automatic_shrinking = true, 19639b247179SDarrick J. Wong .obj_cmpfn = xfs_iunlink_obj_cmpfn, 19649b247179SDarrick J. Wong }; 19659b247179SDarrick J. Wong 19669b247179SDarrick J. Wong /* 19679b247179SDarrick J. Wong * Return X, where X.next_unlinked == @agino. Returns NULLAGINO if no such 19689b247179SDarrick J. Wong * relation is found. 19699b247179SDarrick J. Wong */ 19709b247179SDarrick J. Wong static xfs_agino_t 19719b247179SDarrick J. Wong xfs_iunlink_lookup_backref( 19729b247179SDarrick J. Wong struct xfs_perag *pag, 19739b247179SDarrick J. Wong xfs_agino_t agino) 19749b247179SDarrick J. Wong { 19759b247179SDarrick J. Wong struct xfs_iunlink *iu; 19769b247179SDarrick J. Wong 19779b247179SDarrick J. Wong iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino, 19789b247179SDarrick J. Wong xfs_iunlink_hash_params); 19799b247179SDarrick J. Wong return iu ? iu->iu_agino : NULLAGINO; 19809b247179SDarrick J. Wong } 19819b247179SDarrick J. Wong 19829b247179SDarrick J. Wong /* 19839b247179SDarrick J. Wong * Take ownership of an iunlink cache entry and insert it into the hash table. 19849b247179SDarrick J. Wong * If successful, the entry will be owned by the cache; if not, it is freed. 19859b247179SDarrick J. Wong * Either way, the caller does not own @iu after this call. 19869b247179SDarrick J. Wong */ 19879b247179SDarrick J. Wong static int 19889b247179SDarrick J. Wong xfs_iunlink_insert_backref( 19899b247179SDarrick J. Wong struct xfs_perag *pag, 19909b247179SDarrick J. Wong struct xfs_iunlink *iu) 19919b247179SDarrick J. Wong { 19929b247179SDarrick J. Wong int error; 19939b247179SDarrick J. Wong 19949b247179SDarrick J. Wong error = rhashtable_insert_fast(&pag->pagi_unlinked_hash, 19959b247179SDarrick J. Wong &iu->iu_rhash_head, xfs_iunlink_hash_params); 19969b247179SDarrick J. Wong /* 19979b247179SDarrick J. Wong * Fail loudly if there already was an entry because that's a sign of 19989b247179SDarrick J. Wong * corruption of in-memory data. Also fail loudly if we see an error 19999b247179SDarrick J. Wong * code we didn't anticipate from the rhashtable code. Currently we 20009b247179SDarrick J. Wong * only anticipate ENOMEM. 
20019b247179SDarrick J. Wong */ 20029b247179SDarrick J. Wong if (error) { 20039b247179SDarrick J. Wong WARN(error != -ENOMEM, "iunlink cache insert error %d", error); 20049b247179SDarrick J. Wong kmem_free(iu); 20059b247179SDarrick J. Wong } 20069b247179SDarrick J. Wong /* 20079b247179SDarrick J. Wong * Absorb any runtime errors that aren't a result of corruption because 20089b247179SDarrick J. Wong * this is a cache and we can always fall back to bucket list scanning. 20099b247179SDarrick J. Wong */ 20109b247179SDarrick J. Wong if (error != 0 && error != -EEXIST) 20119b247179SDarrick J. Wong error = 0; 20129b247179SDarrick J. Wong return error; 20139b247179SDarrick J. Wong } 20149b247179SDarrick J. Wong 20159b247179SDarrick J. Wong /* Remember that @prev_agino.next_unlinked = @this_agino. */ 20169b247179SDarrick J. Wong static int 20179b247179SDarrick J. Wong xfs_iunlink_add_backref( 20189b247179SDarrick J. Wong struct xfs_perag *pag, 20199b247179SDarrick J. Wong xfs_agino_t prev_agino, 20209b247179SDarrick J. Wong xfs_agino_t this_agino) 20219b247179SDarrick J. Wong { 20229b247179SDarrick J. Wong struct xfs_iunlink *iu; 20239b247179SDarrick J. Wong 20249b247179SDarrick J. Wong if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK)) 20259b247179SDarrick J. Wong return 0; 20269b247179SDarrick J. Wong 20279b247179SDarrick J. Wong iu = kmem_zalloc(sizeof(*iu), KM_SLEEP | KM_NOFS); 20289b247179SDarrick J. Wong iu->iu_agino = prev_agino; 20299b247179SDarrick J. Wong iu->iu_next_unlinked = this_agino; 20309b247179SDarrick J. Wong 20319b247179SDarrick J. Wong return xfs_iunlink_insert_backref(pag, iu); 20329b247179SDarrick J. Wong } 20339b247179SDarrick J. Wong 20349b247179SDarrick J. Wong /* 20359b247179SDarrick J. Wong * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked. 20369b247179SDarrick J. Wong * If @next_unlinked is NULLAGINO, we drop the backref and exit. If there 20379b247179SDarrick J. Wong * wasn't any such entry then we don't bother. 20389b247179SDarrick J. Wong */ 20399b247179SDarrick J. Wong static int 20409b247179SDarrick J. Wong xfs_iunlink_change_backref( 20419b247179SDarrick J. Wong struct xfs_perag *pag, 20429b247179SDarrick J. Wong xfs_agino_t agino, 20439b247179SDarrick J. Wong xfs_agino_t next_unlinked) 20449b247179SDarrick J. Wong { 20459b247179SDarrick J. Wong struct xfs_iunlink *iu; 20469b247179SDarrick J. Wong int error; 20479b247179SDarrick J. Wong 20489b247179SDarrick J. Wong /* Look up the old entry; if there wasn't one then exit. */ 20499b247179SDarrick J. Wong iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino, 20509b247179SDarrick J. Wong xfs_iunlink_hash_params); 20519b247179SDarrick J. Wong if (!iu) 20529b247179SDarrick J. Wong return 0; 20539b247179SDarrick J. Wong 20549b247179SDarrick J. Wong /* 20559b247179SDarrick J. Wong * Remove the entry. This shouldn't ever return an error, but if we 20569b247179SDarrick J. Wong * couldn't remove the old entry we don't want to add it again to the 20579b247179SDarrick J. Wong * hash table, and if the entry disappeared on us then someone's 20589b247179SDarrick J. Wong * violated the locking rules and we need to fail loudly. Either way 20599b247179SDarrick J. Wong * we cannot remove the inode because internal state is or would have 20609b247179SDarrick J. Wong * been corrupt. 20619b247179SDarrick J. Wong */ 20629b247179SDarrick J. Wong error = rhashtable_remove_fast(&pag->pagi_unlinked_hash, 20639b247179SDarrick J. 
Wong &iu->iu_rhash_head, xfs_iunlink_hash_params); 20649b247179SDarrick J. Wong if (error) 20659b247179SDarrick J. Wong return error; 20669b247179SDarrick J. Wong 20679b247179SDarrick J. Wong /* If there is no new next entry just free our item and return. */ 20689b247179SDarrick J. Wong if (next_unlinked == NULLAGINO) { 20699b247179SDarrick J. Wong kmem_free(iu); 20709b247179SDarrick J. Wong return 0; 20719b247179SDarrick J. Wong } 20729b247179SDarrick J. Wong 20739b247179SDarrick J. Wong /* Update the entry and re-add it to the hash table. */ 20749b247179SDarrick J. Wong iu->iu_next_unlinked = next_unlinked; 20759b247179SDarrick J. Wong return xfs_iunlink_insert_backref(pag, iu); 20769b247179SDarrick J. Wong } 20779b247179SDarrick J. Wong 20789b247179SDarrick J. Wong /* Set up the in-core predecessor structures. */ 20799b247179SDarrick J. Wong int 20809b247179SDarrick J. Wong xfs_iunlink_init( 20819b247179SDarrick J. Wong struct xfs_perag *pag) 20829b247179SDarrick J. Wong { 20839b247179SDarrick J. Wong return rhashtable_init(&pag->pagi_unlinked_hash, 20849b247179SDarrick J. Wong &xfs_iunlink_hash_params); 20859b247179SDarrick J. Wong } 20869b247179SDarrick J. Wong 20879b247179SDarrick J. Wong /* Free the in-core predecessor structures. */ 20889b247179SDarrick J. Wong static void 20899b247179SDarrick J. Wong xfs_iunlink_free_item( 20909b247179SDarrick J. Wong void *ptr, 20919b247179SDarrick J. Wong void *arg) 20929b247179SDarrick J. Wong { 20939b247179SDarrick J. Wong struct xfs_iunlink *iu = ptr; 20949b247179SDarrick J. Wong bool *freed_anything = arg; 20959b247179SDarrick J. Wong 20969b247179SDarrick J. Wong *freed_anything = true; 20979b247179SDarrick J. Wong kmem_free(iu); 20989b247179SDarrick J. Wong } 20999b247179SDarrick J. Wong 21009b247179SDarrick J. Wong void 21019b247179SDarrick J. Wong xfs_iunlink_destroy( 21029b247179SDarrick J. Wong struct xfs_perag *pag) 21039b247179SDarrick J. Wong { 21049b247179SDarrick J. Wong bool freed_anything = false; 21059b247179SDarrick J. Wong 21069b247179SDarrick J. Wong rhashtable_free_and_destroy(&pag->pagi_unlinked_hash, 21079b247179SDarrick J. Wong xfs_iunlink_free_item, &freed_anything); 21089b247179SDarrick J. Wong 21099b247179SDarrick J. Wong ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount)); 21109b247179SDarrick J. Wong } 21119b247179SDarrick J. Wong 21129b247179SDarrick J. Wong /* 21139a4a5118SDarrick J. Wong * Point the AGI unlinked bucket at an inode and log the results. The caller 21149a4a5118SDarrick J. Wong * is responsible for validating the old value. 21159a4a5118SDarrick J. Wong */ 21169a4a5118SDarrick J. Wong STATIC int 21179a4a5118SDarrick J. Wong xfs_iunlink_update_bucket( 21189a4a5118SDarrick J. Wong struct xfs_trans *tp, 21199a4a5118SDarrick J. Wong xfs_agnumber_t agno, 21209a4a5118SDarrick J. Wong struct xfs_buf *agibp, 21219a4a5118SDarrick J. Wong unsigned int bucket_index, 21229a4a5118SDarrick J. Wong xfs_agino_t new_agino) 21239a4a5118SDarrick J. Wong { 21249a4a5118SDarrick J. Wong struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp); 21259a4a5118SDarrick J. Wong xfs_agino_t old_value; 21269a4a5118SDarrick J. Wong int offset; 21279a4a5118SDarrick J. Wong 21289a4a5118SDarrick J. Wong ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino)); 21299a4a5118SDarrick J. Wong 21309a4a5118SDarrick J. Wong old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]); 21319a4a5118SDarrick J. Wong trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index, 21329a4a5118SDarrick J. 
Wong old_value, new_agino); 21339a4a5118SDarrick J. Wong 21349a4a5118SDarrick J. Wong /* 21359a4a5118SDarrick J. Wong * We should never find the head of the list already set to the value 21369a4a5118SDarrick J. Wong * passed in because either we're adding or removing ourselves from the 21379a4a5118SDarrick J. Wong * head of the list. 21389a4a5118SDarrick J. Wong */ 21399a4a5118SDarrick J. Wong if (old_value == new_agino) 21409a4a5118SDarrick J. Wong return -EFSCORRUPTED; 21419a4a5118SDarrick J. Wong 21429a4a5118SDarrick J. Wong agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino); 21439a4a5118SDarrick J. Wong offset = offsetof(struct xfs_agi, agi_unlinked) + 21449a4a5118SDarrick J. Wong (sizeof(xfs_agino_t) * bucket_index); 21459a4a5118SDarrick J. Wong xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1); 21469a4a5118SDarrick J. Wong return 0; 21479a4a5118SDarrick J. Wong } 21489a4a5118SDarrick J. Wong 2149f2fc16a3SDarrick J. Wong /* Set an on-disk inode's next_unlinked pointer. */ 2150f2fc16a3SDarrick J. Wong STATIC void 2151f2fc16a3SDarrick J. Wong xfs_iunlink_update_dinode( 2152f2fc16a3SDarrick J. Wong struct xfs_trans *tp, 2153f2fc16a3SDarrick J. Wong xfs_agnumber_t agno, 2154f2fc16a3SDarrick J. Wong xfs_agino_t agino, 2155f2fc16a3SDarrick J. Wong struct xfs_buf *ibp, 2156f2fc16a3SDarrick J. Wong struct xfs_dinode *dip, 2157f2fc16a3SDarrick J. Wong struct xfs_imap *imap, 2158f2fc16a3SDarrick J. Wong xfs_agino_t next_agino) 2159f2fc16a3SDarrick J. Wong { 2160f2fc16a3SDarrick J. Wong struct xfs_mount *mp = tp->t_mountp; 2161f2fc16a3SDarrick J. Wong int offset; 2162f2fc16a3SDarrick J. Wong 2163f2fc16a3SDarrick J. Wong ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino)); 2164f2fc16a3SDarrick J. Wong 2165f2fc16a3SDarrick J. Wong trace_xfs_iunlink_update_dinode(mp, agno, agino, 2166f2fc16a3SDarrick J. Wong be32_to_cpu(dip->di_next_unlinked), next_agino); 2167f2fc16a3SDarrick J. Wong 2168f2fc16a3SDarrick J. Wong dip->di_next_unlinked = cpu_to_be32(next_agino); 2169f2fc16a3SDarrick J. Wong offset = imap->im_boffset + 2170f2fc16a3SDarrick J. Wong offsetof(struct xfs_dinode, di_next_unlinked); 2171f2fc16a3SDarrick J. Wong 2172f2fc16a3SDarrick J. Wong /* need to recalc the inode CRC if appropriate */ 2173f2fc16a3SDarrick J. Wong xfs_dinode_calc_crc(mp, dip); 2174f2fc16a3SDarrick J. Wong xfs_trans_inode_buf(tp, ibp); 2175f2fc16a3SDarrick J. Wong xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1); 2176f2fc16a3SDarrick J. Wong xfs_inobp_check(mp, ibp); 2177f2fc16a3SDarrick J. Wong } 2178f2fc16a3SDarrick J. Wong 2179f2fc16a3SDarrick J. Wong /* Set an in-core inode's unlinked pointer and return the old value. */ 2180f2fc16a3SDarrick J. Wong STATIC int 2181f2fc16a3SDarrick J. Wong xfs_iunlink_update_inode( 2182f2fc16a3SDarrick J. Wong struct xfs_trans *tp, 2183f2fc16a3SDarrick J. Wong struct xfs_inode *ip, 2184f2fc16a3SDarrick J. Wong xfs_agnumber_t agno, 2185f2fc16a3SDarrick J. Wong xfs_agino_t next_agino, 2186f2fc16a3SDarrick J. Wong xfs_agino_t *old_next_agino) 2187f2fc16a3SDarrick J. Wong { 2188f2fc16a3SDarrick J. Wong struct xfs_mount *mp = tp->t_mountp; 2189f2fc16a3SDarrick J. Wong struct xfs_dinode *dip; 2190f2fc16a3SDarrick J. Wong struct xfs_buf *ibp; 2191f2fc16a3SDarrick J. Wong xfs_agino_t old_value; 2192f2fc16a3SDarrick J. Wong int error; 2193f2fc16a3SDarrick J. Wong 2194f2fc16a3SDarrick J. Wong ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino)); 2195f2fc16a3SDarrick J. Wong 2196f2fc16a3SDarrick J. 
Wong error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0, 0); 2197f2fc16a3SDarrick J. Wong if (error) 2198f2fc16a3SDarrick J. Wong return error; 2199f2fc16a3SDarrick J. Wong 2200f2fc16a3SDarrick J. Wong /* Make sure the old pointer isn't garbage. */ 2201f2fc16a3SDarrick J. Wong old_value = be32_to_cpu(dip->di_next_unlinked); 2202f2fc16a3SDarrick J. Wong if (!xfs_verify_agino_or_null(mp, agno, old_value)) { 2203f2fc16a3SDarrick J. Wong error = -EFSCORRUPTED; 2204f2fc16a3SDarrick J. Wong goto out; 2205f2fc16a3SDarrick J. Wong } 2206f2fc16a3SDarrick J. Wong 2207f2fc16a3SDarrick J. Wong /* 2208f2fc16a3SDarrick J. Wong * Since we're updating a linked list, we should never find that the 2209f2fc16a3SDarrick J. Wong * current pointer is the same as the new value, unless we're 2210f2fc16a3SDarrick J. Wong * terminating the list. 2211f2fc16a3SDarrick J. Wong */ 2212f2fc16a3SDarrick J. Wong *old_next_agino = old_value; 2213f2fc16a3SDarrick J. Wong if (old_value == next_agino) { 2214f2fc16a3SDarrick J. Wong if (next_agino != NULLAGINO) 2215f2fc16a3SDarrick J. Wong error = -EFSCORRUPTED; 2216f2fc16a3SDarrick J. Wong goto out; 2217f2fc16a3SDarrick J. Wong } 2218f2fc16a3SDarrick J. Wong 2219f2fc16a3SDarrick J. Wong /* Ok, update the new pointer. */ 2220f2fc16a3SDarrick J. Wong xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino), 2221f2fc16a3SDarrick J. Wong ibp, dip, &ip->i_imap, next_agino); 2222f2fc16a3SDarrick J. Wong return 0; 2223f2fc16a3SDarrick J. Wong out: 2224f2fc16a3SDarrick J. Wong xfs_trans_brelse(tp, ibp); 2225f2fc16a3SDarrick J. Wong return error; 2226f2fc16a3SDarrick J. Wong } 2227f2fc16a3SDarrick J. Wong 22289a4a5118SDarrick J. Wong /* 2229c4a6bf7fSDarrick J. Wong * This is called when the inode's link count has gone to 0 or we are creating 2230c4a6bf7fSDarrick J. Wong * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0. 223154d7b5c1SDave Chinner * 223254d7b5c1SDave Chinner * We place the on-disk inode on a list in the AGI. It will be pulled from this 223354d7b5c1SDave Chinner * list when the inode is freed. 22341da177e4SLinus Torvalds */ 223554d7b5c1SDave Chinner STATIC int 22361da177e4SLinus Torvalds xfs_iunlink( 223754d7b5c1SDave Chinner struct xfs_trans *tp, 223854d7b5c1SDave Chinner struct xfs_inode *ip) 22391da177e4SLinus Torvalds { 22405837f625SDarrick J. Wong struct xfs_mount *mp = tp->t_mountp; 22415837f625SDarrick J. Wong struct xfs_agi *agi; 22425837f625SDarrick J. Wong struct xfs_buf *agibp; 224386bfd375SDarrick J. Wong xfs_agino_t next_agino; 22445837f625SDarrick J. Wong xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 22455837f625SDarrick J. Wong xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 22465837f625SDarrick J. Wong short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 22471da177e4SLinus Torvalds int error; 22481da177e4SLinus Torvalds 2249c4a6bf7fSDarrick J. Wong ASSERT(VFS_I(ip)->i_nlink == 0); 2250c19b3b05SDave Chinner ASSERT(VFS_I(ip)->i_mode != 0); 22514664c66cSDarrick J. Wong trace_xfs_iunlink(ip); 22521da177e4SLinus Torvalds 22535837f625SDarrick J. Wong /* Get the agi buffer first. It ensures lock ordering on the list. */ 22545837f625SDarrick J. Wong error = xfs_read_agi(mp, tp, agno, &agibp); 2255859d7182SVlad Apostolov if (error) 22561da177e4SLinus Torvalds return error; 22571da177e4SLinus Torvalds agi = XFS_BUF_TO_AGI(agibp); 22585e1be0fbSChristoph Hellwig 22591da177e4SLinus Torvalds /* 226086bfd375SDarrick J. Wong * Get the index into the agi hash table for the list this inode will 226186bfd375SDarrick J. 
Wong * go on. Make sure the pointer isn't garbage and that this inode 226286bfd375SDarrick J. Wong * isn't already on the list. 22631da177e4SLinus Torvalds */ 226486bfd375SDarrick J. Wong next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 226586bfd375SDarrick J. Wong if (next_agino == agino || 226686bfd375SDarrick J. Wong !xfs_verify_agino_or_null(mp, agno, next_agino)) 226786bfd375SDarrick J. Wong return -EFSCORRUPTED; 22681da177e4SLinus Torvalds 226986bfd375SDarrick J. Wong if (next_agino != NULLAGINO) { 22709b247179SDarrick J. Wong struct xfs_perag *pag; 2271f2fc16a3SDarrick J. Wong xfs_agino_t old_agino; 2272f2fc16a3SDarrick J. Wong 22731da177e4SLinus Torvalds /* 2274f2fc16a3SDarrick J. Wong * There is already another inode in the bucket, so point this 2275f2fc16a3SDarrick J. Wong * inode to the current head of the list. 22761da177e4SLinus Torvalds */ 2277f2fc16a3SDarrick J. Wong error = xfs_iunlink_update_inode(tp, ip, agno, next_agino, 2278f2fc16a3SDarrick J. Wong &old_agino); 2279c319b58bSVlad Apostolov if (error) 2280c319b58bSVlad Apostolov return error; 2281f2fc16a3SDarrick J. Wong ASSERT(old_agino == NULLAGINO); 22829b247179SDarrick J. Wong 22839b247179SDarrick J. Wong /* 22849b247179SDarrick J. Wong * agino has been unlinked, add a backref from the next inode 22859b247179SDarrick J. Wong * back to agino. 22869b247179SDarrick J. Wong */ 22879b247179SDarrick J. Wong pag = xfs_perag_get(mp, agno); 22889b247179SDarrick J. Wong error = xfs_iunlink_add_backref(pag, agino, next_agino); 22899b247179SDarrick J. Wong xfs_perag_put(pag); 22909b247179SDarrick J. Wong if (error) 22919b247179SDarrick J. Wong return error; 22921da177e4SLinus Torvalds } 22931da177e4SLinus Torvalds 22949a4a5118SDarrick J. Wong /* Point the head of the list to point to this inode. */ 22959a4a5118SDarrick J. Wong return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino); 22961da177e4SLinus Torvalds } 22971da177e4SLinus Torvalds 229823ffa52cSDarrick J. Wong /* Return the imap, dinode pointer, and buffer for an inode. */ 229923ffa52cSDarrick J. Wong STATIC int 230023ffa52cSDarrick J. Wong xfs_iunlink_map_ino( 230123ffa52cSDarrick J. Wong struct xfs_trans *tp, 230223ffa52cSDarrick J. Wong xfs_agnumber_t agno, 230323ffa52cSDarrick J. Wong xfs_agino_t agino, 230423ffa52cSDarrick J. Wong struct xfs_imap *imap, 230523ffa52cSDarrick J. Wong struct xfs_dinode **dipp, 230623ffa52cSDarrick J. Wong struct xfs_buf **bpp) 230723ffa52cSDarrick J. Wong { 230823ffa52cSDarrick J. Wong struct xfs_mount *mp = tp->t_mountp; 230923ffa52cSDarrick J. Wong int error; 231023ffa52cSDarrick J. Wong 231123ffa52cSDarrick J. Wong imap->im_blkno = 0; 231223ffa52cSDarrick J. Wong error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0); 231323ffa52cSDarrick J. Wong if (error) { 231423ffa52cSDarrick J. Wong xfs_warn(mp, "%s: xfs_imap returned error %d.", 231523ffa52cSDarrick J. Wong __func__, error); 231623ffa52cSDarrick J. Wong return error; 231723ffa52cSDarrick J. Wong } 231823ffa52cSDarrick J. Wong 231923ffa52cSDarrick J. Wong error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0, 0); 232023ffa52cSDarrick J. Wong if (error) { 232123ffa52cSDarrick J. Wong xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.", 232223ffa52cSDarrick J. Wong __func__, error); 232323ffa52cSDarrick J. Wong return error; 232423ffa52cSDarrick J. Wong } 232523ffa52cSDarrick J. Wong 232623ffa52cSDarrick J. Wong return 0; 232723ffa52cSDarrick J. Wong } 232823ffa52cSDarrick J. Wong 232923ffa52cSDarrick J. Wong /* 233023ffa52cSDarrick J. 
Wong * Walk the unlinked chain from @head_agino until we find the inode that 233123ffa52cSDarrick J. Wong * points to @target_agino. Return the inode number, map, dinode pointer, 233223ffa52cSDarrick J. Wong * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp. 233323ffa52cSDarrick J. Wong * 233423ffa52cSDarrick J. Wong * @tp, @pag, @head_agino, and @target_agino are input parameters. 233523ffa52cSDarrick J. Wong * @agino, @imap, @dipp, and @bpp are all output parameters. 233623ffa52cSDarrick J. Wong * 233723ffa52cSDarrick J. Wong * Do not call this function if @target_agino is the head of the list. 233823ffa52cSDarrick J. Wong */ 233923ffa52cSDarrick J. Wong STATIC int 234023ffa52cSDarrick J. Wong xfs_iunlink_map_prev( 234123ffa52cSDarrick J. Wong struct xfs_trans *tp, 234223ffa52cSDarrick J. Wong xfs_agnumber_t agno, 234323ffa52cSDarrick J. Wong xfs_agino_t head_agino, 234423ffa52cSDarrick J. Wong xfs_agino_t target_agino, 234523ffa52cSDarrick J. Wong xfs_agino_t *agino, 234623ffa52cSDarrick J. Wong struct xfs_imap *imap, 234723ffa52cSDarrick J. Wong struct xfs_dinode **dipp, 23489b247179SDarrick J. Wong struct xfs_buf **bpp, 23499b247179SDarrick J. Wong struct xfs_perag *pag) 235023ffa52cSDarrick J. Wong { 235123ffa52cSDarrick J. Wong struct xfs_mount *mp = tp->t_mountp; 235223ffa52cSDarrick J. Wong xfs_agino_t next_agino; 235323ffa52cSDarrick J. Wong int error; 235423ffa52cSDarrick J. Wong 235523ffa52cSDarrick J. Wong ASSERT(head_agino != target_agino); 235623ffa52cSDarrick J. Wong *bpp = NULL; 235723ffa52cSDarrick J. Wong 23589b247179SDarrick J. Wong /* See if our backref cache can find it faster. */ 23599b247179SDarrick J. Wong *agino = xfs_iunlink_lookup_backref(pag, target_agino); 23609b247179SDarrick J. Wong if (*agino != NULLAGINO) { 23619b247179SDarrick J. Wong error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp); 23629b247179SDarrick J. Wong if (error) 23639b247179SDarrick J. Wong return error; 23649b247179SDarrick J. Wong 23659b247179SDarrick J. Wong if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino) 23669b247179SDarrick J. Wong return 0; 23679b247179SDarrick J. Wong 23689b247179SDarrick J. Wong /* 23699b247179SDarrick J. Wong * If we get here the cache contents were corrupt, so drop the 23709b247179SDarrick J. Wong * buffer and fall back to walking the bucket list. 23719b247179SDarrick J. Wong */ 23729b247179SDarrick J. Wong xfs_trans_brelse(tp, *bpp); 23739b247179SDarrick J. Wong *bpp = NULL; 23749b247179SDarrick J. Wong WARN_ON_ONCE(1); 23759b247179SDarrick J. Wong } 23769b247179SDarrick J. Wong 23779b247179SDarrick J. Wong trace_xfs_iunlink_map_prev_fallback(mp, agno); 23789b247179SDarrick J. Wong 23799b247179SDarrick J. Wong /* Otherwise, walk the entire bucket until we find it. */ 238023ffa52cSDarrick J. Wong next_agino = head_agino; 238123ffa52cSDarrick J. Wong while (next_agino != target_agino) { 238223ffa52cSDarrick J. Wong xfs_agino_t unlinked_agino; 238323ffa52cSDarrick J. Wong 238423ffa52cSDarrick J. Wong if (*bpp) 238523ffa52cSDarrick J. Wong xfs_trans_brelse(tp, *bpp); 238623ffa52cSDarrick J. Wong 238723ffa52cSDarrick J. Wong *agino = next_agino; 238823ffa52cSDarrick J. Wong error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp, 238923ffa52cSDarrick J. Wong bpp); 239023ffa52cSDarrick J. Wong if (error) 239123ffa52cSDarrick J. Wong return error; 239223ffa52cSDarrick J. Wong 239323ffa52cSDarrick J. Wong unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked); 239423ffa52cSDarrick J. Wong /* 239523ffa52cSDarrick J. 
Wong * Make sure this pointer is valid and isn't an obvious 239623ffa52cSDarrick J. Wong * infinite loop. 239723ffa52cSDarrick J. Wong */ 239823ffa52cSDarrick J. Wong if (!xfs_verify_agino(mp, agno, unlinked_agino) || 239923ffa52cSDarrick J. Wong next_agino == unlinked_agino) { 240023ffa52cSDarrick J. Wong XFS_CORRUPTION_ERROR(__func__, 240123ffa52cSDarrick J. Wong XFS_ERRLEVEL_LOW, mp, 240223ffa52cSDarrick J. Wong *dipp, sizeof(**dipp)); 240323ffa52cSDarrick J. Wong error = -EFSCORRUPTED; 240423ffa52cSDarrick J. Wong return error; 240523ffa52cSDarrick J. Wong } 240623ffa52cSDarrick J. Wong next_agino = unlinked_agino; 240723ffa52cSDarrick J. Wong } 240823ffa52cSDarrick J. Wong 240923ffa52cSDarrick J. Wong return 0; 241023ffa52cSDarrick J. Wong } 241123ffa52cSDarrick J. Wong 24121da177e4SLinus Torvalds /* 24131da177e4SLinus Torvalds * Pull the on-disk inode from the AGI unlinked list. 24141da177e4SLinus Torvalds */ 24151da177e4SLinus Torvalds STATIC int 24161da177e4SLinus Torvalds xfs_iunlink_remove( 24175837f625SDarrick J. Wong struct xfs_trans *tp, 24185837f625SDarrick J. Wong struct xfs_inode *ip) 24191da177e4SLinus Torvalds { 24205837f625SDarrick J. Wong struct xfs_mount *mp = tp->t_mountp; 24215837f625SDarrick J. Wong struct xfs_agi *agi; 24225837f625SDarrick J. Wong struct xfs_buf *agibp; 24235837f625SDarrick J. Wong struct xfs_buf *last_ibp; 24245837f625SDarrick J. Wong struct xfs_dinode *last_dip = NULL; 24259b247179SDarrick J. Wong struct xfs_perag *pag = NULL; 24265837f625SDarrick J. Wong xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 24275837f625SDarrick J. Wong xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 24281da177e4SLinus Torvalds xfs_agino_t next_agino; 2429b1d2a068SDarrick J. Wong xfs_agino_t head_agino; 24305837f625SDarrick J. Wong short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 24311da177e4SLinus Torvalds int error; 24321da177e4SLinus Torvalds 24334664c66cSDarrick J. Wong trace_xfs_iunlink_remove(ip); 24344664c66cSDarrick J. Wong 24355837f625SDarrick J. Wong /* Get the agi buffer first. It ensures lock ordering on the list. */ 24365e1be0fbSChristoph Hellwig error = xfs_read_agi(mp, tp, agno, &agibp); 24375e1be0fbSChristoph Hellwig if (error) 24381da177e4SLinus Torvalds return error; 24391da177e4SLinus Torvalds agi = XFS_BUF_TO_AGI(agibp); 24405e1be0fbSChristoph Hellwig 24411da177e4SLinus Torvalds /* 244286bfd375SDarrick J. Wong * Get the index into the agi hash table for the list this inode will 244386bfd375SDarrick J. Wong * go on. Make sure the head pointer isn't garbage. 24441da177e4SLinus Torvalds */ 2445b1d2a068SDarrick J. Wong head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 2446b1d2a068SDarrick J. Wong if (!xfs_verify_agino(mp, agno, head_agino)) { 2447d2e73665SDarrick J. Wong XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 2448d2e73665SDarrick J. Wong agi, sizeof(*agi)); 2449d2e73665SDarrick J. Wong return -EFSCORRUPTED; 2450d2e73665SDarrick J. Wong } 24511da177e4SLinus Torvalds 24521da177e4SLinus Torvalds /* 2453b1d2a068SDarrick J. Wong * Set our inode's next_unlinked pointer to NULL and then return 2454b1d2a068SDarrick J. Wong * the old pointer value so that we can update whatever was previous 2455b1d2a068SDarrick J. Wong * to us in the list to point to whatever was next in the list. 24561da177e4SLinus Torvalds */ 2457b1d2a068SDarrick J. Wong error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino); 2458f2fc16a3SDarrick J. Wong if (error) 24591da177e4SLinus Torvalds return error; 24609a4a5118SDarrick J. 
Wong 24619b247179SDarrick J. Wong /* 24629b247179SDarrick J. Wong * If there was a backref pointing from the next inode back to this 24639b247179SDarrick J. Wong * one, remove it because we've removed this inode from the list. 24649b247179SDarrick J. Wong * 24659b247179SDarrick J. Wong * Later, if this inode was in the middle of the list we'll update 24669b247179SDarrick J. Wong * this inode's backref to point from the next inode. 24679b247179SDarrick J. Wong */ 24689b247179SDarrick J. Wong if (next_agino != NULLAGINO) { 24699b247179SDarrick J. Wong pag = xfs_perag_get(mp, agno); 24709b247179SDarrick J. Wong error = xfs_iunlink_change_backref(pag, next_agino, 24719b247179SDarrick J. Wong NULLAGINO); 24729b247179SDarrick J. Wong if (error) 24739b247179SDarrick J. Wong goto out; 24749b247179SDarrick J. Wong } 24759b247179SDarrick J. Wong 2476b1d2a068SDarrick J. Wong if (head_agino == agino) { 24779a4a5118SDarrick J. Wong /* Point the head of the list to the next unlinked inode. */ 24789a4a5118SDarrick J. Wong error = xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, 24799a4a5118SDarrick J. Wong next_agino); 24809a4a5118SDarrick J. Wong if (error) 24819b247179SDarrick J. Wong goto out; 24821da177e4SLinus Torvalds } else { 2483f2fc16a3SDarrick J. Wong struct xfs_imap imap; 2484f2fc16a3SDarrick J. Wong xfs_agino_t prev_agino; 2485f2fc16a3SDarrick J. Wong 24869b247179SDarrick J. Wong if (!pag) 24879b247179SDarrick J. Wong pag = xfs_perag_get(mp, agno); 24889b247179SDarrick J. Wong 248923ffa52cSDarrick J. Wong /* We need to search the list for the inode being freed. */ 2490b1d2a068SDarrick J. Wong error = xfs_iunlink_map_prev(tp, agno, head_agino, agino, 24919b247179SDarrick J. Wong &prev_agino, &imap, &last_dip, &last_ibp, 24929b247179SDarrick J. Wong pag); 249323ffa52cSDarrick J. Wong if (error) 24949b247179SDarrick J. Wong goto out; 2495475ee413SChristoph Hellwig 2496f2fc16a3SDarrick J. Wong /* Point the previous inode on the list to the next inode. */ 2497f2fc16a3SDarrick J. Wong xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp, 2498f2fc16a3SDarrick J. Wong last_dip, &imap, next_agino); 24999b247179SDarrick J. Wong 25009b247179SDarrick J. Wong /* 25019b247179SDarrick J. Wong * Now we deal with the backref for this inode. If this inode 25029b247179SDarrick J. Wong * pointed at a real inode, change the backref that pointed to 25039b247179SDarrick J. Wong * us to point to our old next. If this inode was the end of 25049b247179SDarrick J. Wong * the list, delete the backref that pointed to us. Note that 25059b247179SDarrick J. Wong * change_backref takes care of deleting the backref if 25069b247179SDarrick J. Wong * next_agino is NULLAGINO. 25079b247179SDarrick J. Wong */ 25089b247179SDarrick J. Wong error = xfs_iunlink_change_backref(pag, agino, next_agino); 25099b247179SDarrick J. Wong if (error) 25109b247179SDarrick J. Wong goto out; 25111da177e4SLinus Torvalds } 25129b247179SDarrick J. Wong 25139b247179SDarrick J. Wong out: 25149b247179SDarrick J. Wong if (pag) 25159b247179SDarrick J. Wong xfs_perag_put(pag); 25169b247179SDarrick J. Wong return error; 25171da177e4SLinus Torvalds } 25181da177e4SLinus Torvalds 25195b3eed75SDave Chinner /* 25200b8182dbSZhi Yong Wu * A big issue when freeing the inode cluster is that we _cannot_ skip any 25215b3eed75SDave Chinner * inodes that are in memory - they all must be marked stale and attached to 25225b3eed75SDave Chinner * the cluster buffer. 
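 *
 * The cluster buffer is invalidated in this transaction (see the
 * xfs_trans_binval() call at the bottom of the loop below), after which the
 * blocks backing it are free for reallocation. A dirty in-memory inode that
 * escaped the stale marking could later be flushed over whatever those
 * blocks get reused for, which is why the per-inode loop below retries
 * rather than skip anything it cannot lock immediately.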
25235b3eed75SDave Chinner */ 25242a30f36dSChandra Seetharaman STATIC int 25251da177e4SLinus Torvalds xfs_ifree_cluster( 25261da177e4SLinus Torvalds xfs_inode_t *free_ip, 25271da177e4SLinus Torvalds xfs_trans_t *tp, 252809b56604SBrian Foster struct xfs_icluster *xic) 25291da177e4SLinus Torvalds { 25301da177e4SLinus Torvalds xfs_mount_t *mp = free_ip->i_mount; 25311da177e4SLinus Torvalds int nbufs; 25325b257b4aSDave Chinner int i, j; 25333cdaa189SBrian Foster int ioffset; 25341da177e4SLinus Torvalds xfs_daddr_t blkno; 25351da177e4SLinus Torvalds xfs_buf_t *bp; 25365b257b4aSDave Chinner xfs_inode_t *ip; 25371da177e4SLinus Torvalds xfs_inode_log_item_t *iip; 2538643c8c05SCarlos Maiolino struct xfs_log_item *lip; 25395017e97dSDave Chinner struct xfs_perag *pag; 2540ef325959SDarrick J. Wong struct xfs_ino_geometry *igeo = M_IGEO(mp); 254109b56604SBrian Foster xfs_ino_t inum; 25421da177e4SLinus Torvalds 254309b56604SBrian Foster inum = xic->first_ino; 25445017e97dSDave Chinner pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum)); 2545ef325959SDarrick J. Wong nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster; 25461da177e4SLinus Torvalds 2547ef325959SDarrick J. Wong for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) { 254809b56604SBrian Foster /* 254909b56604SBrian Foster * The allocation bitmap tells us which inodes of the chunk were 255009b56604SBrian Foster * physically allocated. Skip the cluster if an inode falls into 255109b56604SBrian Foster * a sparse region. 255209b56604SBrian Foster */ 25533cdaa189SBrian Foster ioffset = inum - xic->first_ino; 25543cdaa189SBrian Foster if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) { 2555ef325959SDarrick J. Wong ASSERT(ioffset % igeo->inodes_per_cluster == 0); 255609b56604SBrian Foster continue; 255709b56604SBrian Foster } 255809b56604SBrian Foster 25591da177e4SLinus Torvalds blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 25601da177e4SLinus Torvalds XFS_INO_TO_AGBNO(mp, inum)); 25611da177e4SLinus Torvalds 25621da177e4SLinus Torvalds /* 25635b257b4aSDave Chinner * We obtain and lock the backing buffer first in the process 25645b257b4aSDave Chinner * here, as we have to ensure that any dirty inode that we 25655b257b4aSDave Chinner * can't get the flush lock on is attached to the buffer. 25665b257b4aSDave Chinner * If we scan the in-memory inodes first, then buffer IO can 25675b257b4aSDave Chinner * complete before we get a lock on it, and hence we may fail 25685b257b4aSDave Chinner * to mark all the active inodes on the buffer stale. 25691da177e4SLinus Torvalds */ 25701da177e4SLinus Torvalds bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2571ef325959SDarrick J. Wong mp->m_bsize * igeo->blocks_per_cluster, 2572b6aff29fSDave Chinner XBF_UNMAPPED); 25731da177e4SLinus Torvalds 25742a30f36dSChandra Seetharaman if (!bp) 25752451337dSDave Chinner return -ENOMEM; 2576b0f539deSDave Chinner 2577b0f539deSDave Chinner /* 2578b0f539deSDave Chinner * This buffer may not have been correctly initialised as we 2579b0f539deSDave Chinner * didn't read it from disk. That's not important because we are 2580b0f539deSDave Chinner * only using it to mark the buffer as stale in the log, and to 2581b0f539deSDave Chinner * attach stale cached inodes on it. That means it will never be 2582b0f539deSDave Chinner * dispatched for IO. If it is, we want to know about it, and we 2583b0f539deSDave Chinner * want it to fail. We can achieve this by adding a write 2584b0f539deSDave Chinner * verifier to the buffer.
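 *
 * (If such a write is ever attempted, the verifier should reject the
 * never-initialised inode records at submission time rather than let
 * them reach the disk.)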
2585b0f539deSDave Chinner */ 25861813dd64SDave Chinner bp->b_ops = &xfs_inode_buf_ops; 2587b0f539deSDave Chinner 25885b257b4aSDave Chinner /* 25895b257b4aSDave Chinner * Walk the inodes already attached to the buffer and mark them 25905b257b4aSDave Chinner * stale. These will all have the flush locks held, so an 25915b3eed75SDave Chinner * in-memory inode walk can't lock them. By marking them all 25925b3eed75SDave Chinner * stale first, we will not attempt to lock them in the loop 25935b3eed75SDave Chinner * below as the XFS_ISTALE flag will be set. 25945b257b4aSDave Chinner */ 2595643c8c05SCarlos Maiolino list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { 25961da177e4SLinus Torvalds if (lip->li_type == XFS_LI_INODE) { 25971da177e4SLinus Torvalds iip = (xfs_inode_log_item_t *)lip; 25981da177e4SLinus Torvalds ASSERT(iip->ili_logged == 1); 2599ca30b2a7SChristoph Hellwig lip->li_cb = xfs_istale_done; 26007b2e2a31SDavid Chinner xfs_trans_ail_copy_lsn(mp->m_ail, 26017b2e2a31SDavid Chinner &iip->ili_flush_lsn, 26027b2e2a31SDavid Chinner &iip->ili_item.li_lsn); 2603e5ffd2bbSDavid Chinner xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 26041da177e4SLinus Torvalds } 26051da177e4SLinus Torvalds } 26061da177e4SLinus Torvalds 26075b3eed75SDave Chinner 26085b257b4aSDave Chinner /* 26095b257b4aSDave Chinner * For each inode in memory attempt to add it to the inode 26105b257b4aSDave Chinner * buffer and set it up for being staled on buffer IO 26115b257b4aSDave Chinner * completion. This is safe as we've locked out tail pushing 26125b257b4aSDave Chinner * and flushing by locking the buffer. 26135b257b4aSDave Chinner * 26145b257b4aSDave Chinner * We have already marked every inode that was part of a 26155b257b4aSDave Chinner * transaction stale above, which means there is no point in 26165b257b4aSDave Chinner * even trying to lock them. 26175b257b4aSDave Chinner */ 2618ef325959SDarrick J. Wong for (i = 0; i < igeo->inodes_per_cluster; i++) { 26195b3eed75SDave Chinner retry: 26201a3e8f3dSDave Chinner rcu_read_lock(); 26215b257b4aSDave Chinner ip = radix_tree_lookup(&pag->pag_ici_root, 26225b257b4aSDave Chinner XFS_INO_TO_AGINO(mp, (inum + i))); 26231da177e4SLinus Torvalds 26241a3e8f3dSDave Chinner /* Inode not in memory, nothing to do */ 26251a3e8f3dSDave Chinner if (!ip) { 26261a3e8f3dSDave Chinner rcu_read_unlock(); 26275b257b4aSDave Chinner continue; 26285b257b4aSDave Chinner } 26295b257b4aSDave Chinner 26305b3eed75SDave Chinner /* 26311a3e8f3dSDave Chinner * because this is an RCU protected lookup, we could 26321a3e8f3dSDave Chinner * find a recently freed or even reallocated inode 26331a3e8f3dSDave Chinner * during the lookup. We need to check under the 26341a3e8f3dSDave Chinner * i_flags_lock for a valid inode here. Skip it if it 26351a3e8f3dSDave Chinner * is not valid, the wrong inode or stale. 
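 *
 * This is the standard RCU inode cache lookup pattern: look up in the
 * radix tree under rcu_read_lock(), then revalidate i_ino and the inode
 * flags under i_flags_lock before using the inode.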
26361a3e8f3dSDave Chinner */ 26371a3e8f3dSDave Chinner spin_lock(&ip->i_flags_lock); 26381a3e8f3dSDave Chinner if (ip->i_ino != inum + i || 26391a3e8f3dSDave Chinner __xfs_iflags_test(ip, XFS_ISTALE)) { 26401a3e8f3dSDave Chinner spin_unlock(&ip->i_flags_lock); 26411a3e8f3dSDave Chinner rcu_read_unlock(); 26421a3e8f3dSDave Chinner continue; 26431a3e8f3dSDave Chinner } 26441a3e8f3dSDave Chinner spin_unlock(&ip->i_flags_lock); 26451a3e8f3dSDave Chinner 26461a3e8f3dSDave Chinner /* 26475b3eed75SDave Chinner * Don't try to lock/unlock the current inode, but we 26485b3eed75SDave Chinner * _cannot_ skip the other inodes that we did not find 26495b3eed75SDave Chinner * in the list attached to the buffer and are not 26505b3eed75SDave Chinner * already marked stale. If we can't lock it, back off 26515b3eed75SDave Chinner * and retry. 26525b3eed75SDave Chinner */ 2653f2e9ad21SOmar Sandoval if (ip != free_ip) { 2654f2e9ad21SOmar Sandoval if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 26551a3e8f3dSDave Chinner rcu_read_unlock(); 26565b3eed75SDave Chinner delay(1); 26575b3eed75SDave Chinner goto retry; 26585b257b4aSDave Chinner } 2659f2e9ad21SOmar Sandoval 2660f2e9ad21SOmar Sandoval /* 2661f2e9ad21SOmar Sandoval * Check the inode number again in case we're 2662f2e9ad21SOmar Sandoval * racing with freeing in xfs_reclaim_inode(). 2663f2e9ad21SOmar Sandoval * See the comments in that function for more 2664f2e9ad21SOmar Sandoval * information as to why the initial check is 2665f2e9ad21SOmar Sandoval * not sufficient. 2666f2e9ad21SOmar Sandoval */ 2667f2e9ad21SOmar Sandoval if (ip->i_ino != inum + i) { 2668f2e9ad21SOmar Sandoval xfs_iunlock(ip, XFS_ILOCK_EXCL); 2669962cc1adSDarrick J. Wong rcu_read_unlock(); 2670f2e9ad21SOmar Sandoval continue; 2671f2e9ad21SOmar Sandoval } 2672f2e9ad21SOmar Sandoval } 26731a3e8f3dSDave Chinner rcu_read_unlock(); 26745b257b4aSDave Chinner 26755b3eed75SDave Chinner xfs_iflock(ip); 26765b257b4aSDave Chinner xfs_iflags_set(ip, XFS_ISTALE); 26775b257b4aSDave Chinner 26785b3eed75SDave Chinner /* 26795b3eed75SDave Chinner * we don't need to attach clean inodes or those only 26805b3eed75SDave Chinner * with unlogged changes (which we throw away, anyway). 
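 *
 * Throwing the unlogged changes away is safe because the inode is being
 * freed here; none of its contents need to survive, we only have to
 * prevent a stray flush of it from reaching the disk.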
26815b3eed75SDave Chinner */ 26825b257b4aSDave Chinner iip = ip->i_itemp; 26835b3eed75SDave Chinner if (!iip || xfs_inode_clean(ip)) { 26845b257b4aSDave Chinner ASSERT(ip != free_ip); 26851da177e4SLinus Torvalds xfs_ifunlock(ip); 26861da177e4SLinus Torvalds xfs_iunlock(ip, XFS_ILOCK_EXCL); 26871da177e4SLinus Torvalds continue; 26881da177e4SLinus Torvalds } 26891da177e4SLinus Torvalds 2690f5d8d5c4SChristoph Hellwig iip->ili_last_fields = iip->ili_fields; 2691f5d8d5c4SChristoph Hellwig iip->ili_fields = 0; 2692fc0561ceSDave Chinner iip->ili_fsync_fields = 0; 26931da177e4SLinus Torvalds iip->ili_logged = 1; 26947b2e2a31SDavid Chinner xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 26957b2e2a31SDavid Chinner &iip->ili_item.li_lsn); 26961da177e4SLinus Torvalds 2697ca30b2a7SChristoph Hellwig xfs_buf_attach_iodone(bp, xfs_istale_done, 2698ca30b2a7SChristoph Hellwig &iip->ili_item); 26995b257b4aSDave Chinner 27005b257b4aSDave Chinner if (ip != free_ip) 27011da177e4SLinus Torvalds xfs_iunlock(ip, XFS_ILOCK_EXCL); 27021da177e4SLinus Torvalds } 27031da177e4SLinus Torvalds 27041da177e4SLinus Torvalds xfs_trans_stale_inode_buf(tp, bp); 27051da177e4SLinus Torvalds xfs_trans_binval(tp, bp); 27061da177e4SLinus Torvalds } 27071da177e4SLinus Torvalds 27085017e97dSDave Chinner xfs_perag_put(pag); 27092a30f36dSChandra Seetharaman return 0; 27101da177e4SLinus Torvalds } 27111da177e4SLinus Torvalds 27121da177e4SLinus Torvalds /* 271398c4f78dSDarrick J. Wong * Free any local-format buffers sitting around before we reset to 271498c4f78dSDarrick J. Wong * extents format. 271598c4f78dSDarrick J. Wong */ 271698c4f78dSDarrick J. Wong static inline void 271798c4f78dSDarrick J. Wong xfs_ifree_local_data( 271898c4f78dSDarrick J. Wong struct xfs_inode *ip, 271998c4f78dSDarrick J. Wong int whichfork) 272098c4f78dSDarrick J. Wong { 272198c4f78dSDarrick J. Wong struct xfs_ifork *ifp; 272298c4f78dSDarrick J. Wong 272398c4f78dSDarrick J. Wong if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) 272498c4f78dSDarrick J. Wong return; 272598c4f78dSDarrick J. Wong 272698c4f78dSDarrick J. Wong ifp = XFS_IFORK_PTR(ip, whichfork); 272798c4f78dSDarrick J. Wong xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); 272898c4f78dSDarrick J. Wong } 272998c4f78dSDarrick J. Wong 273098c4f78dSDarrick J. Wong /* 27311da177e4SLinus Torvalds * This is called to return an inode to the inode free list. 27321da177e4SLinus Torvalds * The inode should already be truncated to 0 length and have 27331da177e4SLinus Torvalds * no pages associated with it. This routine also assumes that 27341da177e4SLinus Torvalds * the inode is already a part of the transaction. 27351da177e4SLinus Torvalds * 27361da177e4SLinus Torvalds * The on-disk copy of the inode will have been added to the list 27371da177e4SLinus Torvalds * of unlinked inodes in the AGI. We need to remove the inode from 27381da177e4SLinus Torvalds * that list atomically with respect to freeing it here. 
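 *
 * In outline, the body below does:
 *
 *	xfs_iunlink_remove(tp, ip);		// unhook from the AGI unlinked list
 *	xfs_difree(tp, ip->i_ino, &xic);	// free the inode in the inobt
 *	// reset the in-core forks and di_* fields, bump VFS_I(ip)->i_generation
 *	if (xic.deleted)			// whole inode chunk was freed?
 *		xfs_ifree_cluster(ip, tp, &xic);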
27391da177e4SLinus Torvalds */ 27401da177e4SLinus Torvalds int 27411da177e4SLinus Torvalds xfs_ifree( 27420e0417f3SBrian Foster struct xfs_trans *tp, 27430e0417f3SBrian Foster struct xfs_inode *ip) 27441da177e4SLinus Torvalds { 27451da177e4SLinus Torvalds int error; 274609b56604SBrian Foster struct xfs_icluster xic = { 0 }; 27471da177e4SLinus Torvalds 2748579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 274954d7b5c1SDave Chinner ASSERT(VFS_I(ip)->i_nlink == 0); 27501da177e4SLinus Torvalds ASSERT(ip->i_d.di_nextents == 0); 27511da177e4SLinus Torvalds ASSERT(ip->i_d.di_anextents == 0); 2752c19b3b05SDave Chinner ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode)); 27531da177e4SLinus Torvalds ASSERT(ip->i_d.di_nblocks == 0); 27541da177e4SLinus Torvalds 27551da177e4SLinus Torvalds /* 27561da177e4SLinus Torvalds * Pull the on-disk inode from the AGI unlinked list. 27571da177e4SLinus Torvalds */ 27581da177e4SLinus Torvalds error = xfs_iunlink_remove(tp, ip); 27591baaed8fSDave Chinner if (error) 27601da177e4SLinus Torvalds return error; 27611da177e4SLinus Torvalds 27620e0417f3SBrian Foster error = xfs_difree(tp, ip->i_ino, &xic); 27631baaed8fSDave Chinner if (error) 27641da177e4SLinus Torvalds return error; 27651baaed8fSDave Chinner 276698c4f78dSDarrick J. Wong xfs_ifree_local_data(ip, XFS_DATA_FORK); 276798c4f78dSDarrick J. Wong xfs_ifree_local_data(ip, XFS_ATTR_FORK); 276898c4f78dSDarrick J. Wong 2769c19b3b05SDave Chinner VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ 27701da177e4SLinus Torvalds ip->i_d.di_flags = 0; 2771beaae8cdSDarrick J. Wong ip->i_d.di_flags2 = 0; 27721da177e4SLinus Torvalds ip->i_d.di_dmevmask = 0; 27731da177e4SLinus Torvalds ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ 27741da177e4SLinus Torvalds ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; 27751da177e4SLinus Torvalds ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 2776dc1baa71SEric Sandeen 2777dc1baa71SEric Sandeen /* Don't attempt to replay owner changes for a deleted inode */ 2778dc1baa71SEric Sandeen ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER); 2779dc1baa71SEric Sandeen 27801da177e4SLinus Torvalds /* 27811da177e4SLinus Torvalds * Bump the generation count so no one will be confused 27821da177e4SLinus Torvalds * by reincarnations of this inode. 27831da177e4SLinus Torvalds */ 27849e9a2674SDave Chinner VFS_I(ip)->i_generation++; 27851da177e4SLinus Torvalds xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 27861da177e4SLinus Torvalds 278709b56604SBrian Foster if (xic.deleted) 278809b56604SBrian Foster error = xfs_ifree_cluster(ip, tp, &xic); 27891da177e4SLinus Torvalds 27902a30f36dSChandra Seetharaman return error; 27911da177e4SLinus Torvalds } 27921da177e4SLinus Torvalds 27931da177e4SLinus Torvalds /* 279460ec6783SChristoph Hellwig * This is called to unpin an inode. The caller must have the inode locked 279560ec6783SChristoph Hellwig * in at least shared mode so that the buffer cannot be subsequently pinned 279660ec6783SChristoph Hellwig * once someone is waiting for it to be unpinned. 
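 *
 * Note that the push below is asynchronous: xfs_log_force_lsn() is only
 * asked to start writing the log up to the LSN the inode was last logged
 * at, and the unpin happens when that log I/O completes. Callers that
 * need to wait for it use __xfs_iunpin_wait() below.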
27971da177e4SLinus Torvalds */ 279860ec6783SChristoph Hellwig static void 2799f392e631SChristoph Hellwig xfs_iunpin( 280060ec6783SChristoph Hellwig struct xfs_inode *ip) 2801a3f74ffbSDavid Chinner { 2802579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2803a3f74ffbSDavid Chinner 28044aaf15d1SDave Chinner trace_xfs_inode_unpin_nowait(ip, _RET_IP_); 28054aaf15d1SDave Chinner 2806a3f74ffbSDavid Chinner /* Give the log a push to start the unpinning I/O */ 2807656de4ffSChristoph Hellwig xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL); 2808a14a348bSChristoph Hellwig 2809a3f74ffbSDavid Chinner } 2810a3f74ffbSDavid Chinner 2811f392e631SChristoph Hellwig static void 2812f392e631SChristoph Hellwig __xfs_iunpin_wait( 2813f392e631SChristoph Hellwig struct xfs_inode *ip) 2814f392e631SChristoph Hellwig { 2815f392e631SChristoph Hellwig wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT); 2816f392e631SChristoph Hellwig DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT); 2817f392e631SChristoph Hellwig 2818f392e631SChristoph Hellwig xfs_iunpin(ip); 2819f392e631SChristoph Hellwig 2820f392e631SChristoph Hellwig do { 282121417136SIngo Molnar prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 2822f392e631SChristoph Hellwig if (xfs_ipincount(ip)) 2823f392e631SChristoph Hellwig io_schedule(); 2824f392e631SChristoph Hellwig } while (xfs_ipincount(ip)); 282521417136SIngo Molnar finish_wait(wq, &wait.wq_entry); 2826f392e631SChristoph Hellwig } 2827f392e631SChristoph Hellwig 2828777df5afSDave Chinner void 28291da177e4SLinus Torvalds xfs_iunpin_wait( 283060ec6783SChristoph Hellwig struct xfs_inode *ip) 28311da177e4SLinus Torvalds { 2832f392e631SChristoph Hellwig if (xfs_ipincount(ip)) 2833f392e631SChristoph Hellwig __xfs_iunpin_wait(ip); 28341da177e4SLinus Torvalds } 28351da177e4SLinus Torvalds 283627320369SDave Chinner /* 283727320369SDave Chinner * Removing an inode from the namespace involves removing the directory entry 283827320369SDave Chinner * and dropping the link count on the inode. Removing the directory entry can 283927320369SDave Chinner * result in locking an AGF (directory blocks were freed) and removing a link 284027320369SDave Chinner * count can result in placing the inode on an unlinked list which results in 284127320369SDave Chinner * locking an AGI. 284227320369SDave Chinner * 284327320369SDave Chinner * The big problem here is that we have an ordering constraint on AGF and AGI 284427320369SDave Chinner * locking - inode allocation locks the AGI, then can allocate a new extent for 284527320369SDave Chinner * new inodes, locking the AGF after the AGI. Similarly, freeing the inode 284627320369SDave Chinner * removes the inode from the unlinked list, requiring that we lock the AGI 284727320369SDave Chinner * first, and then freeing the inode can result in an inode chunk being freed 284827320369SDave Chinner * and hence freeing disk space requiring that we lock an AGF. 284927320369SDave Chinner * 285027320369SDave Chinner * Hence the ordering that is imposed by other parts of the code is AGI before 285127320369SDave Chinner * AGF. This means we cannot remove the directory entry before we drop the inode 285227320369SDave Chinner * reference count and put it on the unlinked list as this results in a lock 285327320369SDave Chinner * order of AGF then AGI, and this can deadlock against inode allocation and 285427320369SDave Chinner * freeing. 
Therefore we must drop the link counts before we remove the 285527320369SDave Chinner * directory entry. 285627320369SDave Chinner * 285727320369SDave Chinner * This is still safe from a transactional point of view - it is not until we 2858310a75a3SDarrick J. Wong * get to xfs_defer_finish() that we have the possibility of multiple 285927320369SDave Chinner * transactions in this operation. Hence as long as we remove the directory 286027320369SDave Chinner * entry and drop the link count in the first transaction of the remove 286127320369SDave Chinner * operation, there are no transactional constraints on the ordering here. 286227320369SDave Chinner */ 2863c24b5dfaSDave Chinner int 2864c24b5dfaSDave Chinner xfs_remove( 2865c24b5dfaSDave Chinner xfs_inode_t *dp, 2866c24b5dfaSDave Chinner struct xfs_name *name, 2867c24b5dfaSDave Chinner xfs_inode_t *ip) 2868c24b5dfaSDave Chinner { 2869c24b5dfaSDave Chinner xfs_mount_t *mp = dp->i_mount; 2870c24b5dfaSDave Chinner xfs_trans_t *tp = NULL; 2871c19b3b05SDave Chinner int is_dir = S_ISDIR(VFS_I(ip)->i_mode); 2872c24b5dfaSDave Chinner int error = 0; 2873c24b5dfaSDave Chinner uint resblks; 2874c24b5dfaSDave Chinner 2875c24b5dfaSDave Chinner trace_xfs_remove(dp, name); 2876c24b5dfaSDave Chinner 2877c24b5dfaSDave Chinner if (XFS_FORCED_SHUTDOWN(mp)) 28782451337dSDave Chinner return -EIO; 2879c24b5dfaSDave Chinner 2880c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(dp); 2881c24b5dfaSDave Chinner if (error) 2882c24b5dfaSDave Chinner goto std_return; 2883c24b5dfaSDave Chinner 2884c14cfccaSDarrick J. Wong error = xfs_qm_dqattach(ip); 2885c24b5dfaSDave Chinner if (error) 2886c24b5dfaSDave Chinner goto std_return; 2887c24b5dfaSDave Chinner 2888c24b5dfaSDave Chinner /* 2889c24b5dfaSDave Chinner * We try to get the real space reservation first, 2890c24b5dfaSDave Chinner * allowing for directory btree deletion(s) implying 2891c24b5dfaSDave Chinner * possible bmap insert(s). If we can't get the space 2892c24b5dfaSDave Chinner * reservation then we use 0 instead, and avoid the bmap 2893c24b5dfaSDave Chinner * btree insert(s) in the directory code: if a bmap insert 2894c24b5dfaSDave Chinner * tries to happen, the directory code instead trims the 2895c24b5dfaSDave Chinner * LAST block from the directory. 2896c24b5dfaSDave Chinner */ 2897c24b5dfaSDave Chinner resblks = XFS_REMOVE_SPACE_RES(mp); 2898253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp); 28992451337dSDave Chinner if (error == -ENOSPC) { 2900c24b5dfaSDave Chinner resblks = 0; 2901253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0, 2902253f4911SChristoph Hellwig &tp); 2903c24b5dfaSDave Chinner } 2904c24b5dfaSDave Chinner if (error) { 29052451337dSDave Chinner ASSERT(error != -ENOSPC); 2906253f4911SChristoph Hellwig goto std_return; 2907c24b5dfaSDave Chinner } 2908c24b5dfaSDave Chinner 29097c2d238aSDarrick J. Wong xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL); 2910c24b5dfaSDave Chinner 291165523218SChristoph Hellwig xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); 2912c24b5dfaSDave Chinner xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 2913c24b5dfaSDave Chinner 2914c24b5dfaSDave Chinner /* 2915c24b5dfaSDave Chinner * If we're removing a directory, perform some additional validation.
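 *
 * An empty directory has i_nlink == 2: one link from its own "." entry
 * and one from its entry in the parent. A higher count means child
 * subdirectories still point back at it via "..", and even at exactly 2
 * it may still hold non-directory entries, hence the additional
 * xfs_dir_isempty() check below.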
2916c24b5dfaSDave Chinner */ 2917c24b5dfaSDave Chinner if (is_dir) { 291854d7b5c1SDave Chinner ASSERT(VFS_I(ip)->i_nlink >= 2); 291954d7b5c1SDave Chinner if (VFS_I(ip)->i_nlink != 2) { 29202451337dSDave Chinner error = -ENOTEMPTY; 2921c24b5dfaSDave Chinner goto out_trans_cancel; 2922c24b5dfaSDave Chinner } 2923c24b5dfaSDave Chinner if (!xfs_dir_isempty(ip)) { 29242451337dSDave Chinner error = -ENOTEMPTY; 2925c24b5dfaSDave Chinner goto out_trans_cancel; 2926c24b5dfaSDave Chinner } 2927c24b5dfaSDave Chinner 292827320369SDave Chinner /* Drop the link from ip's "..". */ 2929c24b5dfaSDave Chinner error = xfs_droplink(tp, dp); 2930c24b5dfaSDave Chinner if (error) 293127320369SDave Chinner goto out_trans_cancel; 2932c24b5dfaSDave Chinner 293327320369SDave Chinner /* Drop the "." link from ip to self. */ 2934c24b5dfaSDave Chinner error = xfs_droplink(tp, ip); 2935c24b5dfaSDave Chinner if (error) 293627320369SDave Chinner goto out_trans_cancel; 2937c24b5dfaSDave Chinner } else { 2938c24b5dfaSDave Chinner /* 2939c24b5dfaSDave Chinner * When removing a non-directory we need to log the parent 2940c24b5dfaSDave Chinner * inode here. For a directory this is done implicitly 2941c24b5dfaSDave Chinner * by the xfs_droplink call for the ".." entry. 2942c24b5dfaSDave Chinner */ 2943c24b5dfaSDave Chinner xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 2944c24b5dfaSDave Chinner } 294527320369SDave Chinner xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2946c24b5dfaSDave Chinner 294727320369SDave Chinner /* Drop the link from dp to ip. */ 2948c24b5dfaSDave Chinner error = xfs_droplink(tp, ip); 2949c24b5dfaSDave Chinner if (error) 295027320369SDave Chinner goto out_trans_cancel; 2951c24b5dfaSDave Chinner 2952381eee69SBrian Foster error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks); 295327320369SDave Chinner if (error) { 29542451337dSDave Chinner ASSERT(error != -ENOENT); 2955c8eac49eSBrian Foster goto out_trans_cancel; 295627320369SDave Chinner } 295727320369SDave Chinner 2958c24b5dfaSDave Chinner /* 2959c24b5dfaSDave Chinner * If this is a synchronous mount, make sure that the 2960c24b5dfaSDave Chinner * remove transaction goes to disk before returning to 2961c24b5dfaSDave Chinner * the user. 2962c24b5dfaSDave Chinner */ 2963c24b5dfaSDave Chinner if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 2964c24b5dfaSDave Chinner xfs_trans_set_sync(tp); 2965c24b5dfaSDave Chinner 296670393313SChristoph Hellwig error = xfs_trans_commit(tp); 2967c24b5dfaSDave Chinner if (error) 2968c24b5dfaSDave Chinner goto std_return; 2969c24b5dfaSDave Chinner 29702cd2ef6aSChristoph Hellwig if (is_dir && xfs_inode_is_filestream(ip)) 2971c24b5dfaSDave Chinner xfs_filestream_deassociate(ip); 2972c24b5dfaSDave Chinner 2973c24b5dfaSDave Chinner return 0; 2974c24b5dfaSDave Chinner 2975c24b5dfaSDave Chinner out_trans_cancel: 29764906e215SChristoph Hellwig xfs_trans_cancel(tp); 2977c24b5dfaSDave Chinner std_return: 2978c24b5dfaSDave Chinner return error; 2979c24b5dfaSDave Chinner } 2980c24b5dfaSDave Chinner 2981f6bba201SDave Chinner /* 2982f6bba201SDave Chinner * Enter all inodes for a rename transaction into a sorted array. 
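 *
 * For example (inode numbers invented for illustration): a RENAME_WHITEOUT
 * of a directory from dp1 = 128 to dp2 = 256 with ip1 = 131, ip2 = 260 and
 * wip = 70 sorts i_tab to {70, 128, 131, 256, 260}; xfs_lock_inodes() then
 * takes XFS_ILOCK_EXCL in that ascending-inode order, which is what keeps
 * concurrent renames from deadlocking against each other.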
2983f6bba201SDave Chinner */ 298495afcf5cSDave Chinner #define __XFS_SORT_INODES 5 2985f6bba201SDave Chinner STATIC void 2986f6bba201SDave Chinner xfs_sort_for_rename( 298795afcf5cSDave Chinner struct xfs_inode *dp1, /* in: old (source) directory inode */ 298895afcf5cSDave Chinner struct xfs_inode *dp2, /* in: new (target) directory inode */ 298995afcf5cSDave Chinner struct xfs_inode *ip1, /* in: inode of old entry */ 299095afcf5cSDave Chinner struct xfs_inode *ip2, /* in: inode of new entry */ 299195afcf5cSDave Chinner struct xfs_inode *wip, /* in: whiteout inode */ 299295afcf5cSDave Chinner struct xfs_inode **i_tab,/* out: sorted array of inodes */ 299395afcf5cSDave Chinner int *num_inodes) /* in/out: inodes in array */ 2994f6bba201SDave Chinner { 2995f6bba201SDave Chinner int i, j; 2996f6bba201SDave Chinner 299795afcf5cSDave Chinner ASSERT(*num_inodes == __XFS_SORT_INODES); 299895afcf5cSDave Chinner memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *)); 299995afcf5cSDave Chinner 3000f6bba201SDave Chinner /* 3001f6bba201SDave Chinner * i_tab contains a list of pointers to inodes. We initialize 3002f6bba201SDave Chinner * the table here & we'll sort it. We will then use it to 3003f6bba201SDave Chinner * order the acquisition of the inode locks. 3004f6bba201SDave Chinner * 3005f6bba201SDave Chinner * Note that the table may contain duplicates. e.g., dp1 == dp2. 3006f6bba201SDave Chinner */ 300795afcf5cSDave Chinner i = 0; 300895afcf5cSDave Chinner i_tab[i++] = dp1; 300995afcf5cSDave Chinner i_tab[i++] = dp2; 301095afcf5cSDave Chinner i_tab[i++] = ip1; 301195afcf5cSDave Chinner if (ip2) 301295afcf5cSDave Chinner i_tab[i++] = ip2; 301395afcf5cSDave Chinner if (wip) 301495afcf5cSDave Chinner i_tab[i++] = wip; 301595afcf5cSDave Chinner *num_inodes = i; 3016f6bba201SDave Chinner 3017f6bba201SDave Chinner /* 3018f6bba201SDave Chinner * Sort the elements via bubble sort. (Remember, there are at 301995afcf5cSDave Chinner * most 5 elements to sort, so this is adequate.) 3020f6bba201SDave Chinner */ 3021f6bba201SDave Chinner for (i = 0; i < *num_inodes; i++) { 3022f6bba201SDave Chinner for (j = 1; j < *num_inodes; j++) { 3023f6bba201SDave Chinner if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { 302495afcf5cSDave Chinner struct xfs_inode *temp = i_tab[j]; 3025f6bba201SDave Chinner i_tab[j] = i_tab[j-1]; 3026f6bba201SDave Chinner i_tab[j-1] = temp; 3027f6bba201SDave Chinner } 3028f6bba201SDave Chinner } 3029f6bba201SDave Chinner } 3030f6bba201SDave Chinner } 3031f6bba201SDave Chinner 3032310606b0SDave Chinner static int 3033310606b0SDave Chinner xfs_finish_rename( 3034c9cfdb38SBrian Foster struct xfs_trans *tp) 3035310606b0SDave Chinner { 3036310606b0SDave Chinner /* 3037310606b0SDave Chinner * If this is a synchronous mount, make sure that the rename transaction 3038310606b0SDave Chinner * goes to disk before returning to the user. 
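 *
 * xfs_trans_set_sync() marks the transaction synchronous, so the
 * xfs_trans_commit() below will not return until the log write backing
 * the commit has reached stable storage.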
3039310606b0SDave Chinner */ 3040310606b0SDave Chinner if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 3041310606b0SDave Chinner xfs_trans_set_sync(tp); 3042310606b0SDave Chinner 304370393313SChristoph Hellwig return xfs_trans_commit(tp); 3044310606b0SDave Chinner } 3045310606b0SDave Chinner 3046f6bba201SDave Chinner /* 3047d31a1825SCarlos Maiolino * xfs_cross_rename() 3048d31a1825SCarlos Maiolino * 3049d31a1825SCarlos Maiolino * responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall 3050d31a1825SCarlos Maiolino */ 3051d31a1825SCarlos Maiolino STATIC int 3052d31a1825SCarlos Maiolino xfs_cross_rename( 3053d31a1825SCarlos Maiolino struct xfs_trans *tp, 3054d31a1825SCarlos Maiolino struct xfs_inode *dp1, 3055d31a1825SCarlos Maiolino struct xfs_name *name1, 3056d31a1825SCarlos Maiolino struct xfs_inode *ip1, 3057d31a1825SCarlos Maiolino struct xfs_inode *dp2, 3058d31a1825SCarlos Maiolino struct xfs_name *name2, 3059d31a1825SCarlos Maiolino struct xfs_inode *ip2, 3060d31a1825SCarlos Maiolino int spaceres) 3061d31a1825SCarlos Maiolino { 3062d31a1825SCarlos Maiolino int error = 0; 3063d31a1825SCarlos Maiolino int ip1_flags = 0; 3064d31a1825SCarlos Maiolino int ip2_flags = 0; 3065d31a1825SCarlos Maiolino int dp2_flags = 0; 3066d31a1825SCarlos Maiolino 3067d31a1825SCarlos Maiolino /* Swap inode number for dirent in first parent */ 3068381eee69SBrian Foster error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres); 3069d31a1825SCarlos Maiolino if (error) 3070eeacd321SDave Chinner goto out_trans_abort; 3071d31a1825SCarlos Maiolino 3072d31a1825SCarlos Maiolino /* Swap inode number for dirent in second parent */ 3073381eee69SBrian Foster error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres); 3074d31a1825SCarlos Maiolino if (error) 3075eeacd321SDave Chinner goto out_trans_abort; 3076d31a1825SCarlos Maiolino 3077d31a1825SCarlos Maiolino /* 3078d31a1825SCarlos Maiolino * If we're renaming one or more directories across different parents, 3079d31a1825SCarlos Maiolino * update the respective ".." entries (and link counts) to match the new 3080d31a1825SCarlos Maiolino * parents. 3081d31a1825SCarlos Maiolino */ 3082d31a1825SCarlos Maiolino if (dp1 != dp2) { 3083d31a1825SCarlos Maiolino dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 3084d31a1825SCarlos Maiolino 3085c19b3b05SDave Chinner if (S_ISDIR(VFS_I(ip2)->i_mode)) { 3086d31a1825SCarlos Maiolino error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot, 3087381eee69SBrian Foster dp1->i_ino, spaceres); 3088d31a1825SCarlos Maiolino if (error) 3089eeacd321SDave Chinner goto out_trans_abort; 3090d31a1825SCarlos Maiolino 3091d31a1825SCarlos Maiolino /* transfer ip2 ".."
reference to dp1 */ 3092c19b3b05SDave Chinner if (!S_ISDIR(VFS_I(ip1)->i_mode)) { 3093d31a1825SCarlos Maiolino error = xfs_droplink(tp, dp2); 3094d31a1825SCarlos Maiolino if (error) 3095eeacd321SDave Chinner goto out_trans_abort; 309691083269SEric Sandeen xfs_bumplink(tp, dp1); 3097d31a1825SCarlos Maiolino } 3098d31a1825SCarlos Maiolino 3099d31a1825SCarlos Maiolino /* 3100d31a1825SCarlos Maiolino * Although ip1 isn't changed here, userspace needs 3101d31a1825SCarlos Maiolino * to be warned about the change, so that applications 3102d31a1825SCarlos Maiolino * relying on it (like backup ones), will properly 3103d31a1825SCarlos Maiolino * notify the change 3104d31a1825SCarlos Maiolino */ 3105d31a1825SCarlos Maiolino ip1_flags |= XFS_ICHGTIME_CHG; 3106d31a1825SCarlos Maiolino ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 3107d31a1825SCarlos Maiolino } 3108d31a1825SCarlos Maiolino 3109c19b3b05SDave Chinner if (S_ISDIR(VFS_I(ip1)->i_mode)) { 3110d31a1825SCarlos Maiolino error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot, 3111381eee69SBrian Foster dp2->i_ino, spaceres); 3112d31a1825SCarlos Maiolino if (error) 3113eeacd321SDave Chinner goto out_trans_abort; 3114d31a1825SCarlos Maiolino 3115d31a1825SCarlos Maiolino /* transfer ip1 ".." reference to dp2 */ 3116c19b3b05SDave Chinner if (!S_ISDIR(VFS_I(ip2)->i_mode)) { 3117d31a1825SCarlos Maiolino error = xfs_droplink(tp, dp1); 3118d31a1825SCarlos Maiolino if (error) 3119eeacd321SDave Chinner goto out_trans_abort; 312091083269SEric Sandeen xfs_bumplink(tp, dp2); 3121d31a1825SCarlos Maiolino } 3122d31a1825SCarlos Maiolino 3123d31a1825SCarlos Maiolino /* 3124d31a1825SCarlos Maiolino * Although ip2 isn't changed here, userspace needs 3125d31a1825SCarlos Maiolino * to be warned about the change, so that applications 3126d31a1825SCarlos Maiolino * relying on it (like backup ones), will properly 3127d31a1825SCarlos Maiolino * notify the change 3128d31a1825SCarlos Maiolino */ 3129d31a1825SCarlos Maiolino ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 3130d31a1825SCarlos Maiolino ip2_flags |= XFS_ICHGTIME_CHG; 3131d31a1825SCarlos Maiolino } 3132d31a1825SCarlos Maiolino } 3133d31a1825SCarlos Maiolino 3134d31a1825SCarlos Maiolino if (ip1_flags) { 3135d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, ip1, ip1_flags); 3136d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE); 3137d31a1825SCarlos Maiolino } 3138d31a1825SCarlos Maiolino if (ip2_flags) { 3139d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, ip2, ip2_flags); 3140d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE); 3141d31a1825SCarlos Maiolino } 3142d31a1825SCarlos Maiolino if (dp2_flags) { 3143d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, dp2, dp2_flags); 3144d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE); 3145d31a1825SCarlos Maiolino } 3146d31a1825SCarlos Maiolino xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3147d31a1825SCarlos Maiolino xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE); 3148c9cfdb38SBrian Foster return xfs_finish_rename(tp); 3149eeacd321SDave Chinner 3150eeacd321SDave Chinner out_trans_abort: 31514906e215SChristoph Hellwig xfs_trans_cancel(tp); 3152d31a1825SCarlos Maiolino return error; 3153d31a1825SCarlos Maiolino } 3154d31a1825SCarlos Maiolino 3155d31a1825SCarlos Maiolino /* 31567dcf5c3eSDave Chinner * xfs_rename_alloc_whiteout() 31577dcf5c3eSDave Chinner * 31587dcf5c3eSDave Chinner * Return a referenced, unlinked, unlocked inode that that can be used as a 31597dcf5c3eSDave Chinner * whiteout in a rename 
transaction. We use a tmpfile inode here so that if we 31607dcf5c3eSDave Chinner * crash between allocating the inode and linking it into the rename transaction, 31617dcf5c3eSDave Chinner * recovery will free the inode and we won't leak it. 31627dcf5c3eSDave Chinner */ 31637dcf5c3eSDave Chinner static int 31647dcf5c3eSDave Chinner xfs_rename_alloc_whiteout( 31657dcf5c3eSDave Chinner struct xfs_inode *dp, 31667dcf5c3eSDave Chinner struct xfs_inode **wip) 31677dcf5c3eSDave Chinner { 31687dcf5c3eSDave Chinner struct xfs_inode *tmpfile; 31697dcf5c3eSDave Chinner int error; 31707dcf5c3eSDave Chinner 3171a1f69417SEric Sandeen error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile); 31727dcf5c3eSDave Chinner if (error) 31737dcf5c3eSDave Chinner return error; 31747dcf5c3eSDave Chinner 317522419ac9SBrian Foster /* 317622419ac9SBrian Foster * Prepare the tmpfile inode as if it were created through the VFS. 3177c4a6bf7fSDarrick J. Wong * Complete the inode setup and flag it as linkable. nlink is already 3178c4a6bf7fSDarrick J. Wong * zero, so we can skip the drop_nlink. 317922419ac9SBrian Foster */ 31802b3d1d41SChristoph Hellwig xfs_setup_iops(tmpfile); 31817dcf5c3eSDave Chinner xfs_finish_inode_setup(tmpfile); 31827dcf5c3eSDave Chinner VFS_I(tmpfile)->i_state |= I_LINKABLE; 31837dcf5c3eSDave Chinner 31847dcf5c3eSDave Chinner *wip = tmpfile; 31857dcf5c3eSDave Chinner return 0; 31867dcf5c3eSDave Chinner } 31877dcf5c3eSDave Chinner 31887dcf5c3eSDave Chinner /* 3189f6bba201SDave Chinner * xfs_rename 3190f6bba201SDave Chinner */ 3191f6bba201SDave Chinner int 3192f6bba201SDave Chinner xfs_rename( 31937dcf5c3eSDave Chinner struct xfs_inode *src_dp, 3194f6bba201SDave Chinner struct xfs_name *src_name, 31957dcf5c3eSDave Chinner struct xfs_inode *src_ip, 31967dcf5c3eSDave Chinner struct xfs_inode *target_dp, 3197f6bba201SDave Chinner struct xfs_name *target_name, 31987dcf5c3eSDave Chinner struct xfs_inode *target_ip, 3199d31a1825SCarlos Maiolino unsigned int flags) 3200f6bba201SDave Chinner { 32017dcf5c3eSDave Chinner struct xfs_mount *mp = src_dp->i_mount; 32027dcf5c3eSDave Chinner struct xfs_trans *tp; 32037dcf5c3eSDave Chinner struct xfs_inode *wip = NULL; /* whiteout inode */ 32047dcf5c3eSDave Chinner struct xfs_inode *inodes[__XFS_SORT_INODES]; 320595afcf5cSDave Chinner int num_inodes = __XFS_SORT_INODES; 32062b93681fSDave Chinner bool new_parent = (src_dp != target_dp); 3207c19b3b05SDave Chinner bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); 3208f6bba201SDave Chinner int spaceres; 32097dcf5c3eSDave Chinner int error; 3210f6bba201SDave Chinner 3211f6bba201SDave Chinner trace_xfs_rename(src_dp, target_dp, src_name, target_name); 3212f6bba201SDave Chinner 3213eeacd321SDave Chinner if ((flags & RENAME_EXCHANGE) && !target_ip) 3214eeacd321SDave Chinner return -EINVAL; 3215f6bba201SDave Chinner 32167dcf5c3eSDave Chinner /* 32177dcf5c3eSDave Chinner * If we are doing a whiteout operation, allocate the whiteout inode 32187dcf5c3eSDave Chinner * we will be placing at the target and ensure the type is set 32197dcf5c3eSDave Chinner * appropriately.
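 *
 * A whiteout is represented the same way the VFS expects everywhere else:
 * a character device with device number 0 (WHITEOUT_DEV). That is why the
 * tmpfile above is created as S_IFCHR | WHITEOUT_MODE and the dirent type
 * below is forced to XFS_DIR3_FT_CHRDEV to match.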
32207dcf5c3eSDave Chinner */ 32217dcf5c3eSDave Chinner if (flags & RENAME_WHITEOUT) { 32227dcf5c3eSDave Chinner ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE))); 32237dcf5c3eSDave Chinner error = xfs_rename_alloc_whiteout(target_dp, &wip); 32247dcf5c3eSDave Chinner if (error) 32257dcf5c3eSDave Chinner return error; 3226f6bba201SDave Chinner 32277dcf5c3eSDave Chinner /* setup target dirent info as whiteout */ 32287dcf5c3eSDave Chinner src_name->type = XFS_DIR3_FT_CHRDEV; 32297dcf5c3eSDave Chinner } 32307dcf5c3eSDave Chinner 32317dcf5c3eSDave Chinner xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip, 3232f6bba201SDave Chinner inodes, &num_inodes); 3233f6bba201SDave Chinner 3234f6bba201SDave Chinner spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); 3235253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp); 32362451337dSDave Chinner if (error == -ENOSPC) { 3237f6bba201SDave Chinner spaceres = 0; 3238253f4911SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0, 3239253f4911SChristoph Hellwig &tp); 3240f6bba201SDave Chinner } 3241445883e8SDave Chinner if (error) 3242253f4911SChristoph Hellwig goto out_release_wip; 3243f6bba201SDave Chinner 3244f6bba201SDave Chinner /* 3245f6bba201SDave Chinner * Attach the dquots to the inodes 3246f6bba201SDave Chinner */ 3247f6bba201SDave Chinner error = xfs_qm_vop_rename_dqattach(inodes); 3248445883e8SDave Chinner if (error) 3249445883e8SDave Chinner goto out_trans_cancel; 3250f6bba201SDave Chinner 3251f6bba201SDave Chinner /* 3252f6bba201SDave Chinner * Lock all the participating inodes. Depending upon whether 3253f6bba201SDave Chinner * the target_name exists in the target directory, and 3254f6bba201SDave Chinner * whether the target directory is the same as the source 3255f6bba201SDave Chinner * directory, we can lock from 2 to 4 inodes. 3256f6bba201SDave Chinner */ 3257f6bba201SDave Chinner xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); 3258f6bba201SDave Chinner 3259f6bba201SDave Chinner /* 3260f6bba201SDave Chinner * Join all the inodes to the transaction. From this point on, 3261f6bba201SDave Chinner * we can rely on either trans_commit or trans_cancel to unlock 3262f6bba201SDave Chinner * them. 3263f6bba201SDave Chinner */ 326465523218SChristoph Hellwig xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); 3265f6bba201SDave Chinner if (new_parent) 326665523218SChristoph Hellwig xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); 3267f6bba201SDave Chinner xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); 3268f6bba201SDave Chinner if (target_ip) 3269f6bba201SDave Chinner xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); 32707dcf5c3eSDave Chinner if (wip) 32717dcf5c3eSDave Chinner xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL); 3272f6bba201SDave Chinner 3273f6bba201SDave Chinner /* 3274f6bba201SDave Chinner * If we are using project inheritance, we only allow renames 3275f6bba201SDave Chinner * into our tree when the project IDs are the same; else the 3276f6bba201SDave Chinner * tree quota mechanism would be circumvented. 3277f6bba201SDave Chinner */ 3278f6bba201SDave Chinner if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && 3279f6bba201SDave Chinner (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) { 32802451337dSDave Chinner error = -EXDEV; 3281445883e8SDave Chinner goto out_trans_cancel; 3282f6bba201SDave Chinner } 3283f6bba201SDave Chinner 3284eeacd321SDave Chinner /* RENAME_EXCHANGE is unique from here on. 
*/ 3285eeacd321SDave Chinner if (flags & RENAME_EXCHANGE) 3286eeacd321SDave Chinner return xfs_cross_rename(tp, src_dp, src_name, src_ip, 3287d31a1825SCarlos Maiolino target_dp, target_name, target_ip, 3288f16dea54SBrian Foster spaceres); 3289d31a1825SCarlos Maiolino 3290d31a1825SCarlos Maiolino /* 3291f6bba201SDave Chinner * Set up the target. 3292f6bba201SDave Chinner */ 3293f6bba201SDave Chinner if (target_ip == NULL) { 3294f6bba201SDave Chinner /* 3295f6bba201SDave Chinner * If there's no space reservation, check the entry will 3296f6bba201SDave Chinner * fit before actually inserting it. 3297f6bba201SDave Chinner */ 329894f3cad5SEric Sandeen if (!spaceres) { 329994f3cad5SEric Sandeen error = xfs_dir_canenter(tp, target_dp, target_name); 3300f6bba201SDave Chinner if (error) 3301445883e8SDave Chinner goto out_trans_cancel; 330294f3cad5SEric Sandeen } 3303f6bba201SDave Chinner /* 3304f6bba201SDave Chinner * If target does not exist and the rename crosses 3305f6bba201SDave Chinner * directories, adjust the target directory link count 3306f6bba201SDave Chinner * to account for the ".." reference from the new entry. 3307f6bba201SDave Chinner */ 3308f6bba201SDave Chinner error = xfs_dir_createname(tp, target_dp, target_name, 3309381eee69SBrian Foster src_ip->i_ino, spaceres); 3310f6bba201SDave Chinner if (error) 3311c8eac49eSBrian Foster goto out_trans_cancel; 3312f6bba201SDave Chinner 3313f6bba201SDave Chinner xfs_trans_ichgtime(tp, target_dp, 3314f6bba201SDave Chinner XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3315f6bba201SDave Chinner 3316f6bba201SDave Chinner if (new_parent && src_is_directory) { 331791083269SEric Sandeen xfs_bumplink(tp, target_dp); 3318f6bba201SDave Chinner } 3319f6bba201SDave Chinner } else { /* target_ip != NULL */ 3320f6bba201SDave Chinner /* 3321f6bba201SDave Chinner * If target exists and it's a directory, check that both 3322f6bba201SDave Chinner * target and source are directories and that target can be 3323f6bba201SDave Chinner * destroyed, or that neither is a directory. 3324f6bba201SDave Chinner */ 3325c19b3b05SDave Chinner if (S_ISDIR(VFS_I(target_ip)->i_mode)) { 3326f6bba201SDave Chinner /* 3327f6bba201SDave Chinner * Make sure target dir is empty. 3328f6bba201SDave Chinner */ 3329f6bba201SDave Chinner if (!(xfs_dir_isempty(target_ip)) || 333054d7b5c1SDave Chinner (VFS_I(target_ip)->i_nlink > 2)) { 33312451337dSDave Chinner error = -EEXIST; 3332445883e8SDave Chinner goto out_trans_cancel; 3333f6bba201SDave Chinner } 3334f6bba201SDave Chinner } 3335f6bba201SDave Chinner 3336f6bba201SDave Chinner /* 3337f6bba201SDave Chinner * Link the source inode under the target name. 3338f6bba201SDave Chinner * If the source inode is a directory and we are moving 3339f6bba201SDave Chinner * it across directories, its ".." entry will be 3340f6bba201SDave Chinner * inconsistent until we replace that down below. 3341f6bba201SDave Chinner * 3342f6bba201SDave Chinner * In case there is already an entry with the same 3343f6bba201SDave Chinner * name at the destination directory, remove it first. 
3344f6bba201SDave Chinner */ 3345f6bba201SDave Chinner error = xfs_dir_replace(tp, target_dp, target_name, 3346381eee69SBrian Foster src_ip->i_ino, spaceres); 3347f6bba201SDave Chinner if (error) 3348c8eac49eSBrian Foster goto out_trans_cancel; 3349f6bba201SDave Chinner 3350f6bba201SDave Chinner xfs_trans_ichgtime(tp, target_dp, 3351f6bba201SDave Chinner XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3352f6bba201SDave Chinner 3353f6bba201SDave Chinner /* 3354f6bba201SDave Chinner * Decrement the link count on the target since the target 3355f6bba201SDave Chinner * dir no longer points to it. 3356f6bba201SDave Chinner */ 3357f6bba201SDave Chinner error = xfs_droplink(tp, target_ip); 3358f6bba201SDave Chinner if (error) 3359c8eac49eSBrian Foster goto out_trans_cancel; 3360f6bba201SDave Chinner 3361f6bba201SDave Chinner if (src_is_directory) { 3362f6bba201SDave Chinner /* 3363f6bba201SDave Chinner * Drop the link from the old "." entry. 3364f6bba201SDave Chinner */ 3365f6bba201SDave Chinner error = xfs_droplink(tp, target_ip); 3366f6bba201SDave Chinner if (error) 3367c8eac49eSBrian Foster goto out_trans_cancel; 3368f6bba201SDave Chinner } 3369f6bba201SDave Chinner } /* target_ip != NULL */ 3370f6bba201SDave Chinner 3371f6bba201SDave Chinner /* 3372f6bba201SDave Chinner * Remove the source. 3373f6bba201SDave Chinner */ 3374f6bba201SDave Chinner if (new_parent && src_is_directory) { 3375f6bba201SDave Chinner /* 3376f6bba201SDave Chinner * Rewrite the ".." entry to point to the new 3377f6bba201SDave Chinner * directory. 3378f6bba201SDave Chinner */ 3379f6bba201SDave Chinner error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, 3380381eee69SBrian Foster target_dp->i_ino, spaceres); 33812451337dSDave Chinner ASSERT(error != -EEXIST); 3382f6bba201SDave Chinner if (error) 3383c8eac49eSBrian Foster goto out_trans_cancel; 3384f6bba201SDave Chinner } 3385f6bba201SDave Chinner 3386f6bba201SDave Chinner /* 3387f6bba201SDave Chinner * We always want to hit the ctime on the source inode. 3388f6bba201SDave Chinner * 3389f6bba201SDave Chinner * This isn't strictly required by the standards since the source 3390f6bba201SDave Chinner * inode isn't really being changed, but old unix file systems did 3391f6bba201SDave Chinner * it and some incremental backup programs won't work without it. 3392f6bba201SDave Chinner */ 3393f6bba201SDave Chinner xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); 3394f6bba201SDave Chinner xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE); 3395f6bba201SDave Chinner 3396f6bba201SDave Chinner /* 3397f6bba201SDave Chinner * Adjust the link count on src_dp. This is necessary when 3398f6bba201SDave Chinner * renaming a directory, either within one parent when 3399f6bba201SDave Chinner * the target existed, or across two parent directories. 3400f6bba201SDave Chinner */ 3401f6bba201SDave Chinner if (src_is_directory && (new_parent || target_ip != NULL)) { 3402f6bba201SDave Chinner 3403f6bba201SDave Chinner /* 3404f6bba201SDave Chinner * Decrement link count on src_directory since the 3405f6bba201SDave Chinner * entry that's moved no longer points to it. 
3406f6bba201SDave Chinner */ 3407f6bba201SDave Chinner error = xfs_droplink(tp, src_dp); 3408f6bba201SDave Chinner if (error) 3409c8eac49eSBrian Foster goto out_trans_cancel; 3410f6bba201SDave Chinner } 3411f6bba201SDave Chinner 34127dcf5c3eSDave Chinner /* 34137dcf5c3eSDave Chinner * For whiteouts, we only need to update the source dirent with the 34147dcf5c3eSDave Chinner * inode number of the whiteout inode rather than removing it 34157dcf5c3eSDave Chinner * altogether. 34167dcf5c3eSDave Chinner */ 34177dcf5c3eSDave Chinner if (wip) { 34187dcf5c3eSDave Chinner error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino, 3419381eee69SBrian Foster spaceres); 34207dcf5c3eSDave Chinner } else 3421f6bba201SDave Chinner error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, 3422381eee69SBrian Foster spaceres); 3423f6bba201SDave Chinner if (error) 3424c8eac49eSBrian Foster goto out_trans_cancel; 3425f6bba201SDave Chinner 34267dcf5c3eSDave Chinner /* 34277dcf5c3eSDave Chinner * For whiteouts, we need to bump the link count on the whiteout inode. 34287dcf5c3eSDave Chinner * This means that failures all the way up to this point leave the inode 34297dcf5c3eSDave Chinner * on the unlinked list and so cleanup is a simple matter of dropping 34307dcf5c3eSDave Chinner * the remaining reference to it. If we fail here after bumping the link 34317dcf5c3eSDave Chinner * count, we're shutting down the filesystem so we'll never see the 34327dcf5c3eSDave Chinner * intermediate state on disk. 34337dcf5c3eSDave Chinner */ 34347dcf5c3eSDave Chinner if (wip) { 343554d7b5c1SDave Chinner ASSERT(VFS_I(wip)->i_nlink == 0); 343691083269SEric Sandeen xfs_bumplink(tp, wip); 34377dcf5c3eSDave Chinner error = xfs_iunlink_remove(tp, wip); 34387dcf5c3eSDave Chinner if (error) 3439c8eac49eSBrian Foster goto out_trans_cancel; 34407dcf5c3eSDave Chinner xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE); 34417dcf5c3eSDave Chinner 34427dcf5c3eSDave Chinner /* 34437dcf5c3eSDave Chinner * Now we have a real link, clear the "I'm a tmpfile" state 34447dcf5c3eSDave Chinner * flag from the inode so it doesn't accidentally get misused in 34457dcf5c3eSDave Chinner * future. 34467dcf5c3eSDave Chinner */ 34477dcf5c3eSDave Chinner VFS_I(wip)->i_state &= ~I_LINKABLE; 34487dcf5c3eSDave Chinner } 3449f6bba201SDave Chinner 3450f6bba201SDave Chinner xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3451f6bba201SDave Chinner xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); 3452f6bba201SDave Chinner if (new_parent) 3453f6bba201SDave Chinner xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); 3454f6bba201SDave Chinner 3455c9cfdb38SBrian Foster error = xfs_finish_rename(tp); 34567dcf5c3eSDave Chinner if (wip) 345744a8736bSDarrick J. Wong xfs_irele(wip); 34587dcf5c3eSDave Chinner return error; 3459f6bba201SDave Chinner 3460445883e8SDave Chinner out_trans_cancel: 34614906e215SChristoph Hellwig xfs_trans_cancel(tp); 3462253f4911SChristoph Hellwig out_release_wip: 34637dcf5c3eSDave Chinner if (wip) 346444a8736bSDarrick J. 
Wong xfs_irele(wip); 3465f6bba201SDave Chinner return error; 3466f6bba201SDave Chinner } 3467f6bba201SDave Chinner 3468bad55843SDavid Chinner STATIC int 3469bad55843SDavid Chinner xfs_iflush_cluster( 347019429363SDave Chinner struct xfs_inode *ip, 347119429363SDave Chinner struct xfs_buf *bp) 3472bad55843SDavid Chinner { 347319429363SDave Chinner struct xfs_mount *mp = ip->i_mount; 34745017e97dSDave Chinner struct xfs_perag *pag; 3475bad55843SDavid Chinner unsigned long first_index, mask; 347619429363SDave Chinner int cilist_size; 347719429363SDave Chinner struct xfs_inode **cilist; 347819429363SDave Chinner struct xfs_inode *cip; 3479ef325959SDarrick J. Wong struct xfs_ino_geometry *igeo = M_IGEO(mp); 3480bad55843SDavid Chinner int nr_found; 3481bad55843SDavid Chinner int clcount = 0; 3482bad55843SDavid Chinner int i; 3483bad55843SDavid Chinner 34845017e97dSDave Chinner pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 3485bad55843SDavid Chinner 34864b4d98ccSDarrick J. Wong cilist_size = igeo->inodes_per_cluster * sizeof(struct xfs_inode *); 348719429363SDave Chinner cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS); 348819429363SDave Chinner if (!cilist) 348944b56e0aSDave Chinner goto out_put; 3490bad55843SDavid Chinner 34914b4d98ccSDarrick J. Wong mask = ~(igeo->inodes_per_cluster - 1); 3492bad55843SDavid Chinner first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; 34931a3e8f3dSDave Chinner rcu_read_lock(); 3494bad55843SDavid Chinner /* really need a gang lookup range call here */ 349519429363SDave Chinner nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist, 34964b4d98ccSDarrick J. Wong first_index, igeo->inodes_per_cluster); 3497bad55843SDavid Chinner if (nr_found == 0) 3498bad55843SDavid Chinner goto out_free; 3499bad55843SDavid Chinner 3500bad55843SDavid Chinner for (i = 0; i < nr_found; i++) { 350119429363SDave Chinner cip = cilist[i]; 350219429363SDave Chinner if (cip == ip) 3503bad55843SDavid Chinner continue; 35041a3e8f3dSDave Chinner 35051a3e8f3dSDave Chinner /* 35061a3e8f3dSDave Chinner * because this is an RCU protected lookup, we could find a 35071a3e8f3dSDave Chinner * recently freed or even reallocated inode during the lookup. 35081a3e8f3dSDave Chinner * We need to check under the i_flags_lock for a valid inode 35091a3e8f3dSDave Chinner * here. Skip it if it is not valid or the wrong inode. 35101a3e8f3dSDave Chinner */ 351119429363SDave Chinner spin_lock(&cip->i_flags_lock); 351219429363SDave Chinner if (!cip->i_ino || 351319429363SDave Chinner __xfs_iflags_test(cip, XFS_ISTALE)) { 351419429363SDave Chinner spin_unlock(&cip->i_flags_lock); 35151a3e8f3dSDave Chinner continue; 35161a3e8f3dSDave Chinner } 35175a90e53eSDave Chinner 35185a90e53eSDave Chinner /* 35195a90e53eSDave Chinner * Once we fall off the end of the cluster, no point checking 35205a90e53eSDave Chinner * any more inodes in the list because they will also all be 35215a90e53eSDave Chinner * outside the cluster. 35225a90e53eSDave Chinner */ 352319429363SDave Chinner if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) { 352419429363SDave Chinner spin_unlock(&cip->i_flags_lock); 35255a90e53eSDave Chinner break; 35265a90e53eSDave Chinner } 352719429363SDave Chinner spin_unlock(&cip->i_flags_lock); 35281a3e8f3dSDave Chinner 3529bad55843SDavid Chinner /* 3530bad55843SDavid Chinner * Do an un-protected check to see if the inode is dirty and 3531bad55843SDavid Chinner * is a candidate for flushing. 
These checks will be repeated 3532bad55843SDavid Chinner * later after the appropriate locks are acquired. 3533bad55843SDavid Chinner */ 353419429363SDave Chinner if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0) 3535bad55843SDavid Chinner continue; 3536bad55843SDavid Chinner 3537bad55843SDavid Chinner /* 3538bad55843SDavid Chinner * Try to get locks. If any are unavailable or it is pinned, 3539bad55843SDavid Chinner * then this inode cannot be flushed and is skipped. 3540bad55843SDavid Chinner */ 3541bad55843SDavid Chinner 354219429363SDave Chinner if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED)) 3543bad55843SDavid Chinner continue; 354419429363SDave Chinner if (!xfs_iflock_nowait(cip)) { 354519429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3546bad55843SDavid Chinner continue; 3547bad55843SDavid Chinner } 354819429363SDave Chinner if (xfs_ipincount(cip)) { 354919429363SDave Chinner xfs_ifunlock(cip); 355019429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3551bad55843SDavid Chinner continue; 3552bad55843SDavid Chinner } 3553bad55843SDavid Chinner 35548a17d7ddSDave Chinner 35558a17d7ddSDave Chinner /* 35568a17d7ddSDave Chinner * Check the inode number again, just to be certain we are not 35578a17d7ddSDave Chinner * racing with freeing in xfs_reclaim_inode(). See the comments 35588a17d7ddSDave Chinner * in that function for more information as to why the initial 35598a17d7ddSDave Chinner * check is not sufficient. 35608a17d7ddSDave Chinner */ 356119429363SDave Chinner if (!cip->i_ino) { 356219429363SDave Chinner xfs_ifunlock(cip); 356319429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3564bad55843SDavid Chinner continue; 3565bad55843SDavid Chinner } 3566bad55843SDavid Chinner 3567bad55843SDavid Chinner /* 3568bad55843SDavid Chinner * arriving here means that this inode can be flushed. First 3569bad55843SDavid Chinner * re-check that it's dirty before flushing. 3570bad55843SDavid Chinner */ 357119429363SDave Chinner if (!xfs_inode_clean(cip)) { 3572bad55843SDavid Chinner int error; 357319429363SDave Chinner error = xfs_iflush_int(cip, bp); 3574bad55843SDavid Chinner if (error) { 357519429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3576bad55843SDavid Chinner goto cluster_corrupt_out; 3577bad55843SDavid Chinner } 3578bad55843SDavid Chinner clcount++; 3579bad55843SDavid Chinner } else { 358019429363SDave Chinner xfs_ifunlock(cip); 3581bad55843SDavid Chinner } 358219429363SDave Chinner xfs_iunlock(cip, XFS_ILOCK_SHARED); 3583bad55843SDavid Chinner } 3584bad55843SDavid Chinner 3585bad55843SDavid Chinner if (clcount) { 3586ff6d6af2SBill O'Donnell XFS_STATS_INC(mp, xs_icluster_flushcnt); 3587ff6d6af2SBill O'Donnell XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount); 3588bad55843SDavid Chinner } 3589bad55843SDavid Chinner 3590bad55843SDavid Chinner out_free: 35911a3e8f3dSDave Chinner rcu_read_unlock(); 359219429363SDave Chinner kmem_free(cilist); 359344b56e0aSDave Chinner out_put: 359444b56e0aSDave Chinner xfs_perag_put(pag); 3595bad55843SDavid Chinner return 0; 3596bad55843SDavid Chinner 3597bad55843SDavid Chinner 3598bad55843SDavid Chinner cluster_corrupt_out: 3599bad55843SDavid Chinner /* 3600bad55843SDavid Chinner * Corruption detected in the clustering loop. Invalidate the 3601bad55843SDavid Chinner * inode buffer and shut down the filesystem. 
3602bad55843SDavid Chinner */ 36031a3e8f3dSDave Chinner rcu_read_unlock(); 3604bad55843SDavid Chinner 3605bad55843SDavid Chinner /* 3606e53946dbSDave Chinner * We'll always have an inode attached to the buffer for completion 3607e53946dbSDave Chinner * processing by the time we are called from xfs_iflush(). Hence we 3608e53946dbSDave Chinner * always need to do IO completion processing to abort the inodes 3609e53946dbSDave Chinner * attached to the buffer. Handle them just like the shutdown case in 3610e53946dbSDave Chinner * xfs_buf_submit(). 3611bad55843SDavid Chinner */ 3612e53946dbSDave Chinner ASSERT(bp->b_iodone); 361322fedd80SBrian Foster bp->b_flags |= XBF_ASYNC; 3614b0388bf1SDave Chinner bp->b_flags &= ~XBF_DONE; 3615c867cb61SChristoph Hellwig xfs_buf_stale(bp); 36162451337dSDave Chinner xfs_buf_ioerror(bp, -EIO); 3617e8aaba9aSDave Chinner xfs_buf_ioend(bp); 3618bad55843SDavid Chinner 361922fedd80SBrian Foster xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 362022fedd80SBrian Foster 3621e53946dbSDave Chinner /* abort the corrupt inode, as it was not attached to the buffer */ 362219429363SDave Chinner xfs_iflush_abort(cip, false); 362319429363SDave Chinner kmem_free(cilist); 362444b56e0aSDave Chinner xfs_perag_put(pag); 36252451337dSDave Chinner return -EFSCORRUPTED; 3626bad55843SDavid Chinner } 3627bad55843SDavid Chinner 36281da177e4SLinus Torvalds /* 36294c46819aSChristoph Hellwig * Flush dirty inode metadata into the backing buffer. 36304c46819aSChristoph Hellwig * 36314c46819aSChristoph Hellwig * The caller must have the inode lock and the inode flush lock held. The 36324c46819aSChristoph Hellwig * inode lock will still be held upon return to the caller, and the inode 36334c46819aSChristoph Hellwig * flush lock will be released after the inode has reached the disk. 36344c46819aSChristoph Hellwig * 36354c46819aSChristoph Hellwig * The caller must write out the buffer returned in *bpp and release it. 36361da177e4SLinus Torvalds */ 36371da177e4SLinus Torvalds int 36381da177e4SLinus Torvalds xfs_iflush( 36394c46819aSChristoph Hellwig struct xfs_inode *ip, 36404c46819aSChristoph Hellwig struct xfs_buf **bpp) 36411da177e4SLinus Torvalds { 36424c46819aSChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 3643b1438f47SDave Chinner struct xfs_buf *bp = NULL; 36444c46819aSChristoph Hellwig struct xfs_dinode *dip; 36451da177e4SLinus Torvalds int error; 36461da177e4SLinus Torvalds 3647ff6d6af2SBill O'Donnell XFS_STATS_INC(mp, xs_iflush_count); 36481da177e4SLinus Torvalds 3649579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3650474fce06SChristoph Hellwig ASSERT(xfs_isiflocked(ip)); 36511da177e4SLinus Torvalds ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 36528096b1ebSChristoph Hellwig ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); 36531da177e4SLinus Torvalds 36544c46819aSChristoph Hellwig *bpp = NULL; 36551da177e4SLinus Torvalds 36561da177e4SLinus Torvalds xfs_iunpin_wait(ip); 36571da177e4SLinus Torvalds 36581da177e4SLinus Torvalds /* 36594b6a4688SDave Chinner * For stale inodes we cannot rely on the backing buffer remaining 36604b6a4688SDave Chinner * stale in cache for the remaining life of the stale inode and so 3661475ee413SChristoph Hellwig * xfs_imap_to_bp() below may give us a buffer that no longer contains 36624b6a4688SDave Chinner * inodes below.
We have to check this after ensuring the inode is 36634b6a4688SDave Chinner * unpinned so that it is safe to reclaim the stale inode after the 36644b6a4688SDave Chinner * flush call. 36654b6a4688SDave Chinner */ 36664b6a4688SDave Chinner if (xfs_iflags_test(ip, XFS_ISTALE)) { 36674b6a4688SDave Chinner xfs_ifunlock(ip); 36684b6a4688SDave Chinner return 0; 36694b6a4688SDave Chinner } 36704b6a4688SDave Chinner 36714b6a4688SDave Chinner /* 36721da177e4SLinus Torvalds * This may have been unpinned because the filesystem is shutting 36731da177e4SLinus Torvalds * down forcibly. If that's the case we must not write this inode 367432ce90a4SChristoph Hellwig * to disk, because the log record didn't make it to disk. 367532ce90a4SChristoph Hellwig * 367632ce90a4SChristoph Hellwig * We also have to remove the log item from the AIL in this case, 367732ce90a4SChristoph Hellwig * as we wait for an empty AIL as part of the unmount process. 36781da177e4SLinus Torvalds */ 36791da177e4SLinus Torvalds if (XFS_FORCED_SHUTDOWN(mp)) { 36802451337dSDave Chinner error = -EIO; 368132ce90a4SChristoph Hellwig goto abort_out; 36821da177e4SLinus Torvalds } 36831da177e4SLinus Torvalds 36841da177e4SLinus Torvalds /* 3685b1438f47SDave Chinner * Get the buffer containing the on-disk inode. We are doing a try-lock 3686b1438f47SDave Chinner * operation here, so we may get an EAGAIN error. In that case, we 3687b1438f47SDave Chinner * simply want to return with the inode still dirty. 3688b1438f47SDave Chinner * 3689b1438f47SDave Chinner * If we get any other error, we effectively have a corruption situation 3690b1438f47SDave Chinner * and we cannot flush the inode, so we treat it the same as failing 3691b1438f47SDave Chinner * xfs_iflush_int(). 3692a3f74ffbSDavid Chinner */ 3693475ee413SChristoph Hellwig error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK, 3694475ee413SChristoph Hellwig 0); 3695b1438f47SDave Chinner if (error == -EAGAIN) { 3696a3f74ffbSDavid Chinner xfs_ifunlock(ip); 3697a3f74ffbSDavid Chinner return error; 3698a3f74ffbSDavid Chinner } 3699b1438f47SDave Chinner if (error) 3700b1438f47SDave Chinner goto corrupt_out; 3701a3f74ffbSDavid Chinner 3702a3f74ffbSDavid Chinner /* 37031da177e4SLinus Torvalds * First flush out the inode that xfs_iflush was called with. 37041da177e4SLinus Torvalds */ 37051da177e4SLinus Torvalds error = xfs_iflush_int(ip, bp); 3706bad55843SDavid Chinner if (error) 37071da177e4SLinus Torvalds goto corrupt_out; 37081da177e4SLinus Torvalds 37091da177e4SLinus Torvalds /* 3710a3f74ffbSDavid Chinner * If the buffer is pinned then push on the log now so we won't 3711a3f74ffbSDavid Chinner * get stuck waiting in the write for too long. 3712a3f74ffbSDavid Chinner */ 3713811e64c7SChandra Seetharaman if (xfs_buf_ispinned(bp)) 3714a14a348bSChristoph Hellwig xfs_log_force(mp, 0); 3715a3f74ffbSDavid Chinner 3716a3f74ffbSDavid Chinner /* 3717e53946dbSDave Chinner * inode clustering: try to gather other inodes into this write 3718e53946dbSDave Chinner * 3719e53946dbSDave Chinner * Note: Any error during clustering will result in the filesystem 3720e53946dbSDave Chinner * being shut down and completion callbacks run on the cluster buffer. 3721e53946dbSDave Chinner * As we have already flushed and attached this inode to the buffer, 3722e53946dbSDave Chinner * it has already been aborted and released by xfs_iflush_cluster() and 3723e53946dbSDave Chinner * so we have no further error handling to do here. 
37241da177e4SLinus Torvalds */ 3725bad55843SDavid Chinner error = xfs_iflush_cluster(ip, bp); 3726bad55843SDavid Chinner if (error) 3727e53946dbSDave Chinner return error; 37281da177e4SLinus Torvalds 37294c46819aSChristoph Hellwig *bpp = bp; 37304c46819aSChristoph Hellwig return 0; 37311da177e4SLinus Torvalds 37321da177e4SLinus Torvalds corrupt_out: 3733b1438f47SDave Chinner if (bp) 37341da177e4SLinus Torvalds xfs_buf_relse(bp); 37357d04a335SNathan Scott xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 373632ce90a4SChristoph Hellwig abort_out: 3737e53946dbSDave Chinner /* abort the corrupt inode, as it was not attached to the buffer */ 373804913fddSDave Chinner xfs_iflush_abort(ip, false); 373932ce90a4SChristoph Hellwig return error; 37401da177e4SLinus Torvalds } 37411da177e4SLinus Torvalds 37429cfb9b47SDarrick J. Wong /* 37439cfb9b47SDarrick J. Wong * If there are inline format data / attr forks attached to this inode, 37449cfb9b47SDarrick J. Wong * make sure they're not corrupt. 37459cfb9b47SDarrick J. Wong */ 37469cfb9b47SDarrick J. Wong bool 37479cfb9b47SDarrick J. Wong xfs_inode_verify_forks( 37489cfb9b47SDarrick J. Wong struct xfs_inode *ip) 37499cfb9b47SDarrick J. Wong { 375022431bf3SDarrick J. Wong struct xfs_ifork *ifp; 37519cfb9b47SDarrick J. Wong xfs_failaddr_t fa; 37529cfb9b47SDarrick J. Wong 37539cfb9b47SDarrick J. Wong fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops); 37549cfb9b47SDarrick J. Wong if (fa) { 375522431bf3SDarrick J. Wong ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 375622431bf3SDarrick J. Wong xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork", 375722431bf3SDarrick J. Wong ifp->if_u1.if_data, ifp->if_bytes, fa); 37589cfb9b47SDarrick J. Wong return false; 37599cfb9b47SDarrick J. Wong } 37609cfb9b47SDarrick J. Wong 37619cfb9b47SDarrick J. Wong fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops); 37629cfb9b47SDarrick J. Wong if (fa) { 376322431bf3SDarrick J. Wong ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK); 376422431bf3SDarrick J. Wong xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork", 376522431bf3SDarrick J. Wong ifp ? ifp->if_u1.if_data : NULL, 376622431bf3SDarrick J. Wong ifp ? ifp->if_bytes : 0, fa); 37679cfb9b47SDarrick J. Wong return false; 37689cfb9b47SDarrick J. Wong } 37699cfb9b47SDarrick J. Wong return true; 37709cfb9b47SDarrick J. Wong } 37719cfb9b47SDarrick J. 
Wong 37721da177e4SLinus Torvalds STATIC int 37731da177e4SLinus Torvalds xfs_iflush_int( 377493848a99SChristoph Hellwig struct xfs_inode *ip, 377593848a99SChristoph Hellwig struct xfs_buf *bp) 37761da177e4SLinus Torvalds { 377793848a99SChristoph Hellwig struct xfs_inode_log_item *iip = ip->i_itemp; 377893848a99SChristoph Hellwig struct xfs_dinode *dip; 377993848a99SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 37801da177e4SLinus Torvalds 3781579aa9caSChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3782474fce06SChristoph Hellwig ASSERT(xfs_isiflocked(ip)); 37831da177e4SLinus Torvalds ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 37848096b1ebSChristoph Hellwig ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); 378593848a99SChristoph Hellwig ASSERT(iip != NULL && iip->ili_fields != 0); 3786263997a6SDave Chinner ASSERT(ip->i_d.di_version > 1); 37871da177e4SLinus Torvalds 37881da177e4SLinus Torvalds /* set *dip = inode's place in the buffer */ 378988ee2df7SChristoph Hellwig dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); 37901da177e4SLinus Torvalds 379169ef921bSChristoph Hellwig if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), 37929e24cfd0SDarrick J. Wong mp, XFS_ERRTAG_IFLUSH_1)) { 37936a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3794c9690043SDarrick J. Wong "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT, 37956a19d939SDave Chinner __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); 37961da177e4SLinus Torvalds goto corrupt_out; 37971da177e4SLinus Torvalds } 3798c19b3b05SDave Chinner if (S_ISREG(VFS_I(ip)->i_mode)) { 37991da177e4SLinus Torvalds if (XFS_TEST_ERROR( 38001da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 38011da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 38029e24cfd0SDarrick J. Wong mp, XFS_ERRTAG_IFLUSH_3)) { 38036a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3804c9690043SDarrick J. Wong "%s: Bad regular inode %Lu, ptr "PTR_FMT, 38056a19d939SDave Chinner __func__, ip->i_ino, ip); 38061da177e4SLinus Torvalds goto corrupt_out; 38071da177e4SLinus Torvalds } 3808c19b3b05SDave Chinner } else if (S_ISDIR(VFS_I(ip)->i_mode)) { 38091da177e4SLinus Torvalds if (XFS_TEST_ERROR( 38101da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 38111da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 38121da177e4SLinus Torvalds (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 38139e24cfd0SDarrick J. Wong mp, XFS_ERRTAG_IFLUSH_4)) { 38146a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3815c9690043SDarrick J. Wong "%s: Bad directory inode %Lu, ptr "PTR_FMT, 38166a19d939SDave Chinner __func__, ip->i_ino, ip); 38171da177e4SLinus Torvalds goto corrupt_out; 38181da177e4SLinus Torvalds } 38191da177e4SLinus Torvalds } 38201da177e4SLinus Torvalds if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 38219e24cfd0SDarrick J. Wong ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) { 38226a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 38236a19d939SDave Chinner "%s: detected corrupt incore inode %Lu, " 3824c9690043SDarrick J. 
Wong "total extents = %d, nblocks = %Ld, ptr "PTR_FMT, 38256a19d939SDave Chinner __func__, ip->i_ino, 38261da177e4SLinus Torvalds ip->i_d.di_nextents + ip->i_d.di_anextents, 38276a19d939SDave Chinner ip->i_d.di_nblocks, ip); 38281da177e4SLinus Torvalds goto corrupt_out; 38291da177e4SLinus Torvalds } 38301da177e4SLinus Torvalds if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 38319e24cfd0SDarrick J. Wong mp, XFS_ERRTAG_IFLUSH_6)) { 38326a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3833c9690043SDarrick J. Wong "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT, 38346a19d939SDave Chinner __func__, ip->i_ino, ip->i_d.di_forkoff, ip); 38351da177e4SLinus Torvalds goto corrupt_out; 38361da177e4SLinus Torvalds } 3837e60896d8SDave Chinner 38381da177e4SLinus Torvalds /* 3839263997a6SDave Chinner * Inode item log recovery for v2 inodes is dependent on the 3840e60896d8SDave Chinner * di_flushiter count for correct sequencing. We bump the flush 3841e60896d8SDave Chinner * iteration count so we can detect flushes which postdate a log record 3842e60896d8SDave Chinner * during recovery. This is redundant as we now log every change and 3843e60896d8SDave Chinner * hence this can't happen, but we still need to do it to ensure 3844e60896d8SDave Chinner * backwards compatibility with old kernels that predate logging all 3845e60896d8SDave Chinner * inode changes. 38461da177e4SLinus Torvalds */ 3847e60896d8SDave Chinner if (ip->i_d.di_version < 3) 38481da177e4SLinus Torvalds ip->i_d.di_flushiter++; 38491da177e4SLinus Torvalds 38509cfb9b47SDarrick J. Wong /* Check the inline fork data before we write out. */ 38519cfb9b47SDarrick J. Wong if (!xfs_inode_verify_forks(ip)) 3852005c5db8SDarrick J. Wong goto corrupt_out; 3853005c5db8SDarrick J. Wong 38541da177e4SLinus Torvalds /* 38553987848cSDave Chinner * Copy the dirty parts of the inode into the on-disk inode. We always 38563987848cSDave Chinner * copy out the core of the inode, because if the inode is dirty at all 38573987848cSDave Chinner * the core must be. 38581da177e4SLinus Torvalds */ 385993f958f9SDave Chinner xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn); 38601da177e4SLinus Torvalds 38611da177e4SLinus Torvalds /* Wrap, we never let the log put out DI_MAX_FLUSH */ 38621da177e4SLinus Torvalds if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 38631da177e4SLinus Torvalds ip->i_d.di_flushiter = 0; 38641da177e4SLinus Torvalds 3865005c5db8SDarrick J. Wong xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3866005c5db8SDarrick J. Wong if (XFS_IFORK_Q(ip)) 3867005c5db8SDarrick J. Wong xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); 38681da177e4SLinus Torvalds xfs_inobp_check(mp, bp); 38691da177e4SLinus Torvalds 38701da177e4SLinus Torvalds /* 3871f5d8d5c4SChristoph Hellwig * We've recorded everything logged in the inode, so we'd like to clear 3872f5d8d5c4SChristoph Hellwig * the ili_fields bits so we don't log and flush things unnecessarily. 3873f5d8d5c4SChristoph Hellwig * However, we can't stop logging all this information until the data 3874f5d8d5c4SChristoph Hellwig * we've copied into the disk buffer is written to disk. If we did, we 3875f5d8d5c4SChristoph Hellwig * might overwrite the copy of the inode in the log with all the data 3876f5d8d5c4SChristoph Hellwig * after re-logging only part of it, and in the face of a crash we 3877f5d8d5c4SChristoph Hellwig * wouldn't have all the data we need to recover. 38781da177e4SLinus Torvalds * 3879f5d8d5c4SChristoph Hellwig * What we do is move the bits to the ili_last_fields field.
When 3880f5d8d5c4SChristoph Hellwig * logging the inode, these bits are moved back to the ili_fields field. 3881f5d8d5c4SChristoph Hellwig * In the xfs_iflush_done() routine we clear ili_last_fields, since we 3882f5d8d5c4SChristoph Hellwig * know that the information those bits represent is permanently on 3883f5d8d5c4SChristoph Hellwig * disk. As long as the flush completes before the inode is logged 3884f5d8d5c4SChristoph Hellwig * again, then both ili_fields and ili_last_fields will be cleared. 38851da177e4SLinus Torvalds * 3886f5d8d5c4SChristoph Hellwig * We can play with the ili_fields bits here, because the inode lock 3887f5d8d5c4SChristoph Hellwig * must be held exclusively in order to set bits there and the flush 3888f5d8d5c4SChristoph Hellwig * lock protects the ili_last_fields bits. Set ili_logged so the flush 3889f5d8d5c4SChristoph Hellwig * done routine can tell whether or not to look in the AIL. Also, store 3890f5d8d5c4SChristoph Hellwig * the current LSN of the inode so that we can tell whether the item has 3891f5d8d5c4SChristoph Hellwig * moved in the AIL from xfs_iflush_done(). In order to read the lsn we 3892f5d8d5c4SChristoph Hellwig * need the AIL lock, because it is a 64 bit value that cannot be read 3893f5d8d5c4SChristoph Hellwig * atomically. 38941da177e4SLinus Torvalds */ 3895f5d8d5c4SChristoph Hellwig iip->ili_last_fields = iip->ili_fields; 3896f5d8d5c4SChristoph Hellwig iip->ili_fields = 0; 3897fc0561ceSDave Chinner iip->ili_fsync_fields = 0; 38981da177e4SLinus Torvalds iip->ili_logged = 1; 38991da177e4SLinus Torvalds 39007b2e2a31SDavid Chinner xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 39017b2e2a31SDavid Chinner &iip->ili_item.li_lsn); 39021da177e4SLinus Torvalds 39031da177e4SLinus Torvalds /* 39041da177e4SLinus Torvalds * Attach the function xfs_iflush_done to the inode's 39051da177e4SLinus Torvalds * buffer. This will remove the inode from the AIL 39061da177e4SLinus Torvalds * and unlock the inode's flush lock when the inode is 39071da177e4SLinus Torvalds * completely written to disk. 39081da177e4SLinus Torvalds */ 3909ca30b2a7SChristoph Hellwig xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); 39101da177e4SLinus Torvalds 391193848a99SChristoph Hellwig /* generate the checksum. */ 391293848a99SChristoph Hellwig xfs_dinode_calc_crc(mp, dip); 391393848a99SChristoph Hellwig 3914643c8c05SCarlos Maiolino ASSERT(!list_empty(&bp->b_li_list)); 3915cb669ca5SChristoph Hellwig ASSERT(bp->b_iodone != NULL); 39161da177e4SLinus Torvalds return 0; 39171da177e4SLinus Torvalds 39181da177e4SLinus Torvalds corrupt_out: 39192451337dSDave Chinner return -EFSCORRUPTED; 39201da177e4SLinus Torvalds } 392144a8736bSDarrick J. Wong 392244a8736bSDarrick J. Wong /* Release an inode. */ 392344a8736bSDarrick J. Wong void 392444a8736bSDarrick J. Wong xfs_irele( 392544a8736bSDarrick J. Wong struct xfs_inode *ip) 392644a8736bSDarrick J. Wong { 392744a8736bSDarrick J. Wong trace_xfs_irele(ip, _RET_IP_); 392844a8736bSDarrick J. Wong iput(VFS_I(ip)); 392944a8736bSDarrick J. Wong } 3930
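
The comment block above xfs_iflush() spells out a caller contract: enter with the ILOCK and the inode flush lock held, and on success write out and release the cluster buffer returned in *bpp. As a minimal sketch of that contract (not part of xfs_inode.c): example_flush_inode() is a hypothetical name, and xfs_bwrite() is used here only for simplicity, whereas the real in-tree caller, xfs_inode_item_push(), queues the buffer onto a delayed-write list instead.

/*
 * Illustrative sketch only -- shows the xfs_iflush() caller contract
 * under the assumptions noted above.
 */
STATIC int
example_flush_inode(
	struct xfs_inode	*ip)
{
	struct xfs_buf		*bp;
	int			error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (!xfs_iflock_nowait(ip)) {
		/* someone else holds the flush lock; try again later */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return -EAGAIN;
	}

	/*
	 * On success the flush lock travels with the buffer and is
	 * released at I/O completion; on failure xfs_iflush() has
	 * already dropped it or aborted the inode, so we only unwind
	 * the ILOCK here.
	 */
	error = xfs_iflush(ip, &bp);
	if (!error) {
		error = xfs_bwrite(bp);	/* write the cluster buffer */
		xfs_buf_relse(bp);	/* drop our buffer reference */
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}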