xref: /openbmc/linux/fs/xfs/xfs_inode.c (revision db66795f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include <linux/iversion.h>
7 
8 #include "xfs.h"
9 #include "xfs_fs.h"
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_dir2.h"
18 #include "xfs_attr.h"
19 #include "xfs_trans_space.h"
20 #include "xfs_trans.h"
21 #include "xfs_buf_item.h"
22 #include "xfs_inode_item.h"
23 #include "xfs_iunlink_item.h"
24 #include "xfs_ialloc.h"
25 #include "xfs_bmap.h"
26 #include "xfs_bmap_util.h"
27 #include "xfs_errortag.h"
28 #include "xfs_error.h"
29 #include "xfs_quota.h"
30 #include "xfs_filestream.h"
31 #include "xfs_trace.h"
32 #include "xfs_icache.h"
33 #include "xfs_symlink.h"
34 #include "xfs_trans_priv.h"
35 #include "xfs_log.h"
36 #include "xfs_bmap_btree.h"
37 #include "xfs_reflink.h"
38 #include "xfs_ag.h"
39 #include "xfs_log_priv.h"
40 
41 struct kmem_cache *xfs_inode_cache;
42 
43 /*
44  * Used in xfs_itruncate_extents().  This is the maximum number of extents
45  * freed from a file in a single transaction.
46  */
47 #define	XFS_ITRUNC_MAX_EXTENTS	2
48 
49 STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
50 STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
51 	struct xfs_inode *);
52 
53 /*
54  * Helper function to extract the extent size hint from an inode.
55  */
56 xfs_extlen_t
57 xfs_get_extsz_hint(
58 	struct xfs_inode	*ip)
59 {
60 	/*
61 	 * No point in aligning allocations if we need to COW to actually
62 	 * write to them.
63 	 */
64 	if (xfs_is_always_cow_inode(ip))
65 		return 0;
66 	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
67 		return ip->i_extsize;
68 	if (XFS_IS_REALTIME_INODE(ip))
69 		return ip->i_mount->m_sb.sb_rextsize;
70 	return 0;
71 }
72 
73 /*
74  * Helper function to extract CoW extent size hint from inode.
75  * Between the extent size hint and the CoW extent size hint, we
76  * return the greater of the two.  If the value is zero (automatic),
77  * use the default size.
78  */
79 xfs_extlen_t
80 xfs_get_cowextsz_hint(
81 	struct xfs_inode	*ip)
82 {
83 	xfs_extlen_t		a, b;
84 
85 	a = 0;
86 	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
87 		a = ip->i_cowextsize;
88 	b = xfs_get_extsz_hint(ip);
89 
90 	a = max(a, b);
91 	if (a == 0)
92 		return XFS_DEFAULT_COWEXTSZ_HINT;
93 	return a;
94 }
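
/*
 * Illustrative sketch, not part of the original source: how a caller
 * might pick an allocation alignment from the two hint helpers above.
 * The helper name xfs_example_alloc_align() is hypothetical.
 */
static inline xfs_extlen_t
xfs_example_alloc_align(
	struct xfs_inode	*ip,
	bool			for_cow)
{
	/* The CoW hint is never smaller than the plain extent size hint. */
	if (for_cow)
		return xfs_get_cowextsz_hint(ip);
	return xfs_get_extsz_hint(ip);
}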
95 
96 /*
97  * These two are wrapper routines around the xfs_ilock() routine used to
98  * centralize some grungy code.  They are used in places that wish to lock the
99  * inode solely for reading the extents.  The reason these places can't just
100  * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
101  * reading in of the extents from disk for a file in b-tree format.  If the
102  * inode is in b-tree format, then we need to lock the inode exclusively until
103  * the extents are read in.  Locking it exclusively all the time would limit
104  * our parallelism unnecessarily, though.  What we do instead is check to see
105  * if the extents have been read in yet, and only lock the inode exclusively
106  * if they have not.
107  *
108  * The functions return a value which should be given to the corresponding
109  * xfs_iunlock() call.
110  */
111 uint
112 xfs_ilock_data_map_shared(
113 	struct xfs_inode	*ip)
114 {
115 	uint			lock_mode = XFS_ILOCK_SHARED;
116 
117 	if (xfs_need_iread_extents(&ip->i_df))
118 		lock_mode = XFS_ILOCK_EXCL;
119 	xfs_ilock(ip, lock_mode);
120 	return lock_mode;
121 }
122 
123 uint
124 xfs_ilock_attr_map_shared(
125 	struct xfs_inode	*ip)
126 {
127 	uint			lock_mode = XFS_ILOCK_SHARED;
128 
129 	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
130 		lock_mode = XFS_ILOCK_EXCL;
131 	xfs_ilock(ip, lock_mode);
132 	return lock_mode;
133 }
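
/*
 * Illustrative usage, not part of the original source: the lock mode
 * returned by the helpers above must be handed back to the matching
 * xfs_iunlock() call.  xfs_example_walk_extents() is hypothetical.
 */
static inline void
xfs_example_walk_extents(
	struct xfs_inode	*ip)
{
	/* Taken exclusively if the extent list still has to be read in. */
	uint			lock_mode = xfs_ilock_data_map_shared(ip);

	/* ... walk the data fork extent list here ... */

	xfs_iunlock(ip, lock_mode);
}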
134 
135 /*
136  * You can't set both SHARED and EXCL for the same lock,
137  * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
138  * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
139  * to set in lock_flags.
140  */
141 static inline void
142 xfs_lock_flags_assert(
143 	uint		lock_flags)
144 {
145 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
146 		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
147 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
148 		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
149 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
150 		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
151 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
152 	ASSERT(lock_flags != 0);
153 }
154 
155 /*
156  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
157  * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
158  * various combinations of the locks to be obtained.
159  *
160  * The 3 locks should always be ordered so that the IO lock is obtained first,
161  * the mmap lock second and the ilock last in order to prevent deadlock.
162  *
163  * Basic locking order:
164  *
165  * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
166  *
167  * mmap_lock locking order:
168  *
169  * i_rwsem -> page lock -> mmap_lock
170  * mmap_lock -> invalidate_lock -> page_lock
171  *
172  * The difference in mmap_lock locking order means that we cannot hold the
173  * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
174  * can fault in pages during copy in/out (for buffered IO) or require the
175  * mmap_lock in get_user_pages() to map the user pages into the kernel address
176  * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
177  * fault because page faults already hold the mmap_lock.
178  *
179  * Hence to serialise fully against both syscall and mmap based IO, we need to
180  * take both the i_rwsem and the invalidate_lock. These locks should *only* be
181  * both taken in places where we need to invalidate the page cache in a race
182  * free manner (e.g. truncate, hole punch and other extent manipulation
183  * functions).
184  */
185 void
186 xfs_ilock(
187 	xfs_inode_t		*ip,
188 	uint			lock_flags)
189 {
190 	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
191 
192 	xfs_lock_flags_assert(lock_flags);
193 
194 	if (lock_flags & XFS_IOLOCK_EXCL) {
195 		down_write_nested(&VFS_I(ip)->i_rwsem,
196 				  XFS_IOLOCK_DEP(lock_flags));
197 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
198 		down_read_nested(&VFS_I(ip)->i_rwsem,
199 				 XFS_IOLOCK_DEP(lock_flags));
200 	}
201 
202 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
203 		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
204 				  XFS_MMAPLOCK_DEP(lock_flags));
205 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
206 		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
207 				 XFS_MMAPLOCK_DEP(lock_flags));
208 	}
209 
210 	if (lock_flags & XFS_ILOCK_EXCL)
211 		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
212 	else if (lock_flags & XFS_ILOCK_SHARED)
213 		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
214 }
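
/*
 * Illustrative sketch, not part of the original source: per the locking
 * rules above, page cache invalidation must take both the IOLOCK and
 * the MMAPLOCK to serialise against syscall and mmap based IO.  The
 * helper name is hypothetical.
 */
static inline void
xfs_example_fence_io(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

	/* ... truncate, hole punch or other extent manipulation ... */

	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
}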
215 
216 /*
217  * This is just like xfs_ilock(), except that the caller
218  * is guaranteed not to sleep.  It returns 1 if it gets
219  * the requested locks and 0 otherwise.  If the IO lock is
220  * obtained but the inode lock cannot be, then the IO lock
221  * is dropped before returning.
222  *
223  * ip -- the inode being locked
224  * lock_flags -- this parameter indicates which of the inode's locks are
225  *       to be locked.  See the comment for xfs_ilock() for a list
226  *	 of valid values.
227  */
228 int
229 xfs_ilock_nowait(
230 	xfs_inode_t		*ip,
231 	uint			lock_flags)
232 {
233 	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
234 
235 	xfs_lock_flags_assert(lock_flags);
236 
237 	if (lock_flags & XFS_IOLOCK_EXCL) {
238 		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
239 			goto out;
240 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
241 		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
242 			goto out;
243 	}
244 
245 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
246 		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
247 			goto out_undo_iolock;
248 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
249 		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
250 			goto out_undo_iolock;
251 	}
252 
253 	if (lock_flags & XFS_ILOCK_EXCL) {
254 		if (!mrtryupdate(&ip->i_lock))
255 			goto out_undo_mmaplock;
256 	} else if (lock_flags & XFS_ILOCK_SHARED) {
257 		if (!mrtryaccess(&ip->i_lock))
258 			goto out_undo_mmaplock;
259 	}
260 	return 1;
261 
262 out_undo_mmaplock:
263 	if (lock_flags & XFS_MMAPLOCK_EXCL)
264 		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
265 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
266 		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
267 out_undo_iolock:
268 	if (lock_flags & XFS_IOLOCK_EXCL)
269 		up_write(&VFS_I(ip)->i_rwsem);
270 	else if (lock_flags & XFS_IOLOCK_SHARED)
271 		up_read(&VFS_I(ip)->i_rwsem);
272 out:
273 	return 0;
274 }
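
/*
 * Illustrative usage, not part of the original source: a non-blocking
 * caller backs off when the locks are contended rather than sleeping:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL))
 *		return -EAGAIN;
 *	...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */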
275 
276 /*
277  * xfs_iunlock() is used to drop the inode locks acquired with
278  * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
279  * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
280  * that we know which locks to drop.
281  *
282  * ip -- the inode being unlocked
283  * lock_flags -- this parameter indicates which of the inode's locks are
284  *       to be unlocked.  See the comment for xfs_ilock() for a list
285  *	 of valid values for this parameter.
286  *
287  */
288 void
289 xfs_iunlock(
290 	xfs_inode_t		*ip,
291 	uint			lock_flags)
292 {
293 	xfs_lock_flags_assert(lock_flags);
294 
295 	if (lock_flags & XFS_IOLOCK_EXCL)
296 		up_write(&VFS_I(ip)->i_rwsem);
297 	else if (lock_flags & XFS_IOLOCK_SHARED)
298 		up_read(&VFS_I(ip)->i_rwsem);
299 
300 	if (lock_flags & XFS_MMAPLOCK_EXCL)
301 		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
302 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
303 		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
304 
305 	if (lock_flags & XFS_ILOCK_EXCL)
306 		mrunlock_excl(&ip->i_lock);
307 	else if (lock_flags & XFS_ILOCK_SHARED)
308 		mrunlock_shared(&ip->i_lock);
309 
310 	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
311 }
312 
313 /*
314  * give up write locks.  the i/o lock cannot be held nested
315  * if it is being demoted.
316  */
317 void
318 xfs_ilock_demote(
319 	xfs_inode_t		*ip,
320 	uint			lock_flags)
321 {
322 	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
323 	ASSERT((lock_flags &
324 		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
325 
326 	if (lock_flags & XFS_ILOCK_EXCL)
327 		mrdemote(&ip->i_lock);
328 	if (lock_flags & XFS_MMAPLOCK_EXCL)
329 		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
330 	if (lock_flags & XFS_IOLOCK_EXCL)
331 		downgrade_write(&VFS_I(ip)->i_rwsem);
332 
333 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
334 }
335 
336 #if defined(DEBUG) || defined(XFS_WARN)
337 static inline bool
338 __xfs_rwsem_islocked(
339 	struct rw_semaphore	*rwsem,
340 	bool			shared)
341 {
342 	if (!debug_locks)
343 		return rwsem_is_locked(rwsem);
344 
345 	if (!shared)
346 		return lockdep_is_held_type(rwsem, 0);
347 
348 	/*
349 	 * We are checking that the lock is held at least in shared
350 	 * mode but don't care that it might be held exclusively
351 	 * (i.e. shared | excl). Hence we check if the lock is held
352 	 * in any mode rather than an explicit shared mode.
353 	 */
354 	return lockdep_is_held_type(rwsem, -1);
355 }
356 
357 bool
358 xfs_isilocked(
359 	struct xfs_inode	*ip,
360 	uint			lock_flags)
361 {
362 	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
363 		if (!(lock_flags & XFS_ILOCK_SHARED))
364 			return !!ip->i_lock.mr_writer;
365 		return rwsem_is_locked(&ip->i_lock.mr_lock);
366 	}
367 
368 	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
369 		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
370 				(lock_flags & XFS_MMAPLOCK_SHARED));
371 	}
372 
373 	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
374 		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
375 				(lock_flags & XFS_IOLOCK_SHARED));
376 	}
377 
378 	ASSERT(0);
379 	return false;
380 }
381 #endif
382 
383 /*
384  * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
385  * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
386  * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
387  * errors and warnings.
388  */
389 #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
390 static bool
391 xfs_lockdep_subclass_ok(
392 	int subclass)
393 {
394 	return subclass < MAX_LOCKDEP_SUBCLASSES;
395 }
396 #else
397 #define xfs_lockdep_subclass_ok(subclass)	(true)
398 #endif
399 
400 /*
401  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
402  * value. This can be called for any type of inode lock combination, including
403  * parent locking. Care must be taken to ensure we don't overrun the subclass
404  * storage fields in the class mask we build.
405  */
406 static inline uint
407 xfs_lock_inumorder(
408 	uint	lock_mode,
409 	uint	subclass)
410 {
411 	uint	class = 0;
412 
413 	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
414 			      XFS_ILOCK_RTSUM)));
415 	ASSERT(xfs_lockdep_subclass_ok(subclass));
416 
417 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
418 		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
419 		class += subclass << XFS_IOLOCK_SHIFT;
420 	}
421 
422 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
423 		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
424 		class += subclass << XFS_MMAPLOCK_SHIFT;
425 	}
426 
427 	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
428 		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
429 		class += subclass << XFS_ILOCK_SHIFT;
430 	}
431 
432 	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
433 }
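
/*
 * Worked example, illustrative only: for lock_mode = XFS_ILOCK_EXCL and
 * subclass = 2, xfs_lock_inumorder() returns
 *
 *	XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT)
 *
 * so lockdep treats the third inode in a lock chain as a distinct
 * subclass instead of reporting a recursive locking false positive.
 */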
434 
435 /*
436  * The following routine will lock n inodes in exclusive mode.  We assume the
437  * caller calls us with the inodes in i_ino order.
438  *
439  * We need to detect deadlock where an inode that we lock is in the AIL and we
440  * start waiting for another inode that is locked by a thread in a long running
441  * transaction (such as truncate). This can result in deadlock since the long
442  * running trans might need to wait for the inode we just locked in order to
443  * push the tail and free space in the log.
444  *
445  * xfs_lock_inodes() can only be used to lock one class of lock at a time -
446  * the iolock, the mmaplock or the ilock - but never more than one class at
447  * once. If we lock more than one class at a time, lockdep will report false
448  * positives saying we have violated locking orders.
449  */
450 static void
451 xfs_lock_inodes(
452 	struct xfs_inode	**ips,
453 	int			inodes,
454 	uint			lock_mode)
455 {
456 	int			attempts = 0;
457 	uint			i;
458 	int			j;
459 	bool			try_lock;
460 	struct xfs_log_item	*lp;
461 
462 	/*
463 	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
464 	 * support an arbitrary depth of locking here, but absolute limits on
465 	 * inodes depend on the type of locking and the limits placed by
466 	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
467 	 * the asserts.
468 	 */
469 	ASSERT(ips && inodes >= 2 && inodes <= 5);
470 	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
471 			    XFS_ILOCK_EXCL));
472 	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
473 			      XFS_ILOCK_SHARED)));
474 	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
475 		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
476 	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
477 		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
478 
479 	if (lock_mode & XFS_IOLOCK_EXCL) {
480 		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
481 	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
482 		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
483 
484 again:
485 	try_lock = false;
486 	i = 0;
487 	for (; i < inodes; i++) {
488 		ASSERT(ips[i]);
489 
490 		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
491 			continue;
492 
493 		/*
494 		 * If try_lock is not set yet, make sure all locked inodes are
495 		 * not in the AIL.  If any are, set try_lock to be used later.
496 		 */
497 		if (!try_lock) {
498 			for (j = (i - 1); j >= 0 && !try_lock; j--) {
499 				lp = &ips[j]->i_itemp->ili_item;
500 				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
501 					try_lock = true;
502 			}
503 		}
504 
505 		/*
506 		 * If any of the previous locks we have locked is in the AIL,
507 		 * we must TRY to get the second and subsequent locks. If
508 		 * we can't get any, we must release all we have
509 		 * and try again.
510 		 */
511 		if (!try_lock) {
512 			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
513 			continue;
514 		}
515 
516 		/* try_lock means we have an inode locked that is in the AIL. */
517 		ASSERT(i != 0);
518 		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
519 			continue;
520 
521 		/*
522 		 * Unlock all previous guys and try again.  xfs_iunlock will try
523 		 * to push the tail if the inode is in the AIL.
524 		 */
525 		attempts++;
526 		for (j = i - 1; j >= 0; j--) {
527 			/*
528 			 * Check to see if we've already unlocked this one.  Not
529 			 * the first one going back, and the inode ptr is the
530 			 * same.
531 			 */
532 			if (j != (i - 1) && ips[j] == ips[j + 1])
533 				continue;
534 
535 			xfs_iunlock(ips[j], lock_mode);
536 		}
537 
538 		if ((attempts % 5) == 0) {
539 			delay(1); /* Don't just spin the CPU */
540 		}
541 		goto again;
542 	}
543 }
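
/*
 * Illustrative sketch, not part of the original source: locking a pair
 * of inodes via xfs_lock_inodes().  The array must be sorted by i_ino
 * first, as required above.  The helper name is hypothetical.
 */
static inline void
xfs_example_lock_pair(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	struct xfs_inode	*ips[2] = { ip1, ip2 };

	if (ip1->i_ino > ip2->i_ino)
		swap(ips[0], ips[1]);
	xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
}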
544 
545 /*
546  * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
547  * mmaplock must be double-locked separately since we use i_rwsem and
548  * invalidate_lock for that. We now support taking one lock EXCL and the
549  * other SHARED.
550  */
551 void
552 xfs_lock_two_inodes(
553 	struct xfs_inode	*ip0,
554 	uint			ip0_mode,
555 	struct xfs_inode	*ip1,
556 	uint			ip1_mode)
557 {
558 	int			attempts = 0;
559 	struct xfs_log_item	*lp;
560 
561 	ASSERT(hweight32(ip0_mode) == 1);
562 	ASSERT(hweight32(ip1_mode) == 1);
563 	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
564 	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
565 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
566 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
567 	ASSERT(ip0->i_ino != ip1->i_ino);
568 
569 	if (ip0->i_ino > ip1->i_ino) {
570 		swap(ip0, ip1);
571 		swap(ip0_mode, ip1_mode);
572 	}
573 
574  again:
575 	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
576 
577 	/*
578 	 * If the first lock we have locked is in the AIL, we must TRY to get
579 	 * the second lock. If we can't get it, we must release the first one
580 	 * and try again.
581 	 */
582 	lp = &ip0->i_itemp->ili_item;
583 	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
584 		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
585 			xfs_iunlock(ip0, ip0_mode);
586 			if ((++attempts % 5) == 0)
587 				delay(1); /* Don't just spin the CPU */
588 			goto again;
589 		}
590 	} else {
591 		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
592 	}
593 }
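
/*
 * Illustrative usage, not part of the original source: taking the ilock
 * on two inodes in different modes, e.g. reading one extent map while
 * modifying another:
 *
 *	xfs_lock_two_inodes(src, XFS_ILOCK_SHARED, dest, XFS_ILOCK_EXCL);
 */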
594 
595 uint
596 xfs_ip2xflags(
597 	struct xfs_inode	*ip)
598 {
599 	uint			flags = 0;
600 
601 	if (ip->i_diflags & XFS_DIFLAG_ANY) {
602 		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
603 			flags |= FS_XFLAG_REALTIME;
604 		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
605 			flags |= FS_XFLAG_PREALLOC;
606 		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
607 			flags |= FS_XFLAG_IMMUTABLE;
608 		if (ip->i_diflags & XFS_DIFLAG_APPEND)
609 			flags |= FS_XFLAG_APPEND;
610 		if (ip->i_diflags & XFS_DIFLAG_SYNC)
611 			flags |= FS_XFLAG_SYNC;
612 		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
613 			flags |= FS_XFLAG_NOATIME;
614 		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
615 			flags |= FS_XFLAG_NODUMP;
616 		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
617 			flags |= FS_XFLAG_RTINHERIT;
618 		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
619 			flags |= FS_XFLAG_PROJINHERIT;
620 		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
621 			flags |= FS_XFLAG_NOSYMLINKS;
622 		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
623 			flags |= FS_XFLAG_EXTSIZE;
624 		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
625 			flags |= FS_XFLAG_EXTSZINHERIT;
626 		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
627 			flags |= FS_XFLAG_NODEFRAG;
628 		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
629 			flags |= FS_XFLAG_FILESTREAM;
630 	}
631 
632 	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
633 		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
634 			flags |= FS_XFLAG_DAX;
635 		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
636 			flags |= FS_XFLAG_COWEXTSIZE;
637 	}
638 
639 	if (xfs_inode_has_attr_fork(ip))
640 		flags |= FS_XFLAG_HASATTR;
641 	return flags;
642 }
643 
644 /*
645  * Looks up an inode from "name". If ci_name is not NULL, then a CI match
646  * is allowed, otherwise it has to be an exact match. If a CI match is found,
647  * ci_name->name will point to the actual name (caller must free) or
648  * will be set to NULL if an exact match is found.
649  */
650 int
651 xfs_lookup(
652 	struct xfs_inode	*dp,
653 	const struct xfs_name	*name,
654 	struct xfs_inode	**ipp,
655 	struct xfs_name		*ci_name)
656 {
657 	xfs_ino_t		inum;
658 	int			error;
659 
660 	trace_xfs_lookup(dp, name);
661 
662 	if (xfs_is_shutdown(dp->i_mount))
663 		return -EIO;
664 
665 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
666 	if (error)
667 		goto out_unlock;
668 
669 	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
670 	if (error)
671 		goto out_free_name;
672 
673 	return 0;
674 
675 out_free_name:
676 	if (ci_name)
677 		kmem_free(ci_name->name);
678 out_unlock:
679 	*ipp = NULL;
680 	return error;
681 }
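
/*
 * Illustrative usage, not part of the original source: a caller doing a
 * case-insensitive lookup must free any CI name handed back:
 *
 *	struct xfs_name		ci_name;
 *	struct xfs_inode	*ip;
 *
 *	error = xfs_lookup(dp, name, &ip, &ci_name);
 *	if (!error && ci_name.name)
 *		kmem_free(ci_name.name);
 */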
682 
683 /* Propagate di_flags from a parent inode to a child inode. */
684 static void
685 xfs_inode_inherit_flags(
686 	struct xfs_inode	*ip,
687 	const struct xfs_inode	*pip)
688 {
689 	unsigned int		di_flags = 0;
690 	xfs_failaddr_t		failaddr;
691 	umode_t			mode = VFS_I(ip)->i_mode;
692 
693 	if (S_ISDIR(mode)) {
694 		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
695 			di_flags |= XFS_DIFLAG_RTINHERIT;
696 		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
697 			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
698 			ip->i_extsize = pip->i_extsize;
699 		}
700 		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
701 			di_flags |= XFS_DIFLAG_PROJINHERIT;
702 	} else if (S_ISREG(mode)) {
703 		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
704 		    xfs_has_realtime(ip->i_mount))
705 			di_flags |= XFS_DIFLAG_REALTIME;
706 		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
707 			di_flags |= XFS_DIFLAG_EXTSIZE;
708 			ip->i_extsize = pip->i_extsize;
709 		}
710 	}
711 	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
712 	    xfs_inherit_noatime)
713 		di_flags |= XFS_DIFLAG_NOATIME;
714 	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
715 	    xfs_inherit_nodump)
716 		di_flags |= XFS_DIFLAG_NODUMP;
717 	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
718 	    xfs_inherit_sync)
719 		di_flags |= XFS_DIFLAG_SYNC;
720 	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
721 	    xfs_inherit_nosymlinks)
722 		di_flags |= XFS_DIFLAG_NOSYMLINKS;
723 	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
724 	    xfs_inherit_nodefrag)
725 		di_flags |= XFS_DIFLAG_NODEFRAG;
726 	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
727 		di_flags |= XFS_DIFLAG_FILESTREAM;
728 
729 	ip->i_diflags |= di_flags;
730 
731 	/*
732 	 * Inode verifiers on older kernels only check that the extent size
733 	 * hint is an integer multiple of the rt extent size on realtime files.
734 	 * They did not check the hint alignment on a directory with both
735 	 * rtinherit and extszinherit flags set.  If the misaligned hint is
736 	 * propagated from a directory into a new realtime file, new file
737 	 * allocations will fail due to math errors in the rt allocator and/or
738 	 * trip the verifiers.  Validate the hint settings in the new file so
739 	 * that we don't let broken hints propagate.
740 	 */
741 	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
742 			VFS_I(ip)->i_mode, ip->i_diflags);
743 	if (failaddr) {
744 		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
745 				   XFS_DIFLAG_EXTSZINHERIT);
746 		ip->i_extsize = 0;
747 	}
748 }
749 
750 /* Propagate di_flags2 from a parent inode to a child inode. */
751 static void
752 xfs_inode_inherit_flags2(
753 	struct xfs_inode	*ip,
754 	const struct xfs_inode	*pip)
755 {
756 	xfs_failaddr_t		failaddr;
757 
758 	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
759 		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
760 		ip->i_cowextsize = pip->i_cowextsize;
761 	}
762 	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
763 		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
764 
765 	/* Don't let invalid cowextsize hints propagate. */
766 	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
767 			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
768 	if (failaddr) {
769 		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
770 		ip->i_cowextsize = 0;
771 	}
772 }
773 
774 /*
775  * Initialise a newly allocated inode and return the in-core inode to the
776  * caller locked exclusively.
777  */
778 int
779 xfs_init_new_inode(
780 	struct mnt_idmap	*idmap,
781 	struct xfs_trans	*tp,
782 	struct xfs_inode	*pip,
783 	xfs_ino_t		ino,
784 	umode_t			mode,
785 	xfs_nlink_t		nlink,
786 	dev_t			rdev,
787 	prid_t			prid,
788 	bool			init_xattrs,
789 	struct xfs_inode	**ipp)
790 {
791 	struct inode		*dir = pip ? VFS_I(pip) : NULL;
792 	struct xfs_mount	*mp = tp->t_mountp;
793 	struct xfs_inode	*ip;
794 	unsigned int		flags;
795 	int			error;
796 	struct timespec64	tv;
797 	struct inode		*inode;
798 
799 	/*
800 	 * Protect against obviously corrupt allocation btree records. Later
801 	 * xfs_iget checks will catch re-allocation of other active in-memory
802 	 * and on-disk inodes. If we don't catch reallocating the parent inode
803 	 * here we will deadlock in xfs_iget() so we have to do these checks
804 	 * first.
805 	 */
806 	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
807 		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
808 		return -EFSCORRUPTED;
809 	}
810 
811 	/*
812 	 * Get the in-core inode with the lock held exclusively to prevent
813 	 * others from looking at until we're done.
814 	 */
815 	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
816 	if (error)
817 		return error;
818 
819 	ASSERT(ip != NULL);
820 	inode = VFS_I(ip);
821 	set_nlink(inode, nlink);
822 	inode->i_rdev = rdev;
823 	ip->i_projid = prid;
824 
825 	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
826 		inode_fsuid_set(inode, idmap);
827 		inode->i_gid = dir->i_gid;
828 		inode->i_mode = mode;
829 	} else {
830 		inode_init_owner(idmap, inode, dir, mode);
831 	}
832 
833 	/*
834 	 * If the group ID of the new file does not match the effective group
835 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
836 	 * (and only if the irix_sgid_inherit compatibility variable is set).
837 	 */
838 	if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
839 	    !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
840 		inode->i_mode &= ~S_ISGID;
841 
842 	ip->i_disk_size = 0;
843 	ip->i_df.if_nextents = 0;
844 	ASSERT(ip->i_nblocks == 0);
845 
846 	tv = current_time(inode);
847 	inode->i_mtime = tv;
848 	inode->i_atime = tv;
849 	inode->i_ctime = tv;
850 
851 	ip->i_extsize = 0;
852 	ip->i_diflags = 0;
853 
854 	if (xfs_has_v3inodes(mp)) {
855 		inode_set_iversion(inode, 1);
856 		ip->i_cowextsize = 0;
857 		ip->i_crtime = tv;
858 	}
859 
860 	flags = XFS_ILOG_CORE;
861 	switch (mode & S_IFMT) {
862 	case S_IFIFO:
863 	case S_IFCHR:
864 	case S_IFBLK:
865 	case S_IFSOCK:
866 		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
867 		flags |= XFS_ILOG_DEV;
868 		break;
869 	case S_IFREG:
870 	case S_IFDIR:
871 		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
872 			xfs_inode_inherit_flags(ip, pip);
873 		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
874 			xfs_inode_inherit_flags2(ip, pip);
875 		fallthrough;
876 	case S_IFLNK:
877 		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
878 		ip->i_df.if_bytes = 0;
879 		ip->i_df.if_u1.if_root = NULL;
880 		break;
881 	default:
882 		ASSERT(0);
883 	}
884 
885 	/*
886 	 * If we need to create attributes immediately after allocating the
887 	 * inode, initialise an empty attribute fork right now. We use the
888 	 * default fork offset for attributes here as we don't know exactly what
889 	 * size or how many attributes we might be adding. We can do this
890 	 * safely here because we know the data fork is completely empty and
891 	 * this saves us from needing to run a separate transaction to set the
892 	 * fork offset in the immediate future.
893 	 */
894 	if (init_xattrs && xfs_has_attr(mp)) {
895 		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
896 		xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
897 	}
898 
899 	/*
900 	 * Log the new values stuffed into the inode.
901 	 */
902 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
903 	xfs_trans_log_inode(tp, ip, flags);
904 
905 	/* now that we have an i_mode we can setup the inode structure */
906 	xfs_setup_inode(ip);
907 
908 	*ipp = ip;
909 	return 0;
910 }
911 
912 /*
913  * Decrement the link count on an inode & log the change.  If this causes the
914  * link count to go to zero, move the inode to the AGI unlinked list so it can
915  * be freed when the last active reference goes away via xfs_inactive().
916  */
917 static int			/* error */
918 xfs_droplink(
919 	xfs_trans_t *tp,
920 	xfs_inode_t *ip)
921 {
922 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
923 
924 	drop_nlink(VFS_I(ip));
925 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
926 
927 	if (VFS_I(ip)->i_nlink)
928 		return 0;
929 
930 	return xfs_iunlink(tp, ip);
931 }
932 
933 /*
934  * Increment the link count on an inode & log the change.
935  */
936 static void
937 xfs_bumplink(
938 	xfs_trans_t *tp,
939 	xfs_inode_t *ip)
940 {
941 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
942 
943 	inc_nlink(VFS_I(ip));
944 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
945 }
946 
947 int
948 xfs_create(
949 	struct mnt_idmap	*idmap,
950 	xfs_inode_t		*dp,
951 	struct xfs_name		*name,
952 	umode_t			mode,
953 	dev_t			rdev,
954 	bool			init_xattrs,
955 	xfs_inode_t		**ipp)
956 {
957 	int			is_dir = S_ISDIR(mode);
958 	struct xfs_mount	*mp = dp->i_mount;
959 	struct xfs_inode	*ip = NULL;
960 	struct xfs_trans	*tp = NULL;
961 	int			error;
962 	bool                    unlock_dp_on_error = false;
963 	prid_t			prid;
964 	struct xfs_dquot	*udqp = NULL;
965 	struct xfs_dquot	*gdqp = NULL;
966 	struct xfs_dquot	*pdqp = NULL;
967 	struct xfs_trans_res	*tres;
968 	uint			resblks;
969 	xfs_ino_t		ino;
970 
971 	trace_xfs_create(dp, name);
972 
973 	if (xfs_is_shutdown(mp))
974 		return -EIO;
975 
976 	prid = xfs_get_initial_prid(dp);
977 
978 	/*
979 	 * Make sure that we have allocated dquot(s) on disk.
980 	 */
981 	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
982 			mapped_fsgid(idmap, &init_user_ns), prid,
983 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
984 			&udqp, &gdqp, &pdqp);
985 	if (error)
986 		return error;
987 
988 	if (is_dir) {
989 		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
990 		tres = &M_RES(mp)->tr_mkdir;
991 	} else {
992 		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
993 		tres = &M_RES(mp)->tr_create;
994 	}
995 
996 	/*
997 	 * Initially assume that the file does not exist and
998 	 * reserve the resources for that case.  If that is not
999 	 * the case we'll drop the one we have and get a more
1000 	 * appropriate transaction later.
1001 	 */
1002 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1003 			&tp);
1004 	if (error == -ENOSPC) {
1005 		/* flush outstanding delalloc blocks and retry */
1006 		xfs_flush_inodes(mp);
1007 		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1008 				resblks, &tp);
1009 	}
1010 	if (error)
1011 		goto out_release_dquots;
1012 
1013 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1014 	unlock_dp_on_error = true;
1015 
1016 	/*
1017 	 * A newly created regular or special file just has one directory
1018 	 * entry pointing to it, but a directory also has the "." entry
1019 	 * pointing to itself.
1020 	 */
1021 	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1022 	if (!error)
1023 		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1024 				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1025 	if (error)
1026 		goto out_trans_cancel;
1027 
1028 	/*
1029 	 * Now we join the directory inode to the transaction.  We do not do it
1030 	 * earlier because xfs_dialloc might commit the previous transaction
1031 	 * (and release all the locks).  An error from here on will result in
1032 	 * the transaction cancel unlocking dp so don't do it explicitly in the
1033 	 * error path.
1034 	 */
1035 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1036 	unlock_dp_on_error = false;
1037 
1038 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1039 					resblks - XFS_IALLOC_SPACE_RES(mp));
1040 	if (error) {
1041 		ASSERT(error != -ENOSPC);
1042 		goto out_trans_cancel;
1043 	}
1044 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1045 	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1046 
1047 	if (is_dir) {
1048 		error = xfs_dir_init(tp, ip, dp);
1049 		if (error)
1050 			goto out_trans_cancel;
1051 
1052 		xfs_bumplink(tp, dp);
1053 	}
1054 
1055 	/*
1056 	 * If this is a synchronous mount, make sure that the
1057 	 * create transaction goes to disk before returning to
1058 	 * the user.
1059 	 */
1060 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1061 		xfs_trans_set_sync(tp);
1062 
1063 	/*
1064 	 * Attach the dquot(s) to the inodes and modify them incore.
1065 	 * The IDs of the inode couldn't have changed since the new
1066 	 * inode has been locked ever since it was created.
1067 	 */
1068 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1069 
1070 	error = xfs_trans_commit(tp);
1071 	if (error)
1072 		goto out_release_inode;
1073 
1074 	xfs_qm_dqrele(udqp);
1075 	xfs_qm_dqrele(gdqp);
1076 	xfs_qm_dqrele(pdqp);
1077 
1078 	*ipp = ip;
1079 	return 0;
1080 
1081  out_trans_cancel:
1082 	xfs_trans_cancel(tp);
1083  out_release_inode:
1084 	/*
1085 	 * Wait until after the current transaction is aborted to finish the
1086 	 * setup of the inode and release the inode.  This prevents recursive
1087 	 * transactions and deadlocks from xfs_inactive.
1088 	 */
1089 	if (ip) {
1090 		xfs_finish_inode_setup(ip);
1091 		xfs_irele(ip);
1092 	}
1093  out_release_dquots:
1094 	xfs_qm_dqrele(udqp);
1095 	xfs_qm_dqrele(gdqp);
1096 	xfs_qm_dqrele(pdqp);
1097 
1098 	if (unlock_dp_on_error)
1099 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1100 	return error;
1101 }
1102 
1103 int
1104 xfs_create_tmpfile(
1105 	struct mnt_idmap	*idmap,
1106 	struct xfs_inode	*dp,
1107 	umode_t			mode,
1108 	struct xfs_inode	**ipp)
1109 {
1110 	struct xfs_mount	*mp = dp->i_mount;
1111 	struct xfs_inode	*ip = NULL;
1112 	struct xfs_trans	*tp = NULL;
1113 	int			error;
1114 	prid_t                  prid;
1115 	struct xfs_dquot	*udqp = NULL;
1116 	struct xfs_dquot	*gdqp = NULL;
1117 	struct xfs_dquot	*pdqp = NULL;
1118 	struct xfs_trans_res	*tres;
1119 	uint			resblks;
1120 	xfs_ino_t		ino;
1121 
1122 	if (xfs_is_shutdown(mp))
1123 		return -EIO;
1124 
1125 	prid = xfs_get_initial_prid(dp);
1126 
1127 	/*
1128 	 * Make sure that we have allocated dquot(s) on disk.
1129 	 */
1130 	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
1131 			mapped_fsgid(idmap, &init_user_ns), prid,
1132 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1133 			&udqp, &gdqp, &pdqp);
1134 	if (error)
1135 		return error;
1136 
1137 	resblks = XFS_IALLOC_SPACE_RES(mp);
1138 	tres = &M_RES(mp)->tr_create_tmpfile;
1139 
1140 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1141 			&tp);
1142 	if (error)
1143 		goto out_release_dquots;
1144 
1145 	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1146 	if (!error)
1147 		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1148 				0, 0, prid, false, &ip);
1149 	if (error)
1150 		goto out_trans_cancel;
1151 
1152 	if (xfs_has_wsync(mp))
1153 		xfs_trans_set_sync(tp);
1154 
1155 	/*
1156 	 * Attach the dquot(s) to the inodes and modify them incore.
1157 	 * The IDs of the inode couldn't have changed since the new
1158 	 * inode has been locked ever since it was created.
1159 	 */
1160 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1161 
1162 	error = xfs_iunlink(tp, ip);
1163 	if (error)
1164 		goto out_trans_cancel;
1165 
1166 	error = xfs_trans_commit(tp);
1167 	if (error)
1168 		goto out_release_inode;
1169 
1170 	xfs_qm_dqrele(udqp);
1171 	xfs_qm_dqrele(gdqp);
1172 	xfs_qm_dqrele(pdqp);
1173 
1174 	*ipp = ip;
1175 	return 0;
1176 
1177  out_trans_cancel:
1178 	xfs_trans_cancel(tp);
1179  out_release_inode:
1180 	/*
1181 	 * Wait until after the current transaction is aborted to finish the
1182 	 * setup of the inode and release the inode.  This prevents recursive
1183 	 * transactions and deadlocks from xfs_inactive.
1184 	 */
1185 	if (ip) {
1186 		xfs_finish_inode_setup(ip);
1187 		xfs_irele(ip);
1188 	}
1189  out_release_dquots:
1190 	xfs_qm_dqrele(udqp);
1191 	xfs_qm_dqrele(gdqp);
1192 	xfs_qm_dqrele(pdqp);
1193 
1194 	return error;
1195 }
1196 
1197 int
1198 xfs_link(
1199 	xfs_inode_t		*tdp,
1200 	xfs_inode_t		*sip,
1201 	struct xfs_name		*target_name)
1202 {
1203 	xfs_mount_t		*mp = tdp->i_mount;
1204 	xfs_trans_t		*tp;
1205 	int			error, nospace_error = 0;
1206 	int			resblks;
1207 
1208 	trace_xfs_link(tdp, target_name);
1209 
1210 	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1211 
1212 	if (xfs_is_shutdown(mp))
1213 		return -EIO;
1214 
1215 	error = xfs_qm_dqattach(sip);
1216 	if (error)
1217 		goto std_return;
1218 
1219 	error = xfs_qm_dqattach(tdp);
1220 	if (error)
1221 		goto std_return;
1222 
1223 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1224 	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
1225 			&tp, &nospace_error);
1226 	if (error)
1227 		goto std_return;
1228 
1229 	/*
1230 	 * If we are using project inheritance, we only allow hard link
1231 	 * creation in our tree when the project IDs are the same; else
1232 	 * the tree quota mechanism could be circumvented.
1233 	 */
1234 	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1235 		     tdp->i_projid != sip->i_projid)) {
1236 		error = -EXDEV;
1237 		goto error_return;
1238 	}
1239 
1240 	if (!resblks) {
1241 		error = xfs_dir_canenter(tp, tdp, target_name);
1242 		if (error)
1243 			goto error_return;
1244 	}
1245 
1246 	/*
1247 	 * Handle initial link state of O_TMPFILE inode
1248 	 */
1249 	if (VFS_I(sip)->i_nlink == 0) {
1250 		struct xfs_perag	*pag;
1251 
1252 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1253 		error = xfs_iunlink_remove(tp, pag, sip);
1254 		xfs_perag_put(pag);
1255 		if (error)
1256 			goto error_return;
1257 	}
1258 
1259 	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1260 				   resblks);
1261 	if (error)
1262 		goto error_return;
1263 	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1264 	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1265 
1266 	xfs_bumplink(tp, sip);
1267 
1268 	/*
1269 	 * If this is a synchronous mount, make sure that the
1270 	 * link transaction goes to disk before returning to
1271 	 * the user.
1272 	 */
1273 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1274 		xfs_trans_set_sync(tp);
1275 
1276 	return xfs_trans_commit(tp);
1277 
1278  error_return:
1279 	xfs_trans_cancel(tp);
1280  std_return:
1281 	if (error == -ENOSPC && nospace_error)
1282 		error = nospace_error;
1283 	return error;
1284 }
1285 
1286 /* Clear the reflink flag and the cowblocks tag if possible. */
1287 static void
1288 xfs_itruncate_clear_reflink_flags(
1289 	struct xfs_inode	*ip)
1290 {
1291 	struct xfs_ifork	*dfork;
1292 	struct xfs_ifork	*cfork;
1293 
1294 	if (!xfs_is_reflink_inode(ip))
1295 		return;
1296 	dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1297 	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
1298 	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1299 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1300 	if (cfork->if_bytes == 0)
1301 		xfs_inode_clear_cowblocks_tag(ip);
1302 }
1303 
1304 /*
1305  * Free up the underlying blocks past new_size.  The new size must be smaller
1306  * than the current size.  This routine can be used both for the attribute and
1307  * data fork, and does not modify the inode size, which is left to the caller.
1308  *
1309  * The transaction passed to this routine must have made a permanent log
1310  * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1311  * given transaction and start new ones, so make sure everything involved in
1312  * the transaction is tidy before calling here.  Some transaction will be
1313  * returned to the caller to be committed.  The incoming transaction must
1314  * already include the inode, and both inode locks must be held exclusively.
1315  * The inode must also be "held" within the transaction.  On return the inode
1316  * will be "held" within the returned transaction.  This routine does NOT
1317  * require any disk space to be reserved for it within the transaction.
1318  *
1319  * If we get an error, we must return with the inode locked and linked into the
1320  * current transaction. This keeps things simple for the higher level code,
1321  * because it always knows that the inode is locked and held in the transaction
1322  * that returns to it whether errors occur or not.  We don't mark the inode
1323  * dirty on error so that transactions can be easily aborted if possible.
1324  */
1325 int
1326 xfs_itruncate_extents_flags(
1327 	struct xfs_trans	**tpp,
1328 	struct xfs_inode	*ip,
1329 	int			whichfork,
1330 	xfs_fsize_t		new_size,
1331 	int			flags)
1332 {
1333 	struct xfs_mount	*mp = ip->i_mount;
1334 	struct xfs_trans	*tp = *tpp;
1335 	xfs_fileoff_t		first_unmap_block;
1336 	xfs_filblks_t		unmap_len;
1337 	int			error = 0;
1338 
1339 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1340 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1341 	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1342 	ASSERT(new_size <= XFS_ISIZE(ip));
1343 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1344 	ASSERT(ip->i_itemp != NULL);
1345 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1346 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1347 
1348 	trace_xfs_itruncate_extents_start(ip, new_size);
1349 
1350 	flags |= xfs_bmapi_aflag(whichfork);
1351 
1352 	/*
1353 	 * Since it is possible for space to become allocated beyond
1354 	 * the end of the file (in a crash where the space is allocated
1355 	 * but the inode size is not yet updated), simply remove any
1356 	 * blocks which show up between the new EOF and the maximum
1357 	 * possible file size.
1358 	 *
1359 	 * We have to free all the blocks to the bmbt maximum offset, even if
1360 	 * the page cache can't scale that far.
1361 	 */
1362 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1363 	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
1364 		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1365 		return 0;
1366 	}
1367 
1368 	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
1369 	while (unmap_len > 0) {
1370 		ASSERT(tp->t_highest_agno == NULLAGNUMBER);
1371 		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
1372 				flags, XFS_ITRUNC_MAX_EXTENTS);
1373 		if (error)
1374 			goto out;
1375 
1376 		/* free the just unmapped extents */
1377 		error = xfs_defer_finish(&tp);
1378 		if (error)
1379 			goto out;
1380 	}
1381 
1382 	if (whichfork == XFS_DATA_FORK) {
1383 		/* Remove all pending CoW reservations. */
1384 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1385 				first_unmap_block, XFS_MAX_FILEOFF, true);
1386 		if (error)
1387 			goto out;
1388 
1389 		xfs_itruncate_clear_reflink_flags(ip);
1390 	}
1391 
1392 	/*
1393 	 * Always re-log the inode so that our permanent transaction can keep
1394 	 * on rolling it forward in the log.
1395 	 */
1396 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1397 
1398 	trace_xfs_itruncate_extents_end(ip, new_size);
1399 
1400 out:
1401 	*tpp = tp;
1402 	return error;
1403 }
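
/*
 * Illustrative caller sketch, not part of the original source: because
 * the routine above may roll the transaction, callers must continue
 * with whatever *tpp points at on return:
 *
 *	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
 *			new_size, 0);
 *	if (error)
 *		goto out_trans_cancel;
 *	error = xfs_trans_commit(tp);
 *
 * On error the inode is still locked and joined to tp, so the error
 * path can simply cancel the returned transaction.
 */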
1404 
1405 int
1406 xfs_release(
1407 	xfs_inode_t	*ip)
1408 {
1409 	xfs_mount_t	*mp = ip->i_mount;
1410 	int		error = 0;
1411 
1412 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1413 		return 0;
1414 
1415 	/* If this is a read-only mount, don't do this (would generate I/O) */
1416 	if (xfs_is_readonly(mp))
1417 		return 0;
1418 
1419 	if (!xfs_is_shutdown(mp)) {
1420 		int truncated;
1421 
1422 		/*
1423 		 * If we previously truncated this file and removed old data
1424 		 * in the process, we want to initiate "early" writeout on
1425 		 * the last close.  This is an attempt to combat the notorious
1426 		 * NULL files problem which is particularly noticeable from a
1427 		 * truncate down, buffered (re-)write (delalloc), followed by
1428 		 * a crash.  What we are effectively doing here is
1429 		 * significantly reducing the time window where we'd otherwise
1430 		 * be exposed to that problem.
1431 		 */
1432 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1433 		if (truncated) {
1434 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1435 			if (ip->i_delayed_blks > 0) {
1436 				error = filemap_flush(VFS_I(ip)->i_mapping);
1437 				if (error)
1438 					return error;
1439 			}
1440 		}
1441 	}
1442 
1443 	if (VFS_I(ip)->i_nlink == 0)
1444 		return 0;
1445 
1446 	/*
1447 	 * If we can't get the iolock just skip truncating the blocks past EOF
1448 	 * because we could deadlock with the mmap_lock otherwise. We'll get
1449 	 * another chance to drop them once the last reference to the inode is
1450 	 * dropped, so we'll never leak blocks permanently.
1451 	 */
1452 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
1453 		return 0;
1454 
1455 	if (xfs_can_free_eofblocks(ip, false)) {
1456 		/*
1457 		 * If the inode is being opened, written and closed
1458 		 * frequently and we have delayed allocation blocks outstanding
1459 		 * (e.g. streaming writes from the NFS server), truncating the
1460 		 * blocks past EOF will cause fragmentation to occur.
1461 		 *
1462 		 * In this case don't do the truncation, but we have to be
1463 		 * careful how we detect this case. Blocks beyond EOF show up as
1464 		 * i_delayed_blks even when the inode is clean, so we need to
1465 		 * truncate them away first before checking for a dirty release.
1466 		 * Hence on the first dirty close we will still remove the
1467 		 * speculative allocation, but after that we will leave it in
1468 		 * place.
1469 		 */
1470 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1471 			goto out_unlock;
1472 
1473 		error = xfs_free_eofblocks(ip);
1474 		if (error)
1475 			goto out_unlock;
1476 
1477 		/* delalloc blocks after truncation means it really is dirty */
1478 		if (ip->i_delayed_blks)
1479 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1480 	}
1481 
1482 out_unlock:
1483 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1484 	return error;
1485 }
1486 
1487 /*
1488  * xfs_inactive_truncate
1489  *
1490  * Called to perform a truncate when an inode becomes unlinked.
1491  */
1492 STATIC int
1493 xfs_inactive_truncate(
1494 	struct xfs_inode *ip)
1495 {
1496 	struct xfs_mount	*mp = ip->i_mount;
1497 	struct xfs_trans	*tp;
1498 	int			error;
1499 
1500 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1501 	if (error) {
1502 		ASSERT(xfs_is_shutdown(mp));
1503 		return error;
1504 	}
1505 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1506 	xfs_trans_ijoin(tp, ip, 0);
1507 
1508 	/*
1509 	 * Log the inode size first to prevent stale data exposure in the event
1510 	 * of a system crash before the truncate completes. See the related
1511 	 * comment in xfs_vn_setattr_size() for details.
1512 	 */
1513 	ip->i_disk_size = 0;
1514 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1515 
1516 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1517 	if (error)
1518 		goto error_trans_cancel;
1519 
1520 	ASSERT(ip->i_df.if_nextents == 0);
1521 
1522 	error = xfs_trans_commit(tp);
1523 	if (error)
1524 		goto error_unlock;
1525 
1526 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1527 	return 0;
1528 
1529 error_trans_cancel:
1530 	xfs_trans_cancel(tp);
1531 error_unlock:
1532 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1533 	return error;
1534 }
1535 
1536 /*
1537  * xfs_inactive_ifree()
1538  *
1539  * Perform the inode free when an inode is unlinked.
1540  */
1541 STATIC int
1542 xfs_inactive_ifree(
1543 	struct xfs_inode *ip)
1544 {
1545 	struct xfs_mount	*mp = ip->i_mount;
1546 	struct xfs_trans	*tp;
1547 	int			error;
1548 
1549 	/*
1550 	 * We try to use a per-AG reservation for any block needed by the finobt
1551 	 * tree, but as the finobt feature predates the per-AG reservation
1552 	 * support a degraded file system might not have enough space for the
1553 	 * reservation at mount time.  In that case try to dip into the reserved
1554 	 * pool and pray.
1555 	 *
1556 	 * Send a warning if the reservation does happen to fail, as the inode
1557 	 * now remains allocated and sits on the unlinked list until the fs is
1558 	 * repaired.
1559 	 */
1560 	if (unlikely(mp->m_finobt_nores)) {
1561 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1562 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1563 				&tp);
1564 	} else {
1565 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1566 	}
1567 	if (error) {
1568 		if (error == -ENOSPC) {
1569 			xfs_warn_ratelimited(mp,
1570 			"Failed to remove inode(s) from unlinked list. "
1571 			"Please free space, unmount and run xfs_repair.");
1572 		} else {
1573 			ASSERT(xfs_is_shutdown(mp));
1574 		}
1575 		return error;
1576 	}
1577 
1578 	/*
1579 	 * We do not hold the inode locked across the entire rolling transaction
1580 	 * here. We only need to hold it for the first transaction that
1581 	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1582 	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1583 	 * here breaks the relationship between cluster buffer invalidation and
1584 	 * stale inode invalidation on cluster buffer item journal commit
1585 	 * completion, and can result in leaving dirty stale inodes hanging
1586 	 * around in memory.
1587 	 *
1588 	 * We have no need for serialising this inode operation against other
1589 	 * operations - we freed the inode and hence reallocation is required
1590 	 * and that will serialise on reallocating the space the deferops need
1591 	 * to free. Hence we can unlock the inode on the first commit of
1592 	 * the transaction rather than roll it right through the deferops. This
1593 	 * avoids relogging the XFS_ISTALE inode.
1594 	 *
1595 	 * We check that xfs_ifree() hasn't grown an internal transaction roll
1596 	 * by asserting that the inode is still locked when it returns.
1597 	 */
1598 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1599 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1600 
1601 	error = xfs_ifree(tp, ip);
1602 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1603 	if (error) {
1604 		/*
1605 		 * If we fail to free the inode, shut down.  The cancel
1606 		 * might do that, we need to make sure.  Otherwise the
1607 		 * inode might be lost for a long time or forever.
1608 		 */
1609 		if (!xfs_is_shutdown(mp)) {
1610 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1611 				__func__, error);
1612 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1613 		}
1614 		xfs_trans_cancel(tp);
1615 		return error;
1616 	}
1617 
1618 	/*
1619 	 * Credit the quota account(s). The inode is gone.
1620 	 */
1621 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1622 
1623 	return xfs_trans_commit(tp);
1624 }
1625 
1626 /*
1627  * Returns true if we need to update the on-disk metadata before we can free
1628  * the memory used by this inode.  Updates include freeing post-eof
1629  * preallocations; freeing COW staging extents; and marking the inode free in
1630  * the inobt if it is on the unlinked list.
1631  */
1632 bool
1633 xfs_inode_needs_inactive(
1634 	struct xfs_inode	*ip)
1635 {
1636 	struct xfs_mount	*mp = ip->i_mount;
1637 	struct xfs_ifork	*cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
1638 
1639 	/*
1640 	 * If the inode is already free, then there can be nothing
1641 	 * to clean up here.
1642 	 */
1643 	if (VFS_I(ip)->i_mode == 0)
1644 		return false;
1645 
1646 	/* If this is a read-only mount, don't do this (would generate I/O) */
1647 	if (xfs_is_readonly(mp))
1648 		return false;
1649 
1650 	/* If the log isn't running, push inodes straight to reclaim. */
1651 	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
1652 		return false;
1653 
1654 	/* Metadata inodes require explicit resource cleanup. */
1655 	if (xfs_is_metadata_inode(ip))
1656 		return false;
1657 
1658 	/* Want to clean out the cow blocks if there are any. */
1659 	if (cow_ifp && cow_ifp->if_bytes > 0)
1660 		return true;
1661 
1662 	/* Unlinked files must be freed. */
1663 	if (VFS_I(ip)->i_nlink == 0)
1664 		return true;
1665 
1666 	/*
1667 	 * This file isn't being freed, so check if there are post-eof blocks
1668 	 * to free.  @force is true because we are evicting an inode from the
1669 	 * cache.  Post-eof blocks must be freed, lest we end up with broken
1670 	 * free space accounting.
1671 	 *
1672 	 * Note: don't bother with iolock here since lockdep complains about
1673 	 * acquiring it in reclaim context. We have the only reference to the
1674 	 * inode at this point anyways.
1675 	 */
1676 	return xfs_can_free_eofblocks(ip, true);
1677 }
1678 
1679 /*
1680  * xfs_inactive
1681  *
1682  * This is called when the vnode reference count for the vnode
1683  * goes to zero.  If the file has been unlinked, then it must
1684  * now be truncated.  Also, we clear all of the read-ahead state
1685  * kept for the inode here since the file is now closed.
1686  */
1687 int
1688 xfs_inactive(
1689 	xfs_inode_t	*ip)
1690 {
1691 	struct xfs_mount	*mp;
1692 	int			error = 0;
1693 	int			truncate = 0;
1694 
1695 	/*
1696 	 * If the inode is already free, then there can be nothing
1697 	 * to clean up here.
1698 	 */
1699 	if (VFS_I(ip)->i_mode == 0) {
1700 		ASSERT(ip->i_df.if_broot_bytes == 0);
1701 		goto out;
1702 	}
1703 
1704 	mp = ip->i_mount;
1705 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1706 
1707 	/* If this is a read-only mount, don't do this (would generate I/O) */
1708 	if (xfs_is_readonly(mp))
1709 		goto out;
1710 
1711 	/* Metadata inodes require explicit resource cleanup. */
1712 	if (xfs_is_metadata_inode(ip))
1713 		goto out;
1714 
1715 	/* Try to clean out the cow blocks if there are any. */
1716 	if (xfs_inode_has_cow_data(ip))
1717 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1718 
1719 	if (VFS_I(ip)->i_nlink != 0) {
1720 		/*
1721 		 * force is true because we are evicting an inode from the
1722 		 * cache. Post-eof blocks must be freed, lest we end up with
1723 		 * broken free space accounting.
1724 		 *
1725 		 * Note: don't bother with iolock here since lockdep complains
1726 		 * about acquiring it in reclaim context. We have the only
1727 		 * reference to the inode at this point anyways.
1728 		 */
1729 		if (xfs_can_free_eofblocks(ip, true))
1730 			error = xfs_free_eofblocks(ip);
1731 
1732 		goto out;
1733 	}
1734 
1735 	if (S_ISREG(VFS_I(ip)->i_mode) &&
1736 	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1737 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1738 		truncate = 1;
1739 
1740 	error = xfs_qm_dqattach(ip);
1741 	if (error)
1742 		goto out;
1743 
1744 	if (S_ISLNK(VFS_I(ip)->i_mode))
1745 		error = xfs_inactive_symlink(ip);
1746 	else if (truncate)
1747 		error = xfs_inactive_truncate(ip);
1748 	if (error)
1749 		goto out;
1750 
1751 	/*
1752 	 * If there are attributes associated with the file then blow them away
1753 	 * now.  The code calls a routine that recursively deconstructs the
1754 	 * attribute fork. It also blows away the in-core attribute fork.
1755 	 */
1756 	if (xfs_inode_has_attr_fork(ip)) {
1757 		error = xfs_attr_inactive(ip);
1758 		if (error)
1759 			goto out;
1760 	}
1761 
1762 	ASSERT(ip->i_forkoff == 0);
1763 
1764 	/*
1765 	 * Free the inode.
1766 	 */
1767 	error = xfs_inactive_ifree(ip);
1768 
1769 out:
1770 	/*
1771 	 * We're done making metadata updates for this inode, so we can release
1772 	 * the attached dquots.
1773 	 */
1774 	xfs_qm_dqdetach(ip);
1775 	return error;
1776 }
1777 
1778 /*
1779  * In-Core Unlinked List Lookups
1780  * =============================
1781  *
1782  * Every inode is supposed to be reachable from some other piece of metadata
1783  * with the exception of the root directory.  Inodes with a connection to a
1784  * file descriptor but not linked from anywhere in the on-disk directory tree
1785  * are collectively known as unlinked inodes, though the filesystem itself
1786  * maintains links to these inodes so that on-disk metadata are consistent.
1787  *
1788  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1789  * header contains a number of buckets that point to an inode, and each inode
1790  * record has a pointer to the next inode in the hash chain.  This
1791  * singly-linked list causes scaling problems in the iunlink remove function
1792  * because we must walk that list to find the inode that points to the inode
1793  * being removed from the unlinked hash bucket list.
1794  *
1795  * Hence we keep an in-memory doubly linked list to link each inode on an
1796  * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer-
1797  * based lists would require having 64 list heads in the perag, one for each
1798  * list. This is expensive in terms of memory (think millions of AGs) and cache
1799  * misses on lookups. Instead, use the fact that inodes on the unlinked list
1800  * must be referenced at the VFS level to keep them on the list and hence we
1801  * have an existence guarantee for inodes on the unlinked list.
1802  *
1803  * Given we have an existence guarantee, we can use lockless inode cache lookups
1804  * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
1805  * for the doubly linked unlinked list, and we don't need any extra locking to
1806  * keep the list safe as all manipulations are done under the AGI buffer lock.
1807  * Keeping the list up to date does not require memory allocation, just finding
1808  * the XFS inode and updating the next/prev unlinked list aginos.
1809  */
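
/*
 * Illustrative example: with XFS_AGI_UNLINKED_BUCKETS == 64, an inode
 * with AG inode number A hashes to bucket A % 64.  On disk only the
 * forward chain exists:
 *
 *	agi_unlinked[A % 64] -> A -> B -> NULLAGINO
 *
 * while in memory each inode also carries i_prev_unlinked, so B can be
 * removed from the middle of the chain without walking it from the head.
 */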
1810 
1811 /*
1812  * Find an inode on the unlinked list. This does not take references to the
1813  * inode as we have existence guarantees from holding the AGI buffer lock and
1814  * because only unlinked, referenced inodes can be on the unlinked inode list.
1815  * If we don't find the inode in cache, then let the caller handle the situation.
1816  */
1817 static struct xfs_inode *
1818 xfs_iunlink_lookup(
1819 	struct xfs_perag	*pag,
1820 	xfs_agino_t		agino)
1821 {
1822 	struct xfs_inode	*ip;
1823 
1824 	rcu_read_lock();
1825 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1826 
1827 	/*
1828 	 * Finding an inode not in memory, or one in RCU freeing limbo, should
1829 	 * not happen.  Warn and let the caller handle the failure.
1830 	 */
1831 	if (WARN_ON_ONCE(!ip || !ip->i_ino)) {
1832 		rcu_read_unlock();
1833 		return NULL;
1834 	}
1835 	ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1836 	rcu_read_unlock();
1837 	return ip;
1838 }
1839 
1840 /* Update the prev pointer of the next agino. */
1841 static int
1842 xfs_iunlink_update_backref(
1843 	struct xfs_perag	*pag,
1844 	xfs_agino_t		prev_agino,
1845 	xfs_agino_t		next_agino)
1846 {
1847 	struct xfs_inode	*ip;
1848 
1849 	/* No update necessary if we are at the end of the list. */
1850 	if (next_agino == NULLAGINO)
1851 		return 0;
1852 
1853 	ip = xfs_iunlink_lookup(pag, next_agino);
1854 	if (!ip)
1855 		return -EFSCORRUPTED;
1856 	ip->i_prev_unlinked = prev_agino;
1857 	return 0;
1858 }
1859 
1860 /*
1861  * Point the AGI unlinked bucket at an inode and log the results.  The caller
1862  * is responsible for validating the old value.
1863  */
1864 STATIC int
1865 xfs_iunlink_update_bucket(
1866 	struct xfs_trans	*tp,
1867 	struct xfs_perag	*pag,
1868 	struct xfs_buf		*agibp,
1869 	unsigned int		bucket_index,
1870 	xfs_agino_t		new_agino)
1871 {
1872 	struct xfs_agi		*agi = agibp->b_addr;
1873 	xfs_agino_t		old_value;
1874 	int			offset;
1875 
1876 	ASSERT(xfs_verify_agino_or_null(pag, new_agino));
1877 
1878 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1879 	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
1880 			old_value, new_agino);
1881 
1882 	/*
1883 	 * We should never find the head of the list already set to the value
1884 	 * passed in because either we're adding or removing ourselves from the
1885 	 * head of the list.
1886 	 */
1887 	if (old_value == new_agino) {
1888 		xfs_buf_mark_corrupt(agibp);
1889 		return -EFSCORRUPTED;
1890 	}
1891 
1892 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1893 	offset = offsetof(struct xfs_agi, agi_unlinked) +
1894 			(sizeof(xfs_agino_t) * bucket_index);
1895 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
1896 	return 0;
1897 }
1898 
1899 static int
1900 xfs_iunlink_insert_inode(
1901 	struct xfs_trans	*tp,
1902 	struct xfs_perag	*pag,
1903 	struct xfs_buf		*agibp,
1904 	struct xfs_inode	*ip)
1905 {
1906 	struct xfs_mount	*mp = tp->t_mountp;
1907 	struct xfs_agi		*agi = agibp->b_addr;
1908 	xfs_agino_t		next_agino;
1909 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1910 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1911 	int			error;
1912 
1913 	/*
1914 	 * Get the index into the agi hash table for the list this inode will
1915 	 * go on.  Make sure the pointer isn't garbage and that this inode
1916 	 * isn't already on the list.
1917 	 */
1918 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1919 	if (next_agino == agino ||
1920 	    !xfs_verify_agino_or_null(pag, next_agino)) {
1921 		xfs_buf_mark_corrupt(agibp);
1922 		return -EFSCORRUPTED;
1923 	}
1924 
1925 	/*
1926 	 * Update the prev pointer in the next inode to point back to this
1927 	 * inode.
1928 	 */
1929 	error = xfs_iunlink_update_backref(pag, agino, next_agino);
1930 	if (error)
1931 		return error;
1932 
1933 	if (next_agino != NULLAGINO) {
1934 		/*
1935 		 * There is already another inode in the bucket, so point this
1936 		 * inode to the current head of the list.
1937 		 */
1938 		error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
1939 		if (error)
1940 			return error;
1941 		ip->i_next_unlinked = next_agino;
1942 	}
1943 
1944 	/* Point the head of the list at this inode. */
1945 	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
1946 }
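
/*
 * For example, inserting agino A into a bucket whose current head is B:
 * xfs_iunlink_update_backref() sets B's in-memory i_prev_unlinked to A,
 * xfs_iunlink_log_inode() logs A's on-disk next_unlinked pointer as B,
 * and xfs_iunlink_update_bucket() finally swings agi_unlinked[] to A.
 */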
1947 
1948 /*
1949  * This is called when the inode's link count has gone to 0 or we are creating
1950  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
1951  *
1952  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
1953  * list when the inode is freed.
1954  */
1955 STATIC int
1956 xfs_iunlink(
1957 	struct xfs_trans	*tp,
1958 	struct xfs_inode	*ip)
1959 {
1960 	struct xfs_mount	*mp = tp->t_mountp;
1961 	struct xfs_perag	*pag;
1962 	struct xfs_buf		*agibp;
1963 	int			error;
1964 
1965 	ASSERT(VFS_I(ip)->i_nlink == 0);
1966 	ASSERT(VFS_I(ip)->i_mode != 0);
1967 	trace_xfs_iunlink(ip);
1968 
1969 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1970 
1971 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
1972 	error = xfs_read_agi(pag, tp, &agibp);
1973 	if (error)
1974 		goto out;
1975 
1976 	error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
1977 out:
1978 	xfs_perag_put(pag);
1979 	return error;
1980 }
1981 
1982 static int
1983 xfs_iunlink_remove_inode(
1984 	struct xfs_trans	*tp,
1985 	struct xfs_perag	*pag,
1986 	struct xfs_buf		*agibp,
1987 	struct xfs_inode	*ip)
1988 {
1989 	struct xfs_mount	*mp = tp->t_mountp;
1990 	struct xfs_agi		*agi = agibp->b_addr;
1991 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1992 	xfs_agino_t		head_agino;
1993 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1994 	int			error;
1995 
1996 	trace_xfs_iunlink_remove(ip);
1997 
1998 	/*
1999 	 * Get the index into the agi hash table for the list this inode is
2000 	 * on.  Make sure the head pointer isn't garbage.
2001 	 */
2002 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2003 	if (!xfs_verify_agino(pag, head_agino)) {
2004 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2005 				agi, sizeof(*agi));
2006 		return -EFSCORRUPTED;
2007 	}
2008 
2009 	/*
2010 	 * Log a NULLAGINO next_unlinked pointer for our inode so that it no
2011 	 * longer points at whatever was next in the list; the in-memory prev
2012 	 * and next aginos are then used to stitch the list back together.
2013 	 */
2014 	error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
2015 	if (error)
2016 		return error;
2017 
2018 	/*
2019 	 * Update the prev pointer in the next inode to point back to previous
2020 	 * inode in the chain.
2021 	 */
2022 	error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
2023 			ip->i_next_unlinked);
2024 	if (error)
2025 		return error;
2026 
2027 	if (head_agino != agino) {
2028 		struct xfs_inode	*prev_ip;
2029 
2030 		prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
2031 		if (!prev_ip)
2032 			return -EFSCORRUPTED;
2033 
2034 		error = xfs_iunlink_log_inode(tp, prev_ip, pag,
2035 				ip->i_next_unlinked);
2036 		prev_ip->i_next_unlinked = ip->i_next_unlinked;
2037 	} else {
2038 		/* Point the head of the list to the next unlinked inode. */
2039 		error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2040 				ip->i_next_unlinked);
2041 	}
2042 
2043 	ip->i_next_unlinked = NULLAGINO;
2044 	ip->i_prev_unlinked = NULLAGINO;
2045 	return error;
2046 }
2047 
2048 /*
2049  * Pull the on-disk inode from the AGI unlinked list.
2050  */
2051 STATIC int
2052 xfs_iunlink_remove(
2053 	struct xfs_trans	*tp,
2054 	struct xfs_perag	*pag,
2055 	struct xfs_inode	*ip)
2056 {
2057 	struct xfs_buf		*agibp;
2058 	int			error;
2059 
2060 	trace_xfs_iunlink_remove(ip);
2061 
2062 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2063 	error = xfs_read_agi(pag, tp, &agibp);
2064 	if (error)
2065 		return error;
2066 
2067 	return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
2068 }
2069 
2070 /*
2071  * Look up the specified inode number and, if it is not already marked
2072  * XFS_ISTALE, mark it stale. We should only find clean inodes in this lookup
2073  * that aren't already stale.
2074  */
2075 static void
2076 xfs_ifree_mark_inode_stale(
2077 	struct xfs_perag	*pag,
2078 	struct xfs_inode	*free_ip,
2079 	xfs_ino_t		inum)
2080 {
2081 	struct xfs_mount	*mp = pag->pag_mount;
2082 	struct xfs_inode_log_item *iip;
2083 	struct xfs_inode	*ip;
2084 
2085 retry:
2086 	rcu_read_lock();
2087 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2088 
2089 	/* Inode not in memory, nothing to do */
2090 	if (!ip) {
2091 		rcu_read_unlock();
2092 		return;
2093 	}
2094 
2095 	/*
2096 	 * Because this is an RCU-protected lookup, we could find a recently
2097 	 * freed or even reallocated inode during the lookup. We need to check
2098 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2099 	 * valid, is the wrong inode, or is stale.
2100 	 */
2101 	spin_lock(&ip->i_flags_lock);
2102 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2103 		goto out_iflags_unlock;
2104 
2105 	/*
2106 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2107 	 * other inodes that we did not find in the list attached to the buffer
2108 	 * and are not already marked stale. If we can't lock it, back off and
2109 	 * retry.
2110 	 */
2111 	if (ip != free_ip) {
2112 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2113 			spin_unlock(&ip->i_flags_lock);
2114 			rcu_read_unlock();
2115 			delay(1);
2116 			goto retry;
2117 		}
2118 	}
2119 	ip->i_flags |= XFS_ISTALE;
2120 
2121 	/*
2122 	 * If the inode is flushing, it is already attached to the buffer.  All
2123 	 * we need to do here is mark the inode stale so buffer IO completion
2124 	 * will remove it from the AIL.
2125 	 */
2126 	iip = ip->i_itemp;
2127 	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2128 		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2129 		ASSERT(iip->ili_last_fields);
2130 		goto out_iunlock;
2131 	}
2132 
2133 	/*
2134 	 * Inodes not attached to the buffer can be released immediately.
2135 	 * Everything else has to go through xfs_iflush_abort() on journal
2136 	 * commit as the flock synchronises removal of the inode from the
2137 	 * cluster buffer against inode reclaim.
2138 	 */
2139 	if (!iip || list_empty(&iip->ili_item.li_bio_list))
2140 		goto out_iunlock;
2141 
2142 	__xfs_iflags_set(ip, XFS_IFLUSHING);
2143 	spin_unlock(&ip->i_flags_lock);
2144 	rcu_read_unlock();
2145 
2146 	/* We have a dirty inode in memory that has not yet been flushed. */
2147 	spin_lock(&iip->ili_lock);
2148 	iip->ili_last_fields = iip->ili_fields;
2149 	iip->ili_fields = 0;
2150 	iip->ili_fsync_fields = 0;
2151 	spin_unlock(&iip->ili_lock);
2152 	ASSERT(iip->ili_last_fields);
2153 
2154 	if (ip != free_ip)
2155 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2156 	return;
2157 
2158 out_iunlock:
2159 	if (ip != free_ip)
2160 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2161 out_iflags_unlock:
2162 	spin_unlock(&ip->i_flags_lock);
2163 	rcu_read_unlock();
2164 }
2165 
2166 /*
2167  * A big issue when freeing the inode cluster is that we _cannot_ skip any
2168  * inodes that are in memory - they all must be marked stale and attached to
2169  * the cluster buffer.
2170  */
2171 static int
2172 xfs_ifree_cluster(
2173 	struct xfs_trans	*tp,
2174 	struct xfs_perag	*pag,
2175 	struct xfs_inode	*free_ip,
2176 	struct xfs_icluster	*xic)
2177 {
2178 	struct xfs_mount	*mp = free_ip->i_mount;
2179 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2180 	struct xfs_buf		*bp;
2181 	xfs_daddr_t		blkno;
2182 	xfs_ino_t		inum = xic->first_ino;
2183 	int			nbufs;
2184 	int			i, j;
2185 	int			ioffset;
2186 	int			error;
2187 
2188 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2189 
2190 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2191 		/*
2192 		 * The allocation bitmap tells us which inodes of the chunk were
2193 		 * physically allocated. Skip the cluster if an inode falls into
2194 		 * a sparse region.
2195 		 */
2196 		ioffset = inum - xic->first_ino;
2197 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2198 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2199 			continue;
2200 		}
2201 
2202 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2203 					 XFS_INO_TO_AGBNO(mp, inum));
2204 
2205 		/*
2206 		 * We obtain and lock the backing buffer first to ensure that
2207 		 * dirty inodes attached to the buffer remain in
2208 		 * the flushing state while we mark them stale.
2209 		 *
2210 		 * If we scan the in-memory inodes first, then buffer IO can
2211 		 * complete before we get a lock on it, and hence we may fail
2212 		 * to mark all the active inodes on the buffer stale.
2213 		 */
2214 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2215 				mp->m_bsize * igeo->blocks_per_cluster,
2216 				XBF_UNMAPPED, &bp);
2217 		if (error)
2218 			return error;
2219 
2220 		/*
2221 		 * This buffer may not have been correctly initialised as we
2222 		 * didn't read it from disk. That's not important because we are
2223 		 * only using it to mark the buffer as stale in the log, and to
2224 		 * attach stale cached inodes on it. That means it will never be
2225 		 * dispatched for IO. If it is, we want to know about it, and we
2226 		 * want it to fail. We can achieve this by adding a write
2227 		 * verifier to the buffer.
2228 		 */
2229 		bp->b_ops = &xfs_inode_buf_ops;
2230 
2231 		/*
2232 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2233 		 * too. This requires lookups, and will skip inodes that we've
2234 		 * already marked XFS_ISTALE.
2235 		 */
2236 		for (i = 0; i < igeo->inodes_per_cluster; i++)
2237 			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2238 
2239 		xfs_trans_stale_inode_buf(tp, bp);
2240 		xfs_trans_binval(tp, bp);
2241 	}
2242 	return 0;
2243 }
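
/*
 * Worked example (the exact geometry depends on mkfs parameters): a
 * 64-inode chunk of 512-byte inodes on 4k blocks spans ialloc_blks = 8
 * blocks; with a 16k inode cluster buffer, blocks_per_cluster = 4, so
 * the loop above stales nbufs = 2 cluster buffers covering
 * inodes_per_cluster = 32 inodes each.
 */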
2244 
2245 /*
2246  * This is called to return an inode to the inode free list.  The inode should
2247  * already be truncated to 0 length and have no pages associated with it.  This
2248  * routine also assumes that the inode is already a part of the transaction.
2249  *
2250  * The on-disk copy of the inode will have been added to the list of unlinked
2251  * inodes in the AGI. We need to remove the inode from that list atomically with
2252  * respect to freeing it here.
2253  */
2254 int
2255 xfs_ifree(
2256 	struct xfs_trans	*tp,
2257 	struct xfs_inode	*ip)
2258 {
2259 	struct xfs_mount	*mp = ip->i_mount;
2260 	struct xfs_perag	*pag;
2261 	struct xfs_icluster	xic = { 0 };
2262 	struct xfs_inode_log_item *iip = ip->i_itemp;
2263 	int			error;
2264 
2265 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2266 	ASSERT(VFS_I(ip)->i_nlink == 0);
2267 	ASSERT(ip->i_df.if_nextents == 0);
2268 	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2269 	ASSERT(ip->i_nblocks == 0);
2270 
2271 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2272 
2273 	/*
2274 	 * Free the inode first so that we guarantee that the AGI lock is going
2275 	 * to be taken before we remove the inode from the unlinked list. This
2276 	 * makes the AGI lock -> unlinked list modification order the same as
2277 	 * used in O_TMPFILE creation.
2278 	 */
2279 	error = xfs_difree(tp, pag, ip->i_ino, &xic);
2280 	if (error)
2281 		goto out;
2282 
2283 	error = xfs_iunlink_remove(tp, pag, ip);
2284 	if (error)
2285 		goto out;
2286 
2287 	/*
2288 	 * Free any local-format data sitting around before we reset the
2289 	 * data fork to extents format.  Note that the attr fork data has
2290 	 * already been freed by xfs_attr_inactive.
2291 	 */
2292 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2293 		kmem_free(ip->i_df.if_u1.if_data);
2294 		ip->i_df.if_u1.if_data = NULL;
2295 		ip->i_df.if_bytes = 0;
2296 	}
2297 
2298 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2299 	ip->i_diflags = 0;
2300 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2301 	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2302 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2303 	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2304 		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2305 
2306 	/* Don't attempt to replay owner changes for a deleted inode */
2307 	spin_lock(&iip->ili_lock);
2308 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2309 	spin_unlock(&iip->ili_lock);
2310 
2311 	/*
2312 	 * Bump the generation count so no one will be confused
2313 	 * by reincarnations of this inode.
2314 	 */
2315 	VFS_I(ip)->i_generation++;
2316 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2317 
2318 	if (xic.deleted)
2319 		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2320 out:
2321 	xfs_perag_put(pag);
2322 	return error;
2323 }
2324 
2325 /*
2326  * This is called to unpin an inode.  The caller must have the inode locked
2327  * in at least shared mode so that the buffer cannot be subsequently pinned
2328  * once someone is waiting for it to be unpinned.
2329  */
2330 static void
2331 xfs_iunpin(
2332 	struct xfs_inode	*ip)
2333 {
2334 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2335 
2336 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2337 
2338 	/* Give the log a push to start the unpinning I/O */
2339 	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2341 }
2342 
2343 static void
2344 __xfs_iunpin_wait(
2345 	struct xfs_inode	*ip)
2346 {
2347 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2348 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2349 
2350 	xfs_iunpin(ip);
2351 
2352 	do {
2353 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2354 		if (xfs_ipincount(ip))
2355 			io_schedule();
2356 	} while (xfs_ipincount(ip));
2357 	finish_wait(wq, &wait.wq_entry);
2358 }
2359 
2360 void
2361 xfs_iunpin_wait(
2362 	struct xfs_inode	*ip)
2363 {
2364 	if (xfs_ipincount(ip))
2365 		__xfs_iunpin_wait(ip);
2366 }
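
/*
 * This is the standard wait_bit pattern: waiters sleep on the shared
 * waitqueue for __XFS_IPINNED_BIT and are woken from the inode log item
 * side once the pin count drops to zero; the loop in __xfs_iunpin_wait()
 * rechecks xfs_ipincount() to tolerate spurious wakeups.
 */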
2367 
2368 /*
2369  * Removing an inode from the namespace involves removing the directory entry
2370  * and dropping the link count on the inode. Removing the directory entry can
2371  * result in locking an AGF (directory blocks were freed) and removing a link
2372  * count can result in placing the inode on an unlinked list which results in
2373  * locking an AGI.
2374  *
2375  * The big problem here is that we have an ordering constraint on AGF and AGI
2376  * locking - inode allocation locks the AGI, then can allocate a new extent for
2377  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2378  * removes the inode from the unlinked list, requiring that we lock the AGI
2379  * first, and then freeing the inode can result in an inode chunk being freed
2380  * and hence freeing disk space requiring that we lock an AGF.
2381  *
2382  * Hence the ordering that is imposed by other parts of the code is AGI before
2383  * AGF. This means we cannot remove the directory entry before we drop the
2384  * inode's link count and put it on the unlinked list, as this results in a lock
2385  * order of AGF then AGI, and this can deadlock against inode allocation and
2386  * freeing. Therefore we must drop the link counts before we remove the
2387  * directory entry.
2388  *
2389  * This is still safe from a transactional point of view - it is not until we
2390  * get to xfs_defer_finish() that we have the possibility of multiple
2391  * transactions in this operation. Hence as long as we remove the directory
2392  * entry and drop the link count in the first transaction of the remove
2393  * operation, there are no transactional constraints on the ordering here.
2394  */
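/*
 * Concretely, in xfs_remove() below: xfs_droplink() may put the victim
 * inode on the AGI unlinked list (locking the AGI), and only then does
 * xfs_dir_removename() run, which may free directory blocks (locking an
 * AGF), preserving the AGI -> AGF order described above.
 */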
2395 int
2396 xfs_remove(
2397 	xfs_inode_t             *dp,
2398 	struct xfs_name		*name,
2399 	xfs_inode_t		*ip)
2400 {
2401 	xfs_mount_t		*mp = dp->i_mount;
2402 	xfs_trans_t             *tp = NULL;
2403 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2404 	int			dontcare;
2405 	int                     error = 0;
2406 	uint			resblks;
2407 
2408 	trace_xfs_remove(dp, name);
2409 
2410 	if (xfs_is_shutdown(mp))
2411 		return -EIO;
2412 
2413 	error = xfs_qm_dqattach(dp);
2414 	if (error)
2415 		goto std_return;
2416 
2417 	error = xfs_qm_dqattach(ip);
2418 	if (error)
2419 		goto std_return;
2420 
2421 	/*
2422 	 * We try to get the real space reservation first, allowing for
2423 	 * directory btree deletion(s) implying possible bmap insert(s).  If we
2424 	 * can't get the space reservation then we use 0 instead, and avoid the
2425 	 * bmap btree insert(s) in the directory code by, if the bmap insert
2426 	 * tries to happen, instead trimming the LAST block from the directory.
2427 	 *
2428 	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2429 	 * the directory code can handle a reservationless update and we don't
2430 	 * want to prevent a user from trying to free space by deleting things.
2431 	 */
2432 	resblks = XFS_REMOVE_SPACE_RES(mp);
2433 	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2434 			&tp, &dontcare);
2435 	if (error) {
2436 		ASSERT(error != -ENOSPC);
2437 		goto std_return;
2438 	}
2439 
2440 	/*
2441 	 * If we're removing a directory perform some additional validation.
2442 	 * If we're removing a directory, perform some additional validation.
2443 	if (is_dir) {
2444 		ASSERT(VFS_I(ip)->i_nlink >= 2);
2445 		if (VFS_I(ip)->i_nlink != 2) {
2446 			error = -ENOTEMPTY;
2447 			goto out_trans_cancel;
2448 		}
2449 		if (!xfs_dir_isempty(ip)) {
2450 			error = -ENOTEMPTY;
2451 			goto out_trans_cancel;
2452 		}
2453 
2454 		/* Drop the link from ip's "..".  */
2455 		error = xfs_droplink(tp, dp);
2456 		if (error)
2457 			goto out_trans_cancel;
2458 
2459 		/* Drop the "." link from ip to self.  */
2460 		error = xfs_droplink(tp, ip);
2461 		if (error)
2462 			goto out_trans_cancel;
2463 
2464 		/*
2465 		 * Point the unlinked child directory's ".." entry to the root
2466 		 * directory to eliminate back-references to inodes that may
2467 		 * get freed before the child directory is closed.  If the fs
2468 		 * gets shrunk, this can lead to dirent inode validation errors.
2469 		 */
2470 		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2471 			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2472 					tp->t_mountp->m_sb.sb_rootino, 0);
2473 			if (error)
2474 				goto out_trans_cancel;
2475 		}
2476 	} else {
2477 		/*
2478 		 * When removing a non-directory we need to log the parent
2479 		 * inode here.  For a directory this is done implicitly
2480 		 * by the xfs_droplink call for the ".." entry.
2481 		 */
2482 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2483 	}
2484 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2485 
2486 	/* Drop the link from dp to ip. */
2487 	error = xfs_droplink(tp, ip);
2488 	if (error)
2489 		goto out_trans_cancel;
2490 
2491 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2492 	if (error) {
2493 		ASSERT(error != -ENOENT);
2494 		goto out_trans_cancel;
2495 	}
2496 
2497 	/*
2498 	 * If this is a synchronous mount, make sure that the
2499 	 * remove transaction goes to disk before returning to
2500 	 * the user.
2501 	 */
2502 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2503 		xfs_trans_set_sync(tp);
2504 
2505 	error = xfs_trans_commit(tp);
2506 	if (error)
2507 		goto std_return;
2508 
2509 	if (is_dir && xfs_inode_is_filestream(ip))
2510 		xfs_filestream_deassociate(ip);
2511 
2512 	return 0;
2513 
2514  out_trans_cancel:
2515 	xfs_trans_cancel(tp);
2516  std_return:
2517 	return error;
2518 }
2519 
2520 /*
2521  * Enter all inodes for a rename transaction into a sorted array.
2522  */
2523 #define __XFS_SORT_INODES	5
2524 STATIC void
2525 xfs_sort_for_rename(
2526 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2527 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2528 	struct xfs_inode	*ip1,	/* in: inode of old entry */
2529 	struct xfs_inode	*ip2,	/* in: inode of new entry */
2530 	struct xfs_inode	*wip,	/* in: whiteout inode */
2531 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2532 	int			*num_inodes)  /* in/out: inodes in array */
2533 {
2534 	int			i, j;
2535 
2536 	ASSERT(*num_inodes == __XFS_SORT_INODES);
2537 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2538 
2539 	/*
2540 	 * i_tab contains a list of pointers to inodes.  We initialize
2541 	 * the table here and sort it below.  We will then use it to
2542 	 * order the acquisition of the inode locks.
2543 	 *
2544 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2545 	 */
2546 	i = 0;
2547 	i_tab[i++] = dp1;
2548 	i_tab[i++] = dp2;
2549 	i_tab[i++] = ip1;
2550 	if (ip2)
2551 		i_tab[i++] = ip2;
2552 	if (wip)
2553 		i_tab[i++] = wip;
2554 	*num_inodes = i;
2555 
2556 	/*
2557 	 * Sort the elements via bubble sort.  (Remember, there are at
2558 	 * most 5 elements to sort, so this is adequate.)
2559 	 */
2560 	for (i = 0; i < *num_inodes; i++) {
2561 		for (j = 1; j < *num_inodes; j++) {
2562 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2563 				struct xfs_inode *temp = i_tab[j];
2564 				i_tab[j] = i_tab[j-1];
2565 				i_tab[j-1] = temp;
2566 			}
2567 		}
2568 	}
2569 }
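
/*
 * A typical call, as in xfs_rename() below, starts with num_inodes set
 * to __XFS_SORT_INODES and possibly NULL target and whiteout inodes; on
 * return i_tab holds the participating inodes in ascending i_ino order,
 * ready for xfs_lock_inodes() so that every rename acquires its ILOCKs
 * in the same global order and cannot ABBA-deadlock a concurrent rename.
 */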
2570 
2571 static int
2572 xfs_finish_rename(
2573 	struct xfs_trans	*tp)
2574 {
2575 	/*
2576 	 * If this is a synchronous mount, make sure that the rename transaction
2577 	 * goes to disk before returning to the user.
2578 	 */
2579 	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2580 		xfs_trans_set_sync(tp);
2581 
2582 	return xfs_trans_commit(tp);
2583 }
2584 
2585 /*
2586  * xfs_cross_rename()
2587  *
2588  * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
2589  * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
2590 STATIC int
2591 xfs_cross_rename(
2592 	struct xfs_trans	*tp,
2593 	struct xfs_inode	*dp1,
2594 	struct xfs_name		*name1,
2595 	struct xfs_inode	*ip1,
2596 	struct xfs_inode	*dp2,
2597 	struct xfs_name		*name2,
2598 	struct xfs_inode	*ip2,
2599 	int			spaceres)
2600 {
2601 	int		error = 0;
2602 	int		ip1_flags = 0;
2603 	int		ip2_flags = 0;
2604 	int		dp2_flags = 0;
2605 
2606 	/* Swap inode number for dirent in first parent */
2607 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2608 	if (error)
2609 		goto out_trans_abort;
2610 
2611 	/* Swap inode number for dirent in second parent */
2612 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2613 	if (error)
2614 		goto out_trans_abort;
2615 
2616 	/*
2617 	 * If we're renaming one or more directories across different parents,
2618 	 * update the respective ".." entries (and link counts) to match the new
2619 	 * parents.
2620 	 */
2621 	if (dp1 != dp2) {
2622 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2623 
2624 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2625 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2626 						dp1->i_ino, spaceres);
2627 			if (error)
2628 				goto out_trans_abort;
2629 
2630 			/* transfer ip2 ".." reference to dp1 */
2631 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2632 				error = xfs_droplink(tp, dp2);
2633 				if (error)
2634 					goto out_trans_abort;
2635 				xfs_bumplink(tp, dp1);
2636 			}
2637 
2638 			/*
2639 			 * Although ip1 isn't changed here, userspace needs
2640 			 * to be warned about the change, so that applications
2641 			 * relying on it (like backup programs) will properly
2642 			 * notice the change.
2643 			 */
2644 			ip1_flags |= XFS_ICHGTIME_CHG;
2645 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2646 		}
2647 
2648 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2649 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2650 						dp2->i_ino, spaceres);
2651 			if (error)
2652 				goto out_trans_abort;
2653 
2654 			/* transfer ip1 ".." reference to dp2 */
2655 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2656 				error = xfs_droplink(tp, dp1);
2657 				if (error)
2658 					goto out_trans_abort;
2659 				xfs_bumplink(tp, dp2);
2660 			}
2661 
2662 			/*
2663 			 * Although ip2 isn't changed here, userspace needs
2664 			 * to be warned about the change, so that applications
2665 			 * relying on it (like backup programs) will properly
2666 			 * notice the change.
2667 			 */
2668 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2669 			ip2_flags |= XFS_ICHGTIME_CHG;
2670 		}
2671 	}
2672 
2673 	if (ip1_flags) {
2674 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2675 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2676 	}
2677 	if (ip2_flags) {
2678 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2679 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2680 	}
2681 	if (dp2_flags) {
2682 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2683 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2684 	}
2685 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2686 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2687 	return xfs_finish_rename(tp);
2688 
2689 out_trans_abort:
2690 	xfs_trans_cancel(tp);
2691 	return error;
2692 }
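
/*
 * From userspace this path is reached via, for example:
 *
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *
 * which atomically swaps the two directory entries.  Both names must
 * already exist, hence the -EINVAL check for a NULL target_ip in
 * xfs_rename().
 */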
2693 
2694 /*
2695  * xfs_rename_alloc_whiteout()
2696  *
2697  * Return a referenced, unlinked, unlocked inode that can be used as a
2698  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2699  * crash between allocating the inode and linking it into the rename
2700  * transaction, recovery will free the inode and we won't leak it.
2701  */
2702 static int
2703 xfs_rename_alloc_whiteout(
2704 	struct mnt_idmap	*idmap,
2705 	struct xfs_name		*src_name,
2706 	struct xfs_inode	*dp,
2707 	struct xfs_inode	**wip)
2708 {
2709 	struct xfs_inode	*tmpfile;
2710 	struct qstr		name;
2711 	int			error;
2712 
2713 	error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE,
2714 				   &tmpfile);
2715 	if (error)
2716 		return error;
2717 
2718 	name.name = src_name->name;
2719 	name.len = src_name->len;
2720 	error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
2721 	if (error) {
2722 		xfs_finish_inode_setup(tmpfile);
2723 		xfs_irele(tmpfile);
2724 		return error;
2725 	}
2726 
2727 	/*
2728 	 * Prepare the tmpfile inode as if it were created through the VFS.
2729 	 * Complete the inode setup and flag it as linkable.  nlink is already
2730 	 * zero, so we can skip the drop_nlink.
2731 	 */
2732 	xfs_setup_iops(tmpfile);
2733 	xfs_finish_inode_setup(tmpfile);
2734 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
2735 
2736 	*wip = tmpfile;
2737 	return 0;
2738 }
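
/*
 * A whiteout rename (renameat2() with RENAME_WHITEOUT, as used by
 * overlayfs) leaves the inode allocated above in place of the source
 * name: a character device with WHITEOUT_MODE that the VFS treats as a
 * "this name is deleted" marker on union mounts.  xfs_rename() therefore
 * replaces the source dirent with it instead of removing the name.
 */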
2739 
2740 /*
2741  * xfs_rename
2742  */
2743 int
2744 xfs_rename(
2745 	struct mnt_idmap	*idmap,
2746 	struct xfs_inode	*src_dp,
2747 	struct xfs_name		*src_name,
2748 	struct xfs_inode	*src_ip,
2749 	struct xfs_inode	*target_dp,
2750 	struct xfs_name		*target_name,
2751 	struct xfs_inode	*target_ip,
2752 	unsigned int		flags)
2753 {
2754 	struct xfs_mount	*mp = src_dp->i_mount;
2755 	struct xfs_trans	*tp;
2756 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
2757 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
2758 	int			i;
2759 	int			num_inodes = __XFS_SORT_INODES;
2760 	bool			new_parent = (src_dp != target_dp);
2761 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2762 	int			spaceres;
2763 	bool			retried = false;
2764 	int			error, nospace_error = 0;
2765 
2766 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2767 
2768 	if ((flags & RENAME_EXCHANGE) && !target_ip)
2769 		return -EINVAL;
2770 
2771 	/*
2772 	 * If we are doing a whiteout operation, allocate the whiteout inode
2773 	 * we will be placing at the target and ensure the type is set
2774 	 * appropriately.
2775 	 */
2776 	if (flags & RENAME_WHITEOUT) {
2777 		error = xfs_rename_alloc_whiteout(idmap, src_name,
2778 						  target_dp, &wip);
2779 		if (error)
2780 			return error;
2781 
2782 		/* setup target dirent info as whiteout */
2783 		src_name->type = XFS_DIR3_FT_CHRDEV;
2784 	}
2785 
2786 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2787 				inodes, &num_inodes);
2788 
2789 retry:
2790 	nospace_error = 0;
2791 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2792 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2793 	if (error == -ENOSPC) {
2794 		nospace_error = error;
2795 		spaceres = 0;
2796 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2797 				&tp);
2798 	}
2799 	if (error)
2800 		goto out_release_wip;
2801 
2802 	/*
2803 	 * Attach the dquots to the inodes
2804 	 */
2805 	error = xfs_qm_vop_rename_dqattach(inodes);
2806 	if (error)
2807 		goto out_trans_cancel;
2808 
2809 	/*
2810 	 * Lock all the participating inodes. Depending upon whether
2811 	 * the target_name exists in the target directory, and
2812 	 * whether the target directory is the same as the source
2813 	 * directory, we can lock from 2 to 5 inodes.
2814 	 */
2815 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2816 
2817 	/*
2818 	 * Join all the inodes to the transaction. From this point on,
2819 	 * we can rely on either trans_commit or trans_cancel to unlock
2820 	 * them.
2821 	 */
2822 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2823 	if (new_parent)
2824 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2825 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2826 	if (target_ip)
2827 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2828 	if (wip)
2829 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2830 
2831 	/*
2832 	 * If we are using project inheritance, we only allow renames
2833 	 * into our tree when the project IDs are the same; else the
2834 	 * tree quota mechanism would be circumvented.
2835 	 */
2836 	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
2837 		     target_dp->i_projid != src_ip->i_projid)) {
2838 		error = -EXDEV;
2839 		goto out_trans_cancel;
2840 	}
2841 
2842 	/* RENAME_EXCHANGE is unique from here on. */
2843 	if (flags & RENAME_EXCHANGE)
2844 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2845 					target_dp, target_name, target_ip,
2846 					spaceres);
2847 
2848 	/*
2849 	 * Try to reserve quota to handle an expansion of the target directory.
2850 	 * We'll allow the rename to continue in reservationless mode if we hit
2851 	 * a space usage constraint.  If we trigger reservationless mode, save
2852 	 * the errno if there isn't any free space in the target directory.
2853 	 */
2854 	if (spaceres != 0) {
2855 		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
2856 				0, false);
2857 		if (error == -EDQUOT || error == -ENOSPC) {
2858 			if (!retried) {
2859 				xfs_trans_cancel(tp);
2860 				xfs_blockgc_free_quota(target_dp, 0);
2861 				retried = true;
2862 				goto retry;
2863 			}
2864 
2865 			nospace_error = error;
2866 			spaceres = 0;
2867 			error = 0;
2868 		}
2869 		if (error)
2870 			goto out_trans_cancel;
2871 	}
2872 
2873 	/*
2874 	 * Check for expected errors before we dirty the transaction
2875 	 * so we can return an error without a transaction abort.
2876 	 */
2877 	if (target_ip == NULL) {
2878 		/*
2879 		 * If there's no space reservation, check the entry will
2880 		 * fit before actually inserting it.
2881 		 */
2882 		if (!spaceres) {
2883 			error = xfs_dir_canenter(tp, target_dp, target_name);
2884 			if (error)
2885 				goto out_trans_cancel;
2886 		}
2887 	} else {
2888 		/*
2889 		 * If target exists and it's a directory, check whether
2890 		 * it can be destroyed.
2891 		 */
2892 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2893 		    (!xfs_dir_isempty(target_ip) ||
2894 		     (VFS_I(target_ip)->i_nlink > 2))) {
2895 			error = -EEXIST;
2896 			goto out_trans_cancel;
2897 		}
2898 	}
2899 
2900 	/*
2901 	 * Lock the AGI buffers we need to handle bumping the nlink of the
2902 	 * whiteout inode off the unlinked list and to handle dropping the
2903 	 * nlink of the target inode.  Per locking order rules, do this in
2904 	 * increasing AG order and before directory block allocation tries to
2905 	 * grab AGFs because we grab AGIs before AGFs.
2906 	 *
2907 	 * The (vfs) caller must ensure that if src is a directory then
2908 	 * target_ip is either null or an empty directory.
2909 	 */
2910 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
2911 		if (inodes[i] == wip ||
2912 		    (inodes[i] == target_ip &&
2913 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
2914 			struct xfs_perag	*pag;
2915 			struct xfs_buf		*bp;
2916 
2917 			pag = xfs_perag_get(mp,
2918 					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
2919 			error = xfs_read_agi(pag, tp, &bp);
2920 			xfs_perag_put(pag);
2921 			if (error)
2922 				goto out_trans_cancel;
2923 		}
2924 	}
2925 
2926 	/*
2927 	 * Directory entry creation below may acquire the AGF. Remove
2928 	 * the whiteout from the unlinked list first to preserve correct
2929 	 * AGI/AGF locking order. This dirties the transaction so failures
2930 	 * after this point will abort and log recovery will clean up the
2931 	 * mess.
2932 	 *
2933 	 * For whiteouts, we need to bump the link count on the whiteout
2934 	 * inode. After this point we have a real link, so clear the tmpfile
2935 	 * state flag from the inode so it doesn't accidentally get misused
2936 	 * in the future.
2937 	 */
2938 	if (wip) {
2939 		struct xfs_perag	*pag;
2940 
2941 		ASSERT(VFS_I(wip)->i_nlink == 0);
2942 
2943 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
2944 		error = xfs_iunlink_remove(tp, pag, wip);
2945 		xfs_perag_put(pag);
2946 		if (error)
2947 			goto out_trans_cancel;
2948 
2949 		xfs_bumplink(tp, wip);
2950 		VFS_I(wip)->i_state &= ~I_LINKABLE;
2951 	}
2952 
2953 	/*
2954 	 * Set up the target.
2955 	 */
2956 	if (target_ip == NULL) {
2957 		/*
2958 		 * If target does not exist and the rename crosses
2959 		 * directories, adjust the target directory link count
2960 		 * to account for the ".." reference from the new entry.
2961 		 */
2962 		error = xfs_dir_createname(tp, target_dp, target_name,
2963 					   src_ip->i_ino, spaceres);
2964 		if (error)
2965 			goto out_trans_cancel;
2966 
2967 		xfs_trans_ichgtime(tp, target_dp,
2968 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2969 
2970 		if (new_parent && src_is_directory) {
2971 			xfs_bumplink(tp, target_dp);
2972 		}
2973 	} else { /* target_ip != NULL */
2974 		/*
2975 		 * Link the source inode under the target name.
2976 		 * If the source inode is a directory and we are moving
2977 		 * it across directories, its ".." entry will be
2978 		 * inconsistent until we replace that down below.
2979 		 *
2980 		 * In case there is already an entry with the same
2981 		 * name at the destination directory, remove it first.
2982 		 */
2983 		error = xfs_dir_replace(tp, target_dp, target_name,
2984 					src_ip->i_ino, spaceres);
2985 		if (error)
2986 			goto out_trans_cancel;
2987 
2988 		xfs_trans_ichgtime(tp, target_dp,
2989 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2990 
2991 		/*
2992 		 * Decrement the link count on the target since the target
2993 		 * dir no longer points to it.
2994 		 */
2995 		error = xfs_droplink(tp, target_ip);
2996 		if (error)
2997 			goto out_trans_cancel;
2998 
2999 		if (src_is_directory) {
3000 			/*
3001 			 * Drop the link from the old "." entry.
3002 			 */
3003 			error = xfs_droplink(tp, target_ip);
3004 			if (error)
3005 				goto out_trans_cancel;
3006 		}
3007 	} /* target_ip != NULL */
3008 
3009 	/*
3010 	 * Remove the source.
3011 	 */
3012 	if (new_parent && src_is_directory) {
3013 		/*
3014 		 * Rewrite the ".." entry to point to the new
3015 		 * directory.
3016 		 */
3017 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3018 					target_dp->i_ino, spaceres);
3019 		ASSERT(error != -EEXIST);
3020 		if (error)
3021 			goto out_trans_cancel;
3022 	}
3023 
3024 	/*
3025 	 * We always want to hit the ctime on the source inode.
3026 	 *
3027 	 * This isn't strictly required by the standards since the source
3028 	 * inode isn't really being changed, but old unix file systems did
3029 	 * it and some incremental backup programs won't work without it.
3030 	 */
3031 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3032 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3033 
3034 	/*
3035 	 * Adjust the link count on src_dp.  This is necessary when
3036 	 * renaming a directory, either within one parent when
3037 	 * the target existed, or across two parent directories.
3038 	 */
3039 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3040 
3041 		/*
3042 		 * Decrement link count on src_directory since the
3043 		 * entry that's moved no longer points to it.
3044 		 */
3045 		error = xfs_droplink(tp, src_dp);
3046 		if (error)
3047 			goto out_trans_cancel;
3048 	}
3049 
3050 	/*
3051 	 * For whiteouts, we only need to update the source dirent with the
3052 	 * inode number of the whiteout inode rather than removing it
3053 	 * altogether.
3054 	 */
3055 	if (wip)
3056 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3057 					spaceres);
3058 	else
3059 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3060 					   spaceres);
3061 
3062 	if (error)
3063 		goto out_trans_cancel;
3064 
3065 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3066 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3067 	if (new_parent)
3068 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3069 
3070 	error = xfs_finish_rename(tp);
3071 	if (wip)
3072 		xfs_irele(wip);
3073 	return error;
3074 
3075 out_trans_cancel:
3076 	xfs_trans_cancel(tp);
3077 out_release_wip:
3078 	if (wip)
3079 		xfs_irele(wip);
3080 	if (error == -ENOSPC && nospace_error)
3081 		error = nospace_error;
3082 	return error;
3083 }
3084 
3085 static int
3086 xfs_iflush(
3087 	struct xfs_inode	*ip,
3088 	struct xfs_buf		*bp)
3089 {
3090 	struct xfs_inode_log_item *iip = ip->i_itemp;
3091 	struct xfs_dinode	*dip;
3092 	struct xfs_mount	*mp = ip->i_mount;
3093 	int			error;
3094 
3095 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3096 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3097 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3098 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3099 	ASSERT(iip->ili_item.li_buf == bp);
3100 
3101 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3102 
3103 	/*
3104 	 * We don't flush the inode if any of the following checks fail, but we
3105 	 * do still update the log item and attach to the backing buffer as if
3106 	 * the flush happened. This is a formality to facilitate predictable
3107 	 * error handling as the caller will shut down and fail the buffer.
3108 	 */
3109 	error = -EFSCORRUPTED;
3110 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3111 			       mp, XFS_ERRTAG_IFLUSH_1)) {
3112 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3113 			"%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
3114 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3115 		goto flush_out;
3116 	}
3117 	if (S_ISREG(VFS_I(ip)->i_mode)) {
3118 		if (XFS_TEST_ERROR(
3119 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3120 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3121 		    mp, XFS_ERRTAG_IFLUSH_3)) {
3122 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3123 				"%s: Bad regular inode %llu, ptr "PTR_FMT,
3124 				__func__, ip->i_ino, ip);
3125 			goto flush_out;
3126 		}
3127 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3128 		if (XFS_TEST_ERROR(
3129 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3130 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3131 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3132 		    mp, XFS_ERRTAG_IFLUSH_4)) {
3133 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3134 				"%s: Bad directory inode %llu, ptr "PTR_FMT,
3135 				__func__, ip->i_ino, ip);
3136 			goto flush_out;
3137 		}
3138 	}
3139 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
3140 				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3141 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3142 			"%s: detected corrupt incore inode %llu, "
3143 			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
3144 			__func__, ip->i_ino,
3145 			ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
3146 			ip->i_nblocks, ip);
3147 		goto flush_out;
3148 	}
3149 	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3150 				mp, XFS_ERRTAG_IFLUSH_6)) {
3151 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3152 			"%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
3153 			__func__, ip->i_ino, ip->i_forkoff, ip);
3154 		goto flush_out;
3155 	}
3156 
3157 	/*
3158 	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3159 	 * count for correct sequencing.  We bump the flush iteration count so
3160 	 * we can detect flushes which postdate a log record during recovery.
3161 	 * This is redundant as we now log every change and hence this can't
3162 	 * happen, but we still need to do it to ensure backwards compatibility
3163 	 * with old kernels that predate logging all inode changes.
3164 	 */
3165 	if (!xfs_has_v3inodes(mp))
3166 		ip->i_flushiter++;
3167 
3168 	/*
3169 	 * If there are inline format data / attr forks attached to this inode,
3170 	 * make sure they are not corrupt.
3171 	 */
3172 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3173 	    xfs_ifork_verify_local_data(ip))
3174 		goto flush_out;
3175 	if (xfs_inode_has_attr_fork(ip) &&
3176 	    ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
3177 	    xfs_ifork_verify_local_attr(ip))
3178 		goto flush_out;
3179 
3180 	/*
3181 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3182 	 * copy out the core of the inode, because if the inode is dirty at all
3183 	 * the core must be.
3184 	 */
3185 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3186 
3187 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3188 	if (!xfs_has_v3inodes(mp)) {
3189 		if (ip->i_flushiter == DI_MAX_FLUSH)
3190 			ip->i_flushiter = 0;
3191 	}
3192 
3193 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3194 	if (xfs_inode_has_attr_fork(ip))
3195 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3196 
3197 	/*
3198 	 * We've recorded everything logged in the inode, so we'd like to clear
3199 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3200 	 * However, we can't stop logging all this information until the data
3201 	 * we've copied into the disk buffer is written to disk.  If we did we
3202 	 * we've copied into the disk buffer is written to disk.  If we did, we
3203 	 * after re-logging only part of it, and in the face of a crash we
3204 	 * wouldn't have all the data we need to recover.
3205 	 *
3206 	 * What we do is move the bits to the ili_last_fields field.  When
3207 	 * logging the inode, these bits are moved back to the ili_fields field.
3208 	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3209 	 * we know that the information those bits represent is permanently on
3210 	 * disk.  As long as the flush completes before the inode is logged
3211 	 * again, then both ili_fields and ili_last_fields will be cleared.
3212 	 */
3213 	error = 0;
3214 flush_out:
3215 	spin_lock(&iip->ili_lock);
3216 	iip->ili_last_fields = iip->ili_fields;
3217 	iip->ili_fields = 0;
3218 	iip->ili_fsync_fields = 0;
3219 	spin_unlock(&iip->ili_lock);
3220 
3221 	/*
3222 	 * Store the current LSN of the inode so that we can tell whether the
3223 	 * item has moved in the AIL from xfs_buf_inode_iodone().
3224 	 */
3225 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3226 				&iip->ili_item.li_lsn);
3227 
3228 	/* generate the checksum. */
3229 	xfs_dinode_calc_crc(mp, dip);
3230 	return error;
3231 }
3232 
3233 /*
3234  * Non-blocking flush of dirty inode metadata into the backing buffer.
3235  *
3236  * The caller must have a reference to the inode and hold the cluster buffer
3237  * locked. The function will walk all the inodes attached to the cluster
3238  * buffer that it can lock without blocking, and flush them to the cluster buffer.
3239  *
3240  * On successful flushing of at least one inode, the caller must write out the
3241  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3242  * the caller needs to release the buffer. On failure, the filesystem will be
3243  * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3244  * will be returned.
3245  */
3246 int
3247 xfs_iflush_cluster(
3248 	struct xfs_buf		*bp)
3249 {
3250 	struct xfs_mount	*mp = bp->b_mount;
3251 	struct xfs_log_item	*lip, *n;
3252 	struct xfs_inode	*ip;
3253 	struct xfs_inode_log_item *iip;
3254 	int			clcount = 0;
3255 	int			error = 0;
3256 
3257 	/*
3258 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3259 	 * will remove the inode log item from the list.
3260 	 */
3261 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3262 		iip = (struct xfs_inode_log_item *)lip;
3263 		ip = iip->ili_inode;
3264 
3265 		/*
3266 		 * Quick and dirty check to avoid locks if possible.
3267 		 */
3268 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3269 			continue;
3270 		if (xfs_ipincount(ip))
3271 			continue;
3272 
3273 		/*
3274 		 * The inode is still attached to the buffer, which means it is
3275 		 * dirty but reclaim might try to grab it. Check carefully for
3276 		 * that, and grab the ilock while still holding the i_flags_lock
3277 		 * to guarantee reclaim will not be able to reclaim this inode
3278 		 * once we drop the i_flags_lock.
3279 		 */
3280 		spin_lock(&ip->i_flags_lock);
3281 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3282 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3283 			spin_unlock(&ip->i_flags_lock);
3284 			continue;
3285 		}
3286 
3287 		/*
3288 		 * ILOCK will pin the inode against reclaim and prevent
3289 		 * concurrent transactions modifying the inode while we are
3290 		 * flushing the inode. If we get the lock, set the flushing
3291 		 * state before we drop the i_flags_lock.
3292 		 */
3293 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3294 			spin_unlock(&ip->i_flags_lock);
3295 			continue;
3296 		}
3297 		__xfs_iflags_set(ip, XFS_IFLUSHING);
3298 		spin_unlock(&ip->i_flags_lock);
3299 
3300 		/*
3301 		 * Abort flushing this inode if we are shut down because the
3302 		 * inode may not currently be in the AIL. This can occur when
3303 		 * a log I/O failure unpins the inode without inserting it into the
3304 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
3305 		 * that otherwise looks like it should be flushed.
3306 		 */
3307 		if (xlog_is_shutdown(mp->m_log)) {
3308 			xfs_iunpin_wait(ip);
3309 			xfs_iflush_abort(ip);
3310 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3311 			error = -EIO;
3312 			continue;
3313 		}
3314 
3315 		/* don't block waiting on a log force to unpin dirty inodes */
3316 		if (xfs_ipincount(ip)) {
3317 			xfs_iflags_clear(ip, XFS_IFLUSHING);
3318 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3319 			continue;
3320 		}
3321 
3322 		if (!xfs_inode_clean(ip))
3323 			error = xfs_iflush(ip, bp);
3324 		else
3325 			xfs_iflags_clear(ip, XFS_IFLUSHING);
3326 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
3327 		if (error)
3328 			break;
3329 		clcount++;
3330 	}
3331 
3332 	if (error) {
3333 		/*
3334 		 * Shutdown first so we kill the log before we release this
3335 		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
3336 		 * of the log, failing it before the _log_ is shut down can
3337 		 * result in the log tail being moved forward in the journal
3338 		 * on disk because log writes can still be taking place. Hence
3339 		 * unpinning the tail will allow the ICREATE intent to be
3340 		 * removed from the log and recovery will fail with uninitialised
3341 		 * inode cluster buffers.
3342 		 */
3343 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3344 		bp->b_flags |= XBF_ASYNC;
3345 		xfs_buf_ioend_fail(bp);
3346 		return error;
3347 	}
3348 
3349 	if (!clcount)
3350 		return -EAGAIN;
3351 
3352 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
3353 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3354 	return 0;
3356 }
3357 
3358 /* Release an inode. */
3359 void
3360 xfs_irele(
3361 	struct xfs_inode	*ip)
3362 {
3363 	trace_xfs_irele(ip, _RET_IP_);
3364 	iput(VFS_I(ip));
3365 }
3366 
3367 /*
3368  * Ensure all committed transactions touching the inode are written to the log.
3369  */
3370 int
3371 xfs_log_force_inode(
3372 	struct xfs_inode	*ip)
3373 {
3374 	xfs_csn_t		seq = 0;
3375 
3376 	xfs_ilock(ip, XFS_ILOCK_SHARED);
3377 	if (xfs_ipincount(ip))
3378 		seq = ip->i_itemp->ili_commit_seq;
3379 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3380 
3381 	if (!seq)
3382 		return 0;
3383 	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3384 }
3385 
3386 /*
3387  * Grab the exclusive iolock for a data copy from src to dest, making sure to
3388  * abide by the vfs locking order (lowest pointer value goes first) and breaking
3389  * layout leases before proceeding.  The loop is needed because we cannot call
3390  * the blocking break_layout() with the iolocks held, and therefore have to
3391  * back out both locks.
3392  */
3393 static int
3394 xfs_iolock_two_inodes_and_break_layout(
3395 	struct inode		*src,
3396 	struct inode		*dest)
3397 {
3398 	int			error;
3399 
3400 	if (src > dest)
3401 		swap(src, dest);
3402 
3403 retry:
3404 	/* Wait to break both inodes' layouts before we start locking. */
3405 	error = break_layout(src, true);
3406 	if (error)
3407 		return error;
3408 	if (src != dest) {
3409 		error = break_layout(dest, true);
3410 		if (error)
3411 			return error;
3412 	}
3413 
3414 	/* Lock one inode and make sure nobody got in and leased it. */
3415 	inode_lock(src);
3416 	error = break_layout(src, false);
3417 	if (error) {
3418 		inode_unlock(src);
3419 		if (error == -EWOULDBLOCK)
3420 			goto retry;
3421 		return error;
3422 	}
3423 
3424 	if (src == dest)
3425 		return 0;
3426 
3427 	/* Lock the other inode and make sure nobody got in and leased it. */
3428 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3429 	error = break_layout(dest, false);
3430 	if (error) {
3431 		inode_unlock(src);
3432 		inode_unlock(dest);
3433 		if (error == -EWOULDBLOCK)
3434 			goto retry;
3435 		return error;
3436 	}
3437 
3438 	return 0;
3439 }
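
/*
 * Sorting by pointer value above gives all callers a single global lock
 * order for any pair of inodes, which is what rules out ABBA deadlocks:
 * two tasks copying A->B and B->A both lock min(A, B) first.  The same
 * idea appears in xfs_mmaplock_two_inodes_and_break_dax_layout() below,
 * keyed on i_ino instead of the pointer value.
 */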
3440 
3441 static int
3442 xfs_mmaplock_two_inodes_and_break_dax_layout(
3443 	struct xfs_inode	*ip1,
3444 	struct xfs_inode	*ip2)
3445 {
3446 	int			error;
3447 	bool			retry;
3448 	struct page		*page;
3449 
3450 	if (ip1->i_ino > ip2->i_ino)
3451 		swap(ip1, ip2);
3452 
3453 again:
3454 	retry = false;
3455 	/* Lock the first inode */
3456 	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3457 	error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
3458 	if (error || retry) {
3459 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3460 		if (error == 0 && retry)
3461 			goto again;
3462 		return error;
3463 	}
3464 
3465 	if (ip1 == ip2)
3466 		return 0;
3467 
3468 	/* Nested lock the second inode */
3469 	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
3470 	/*
3471 	 * We cannot use xfs_break_dax_layouts() directly here because it may
3472 	 * need to unlock and relock XFS_MMAPLOCK_EXCL, which is not suitable
3473 	 * for this nested lock case.
3474 	 */
3475 	page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
3476 	if (page && page_ref_count(page) != 1) {
3477 		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3478 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3479 		goto again;
3480 	}
3481 
3482 	return 0;
3483 }
3484 
3485 /*
3486  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3487  * mmap activity.
3488  */
3489 int
3490 xfs_ilock2_io_mmap(
3491 	struct xfs_inode	*ip1,
3492 	struct xfs_inode	*ip2)
3493 {
3494 	int			ret;
3495 
3496 	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3497 	if (ret)
3498 		return ret;
3499 
3500 	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3501 		ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
3502 		if (ret) {
3503 			inode_unlock(VFS_I(ip2));
3504 			if (ip1 != ip2)
3505 				inode_unlock(VFS_I(ip1));
3506 			return ret;
3507 		}
3508 	} else
3509 		filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3510 					    VFS_I(ip2)->i_mapping);
3511 
3512 	return 0;
3513 }
3514 
3515 /* Unlock both inodes to allow IO and mmap activity. */
3516 void
3517 xfs_iunlock2_io_mmap(
3518 	struct xfs_inode	*ip1,
3519 	struct xfs_inode	*ip2)
3520 {
3521 	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3522 		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3523 		if (ip1 != ip2)
3524 			xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3525 	} else
3526 		filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3527 					      VFS_I(ip2)->i_mapping);
3528 
3529 	inode_unlock(VFS_I(ip2));
3530 	if (ip1 != ip2)
3531 		inode_unlock(VFS_I(ip1));
3532 }
3533