xref: /openbmc/linux/fs/xfs/xfs_inode.c (revision 4bb1eb3c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include <linux/iversion.h>
7 
8 #include "xfs.h"
9 #include "xfs_fs.h"
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_sb.h"
15 #include "xfs_mount.h"
16 #include "xfs_defer.h"
17 #include "xfs_inode.h"
18 #include "xfs_dir2.h"
19 #include "xfs_attr.h"
20 #include "xfs_trans_space.h"
21 #include "xfs_trans.h"
22 #include "xfs_buf_item.h"
23 #include "xfs_inode_item.h"
24 #include "xfs_ialloc.h"
25 #include "xfs_bmap.h"
26 #include "xfs_bmap_util.h"
27 #include "xfs_errortag.h"
28 #include "xfs_error.h"
29 #include "xfs_quota.h"
30 #include "xfs_filestream.h"
31 #include "xfs_trace.h"
32 #include "xfs_icache.h"
33 #include "xfs_symlink.h"
34 #include "xfs_trans_priv.h"
35 #include "xfs_log.h"
36 #include "xfs_bmap_btree.h"
37 #include "xfs_reflink.h"
38 
39 kmem_zone_t *xfs_inode_zone;
40 
41 /*
42  * Used in xfs_itruncate_extents().  This is the maximum number of extents
43  * freed from a file in a single transaction.
44  */
45 #define	XFS_ITRUNC_MAX_EXTENTS	2
46 
47 STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
48 STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
49 
50 /*
51  * Helper function to extract the extent size hint from an inode.
52  */
53 xfs_extlen_t
54 xfs_get_extsz_hint(
55 	struct xfs_inode	*ip)
56 {
57 	/*
58 	 * No point in aligning allocations if we need to COW to actually
59 	 * write to them.
60 	 */
61 	if (xfs_is_always_cow_inode(ip))
62 		return 0;
63 	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
64 		return ip->i_d.di_extsize;
65 	if (XFS_IS_REALTIME_INODE(ip))
66 		return ip->i_mount->m_sb.sb_rextsize;
67 	return 0;
68 }
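
/*
 * Worked example of the precedence implemented in xfs_get_extsz_hint()
 * above (an illustrative note, not from the original source): an inode
 * with XFS_DIFLAG_EXTSIZE set and di_extsize = 16 reports a 16 block
 * hint; a realtime inode without an explicit hint falls back to
 * sb_rextsize; an always-COW inode reports 0 because aligning
 * allocations buys nothing there.
 */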
69 
70 /*
71  * Helper function to extract CoW extent size hint from inode.
72  * Between the extent size hint and the CoW extent size hint, we
73  * return the greater of the two.  If the value is zero (automatic),
74  * use the default size.
75  */
76 xfs_extlen_t
77 xfs_get_cowextsz_hint(
78 	struct xfs_inode	*ip)
79 {
80 	xfs_extlen_t		a, b;
81 
82 	a = 0;
83 	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
84 		a = ip->i_d.di_cowextsize;
85 	b = xfs_get_extsz_hint(ip);
86 
87 	a = max(a, b);
88 	if (a == 0)
89 		return XFS_DEFAULT_COWEXTSZ_HINT;
90 	return a;
91 }
92 
93 /*
94  * These two are wrapper routines around the xfs_ilock() routine used to
95  * centralize some grungy code.  They are used in places that wish to lock the
96  * inode solely for reading the extents.  The reason these places can't just
97  * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
98  * bringing in of the extents from disk for a file in b-tree format.  If the
99  * inode is in b-tree format, then we need to lock the inode exclusively until
100  * the extents are read in.  Locking it exclusively all the time would limit
101  * our parallelism unnecessarily, though.  What we do instead is check to see
102  * if the extents have been read in yet, and only lock the inode exclusively
103  * if they have not.
104  *
105  * The functions return a value which should be given to the corresponding
106  * xfs_iunlock() call.
107  */
108 uint
109 xfs_ilock_data_map_shared(
110 	struct xfs_inode	*ip)
111 {
112 	uint			lock_mode = XFS_ILOCK_SHARED;
113 
114 	if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
115 	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
116 		lock_mode = XFS_ILOCK_EXCL;
117 	xfs_ilock(ip, lock_mode);
118 	return lock_mode;
119 }
120 
121 uint
122 xfs_ilock_attr_map_shared(
123 	struct xfs_inode	*ip)
124 {
125 	uint			lock_mode = XFS_ILOCK_SHARED;
126 
127 	if (ip->i_afp &&
128 	    ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
129 	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
130 		lock_mode = XFS_ILOCK_EXCL;
131 	xfs_ilock(ip, lock_mode);
132 	return lock_mode;
133 }
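
/*
 * Hypothetical caller sketch for xfs_ilock_data_map_shared() and
 * xfs_ilock_attr_map_shared() above: take the weakest lock that is still
 * safe for reading the extent list, then drop it with the mode that was
 * actually granted:
 *
 *	uint lock_mode = xfs_ilock_data_map_shared(ip);
 *	... walk the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */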
134 
135 /*
136  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
137  * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
138  * various combinations of the locks to be obtained.
139  *
140  * The 3 locks should always be ordered so that the IO lock is obtained first,
141  * the mmap lock second and the ilock last in order to prevent deadlock.
142  *
143  * Basic locking order:
144  *
145  * i_rwsem -> i_mmap_lock -> page_lock -> i_lock
146  *
147  * mmap_lock locking order:
148  *
149  * i_rwsem -> page_lock -> mmap_lock
150  * mmap_lock -> i_mmap_lock -> page_lock
151  *
152  * The difference in mmap_lock locking order means that we cannot hold the
153  * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
154  * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
155  * in get_user_pages() to map the user pages into the kernel address space for
156  * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
157  * page faults already hold the mmap_lock.
158  *
159  * Hence to serialise fully against both syscall and mmap based IO, we need to
160  * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
161  * taken in places where we need to invalidate the page cache in a race
162  * free manner (e.g. truncate, hole punch and other extent manipulation
163  * functions).
164  */
165 void
166 xfs_ilock(
167 	xfs_inode_t		*ip,
168 	uint			lock_flags)
169 {
170 	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
171 
172 	/*
173 	 * You can't set both SHARED and EXCL for the same lock,
174 	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
175 	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
176 	 */
177 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
178 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
179 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
180 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
181 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
182 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
183 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
184 
185 	if (lock_flags & XFS_IOLOCK_EXCL) {
186 		down_write_nested(&VFS_I(ip)->i_rwsem,
187 				  XFS_IOLOCK_DEP(lock_flags));
188 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
189 		down_read_nested(&VFS_I(ip)->i_rwsem,
190 				 XFS_IOLOCK_DEP(lock_flags));
191 	}
192 
193 	if (lock_flags & XFS_MMAPLOCK_EXCL)
194 		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
195 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
196 		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
197 
198 	if (lock_flags & XFS_ILOCK_EXCL)
199 		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
200 	else if (lock_flags & XFS_ILOCK_SHARED)
201 		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
202 }
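
/*
 * Sketch of the dual-lock pattern described above, as a truncate or
 * hole-punch style caller might use it (an assumed example, not a caller
 * from this file):
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 *	... invalidate the page cache and manipulate extents ...
 *	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 */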
203 
204 /*
205  * This is just like xfs_ilock(), except that the caller
206  * is guaranteed not to sleep.  It returns 1 if it gets
207  * the requested locks and 0 otherwise.  If the IO lock is
208  * obtained but the inode lock cannot be, then the IO lock
209  * is dropped before returning.
210  *
211  * ip -- the inode being locked
212  * lock_flags -- this parameter indicates which of the inode's locks
213  *       are to be locked.  See the comment for xfs_ilock() for a list
214  *	 of valid values.
215  */
216 int
217 xfs_ilock_nowait(
218 	xfs_inode_t		*ip,
219 	uint			lock_flags)
220 {
221 	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
222 
223 	/*
224 	 * You can't set both SHARED and EXCL for the same lock,
225 	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
226 	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
227 	 */
228 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
229 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
230 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
231 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
232 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
233 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
234 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
235 
236 	if (lock_flags & XFS_IOLOCK_EXCL) {
237 		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
238 			goto out;
239 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
240 		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
241 			goto out;
242 	}
243 
244 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
245 		if (!mrtryupdate(&ip->i_mmaplock))
246 			goto out_undo_iolock;
247 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
248 		if (!mrtryaccess(&ip->i_mmaplock))
249 			goto out_undo_iolock;
250 	}
251 
252 	if (lock_flags & XFS_ILOCK_EXCL) {
253 		if (!mrtryupdate(&ip->i_lock))
254 			goto out_undo_mmaplock;
255 	} else if (lock_flags & XFS_ILOCK_SHARED) {
256 		if (!mrtryaccess(&ip->i_lock))
257 			goto out_undo_mmaplock;
258 	}
259 	return 1;
260 
261 out_undo_mmaplock:
262 	if (lock_flags & XFS_MMAPLOCK_EXCL)
263 		mrunlock_excl(&ip->i_mmaplock);
264 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
265 		mrunlock_shared(&ip->i_mmaplock);
266 out_undo_iolock:
267 	if (lock_flags & XFS_IOLOCK_EXCL)
268 		up_write(&VFS_I(ip)->i_rwsem);
269 	else if (lock_flags & XFS_IOLOCK_SHARED)
270 		up_read(&VFS_I(ip)->i_rwsem);
271 out:
272 	return 0;
273 }
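
/*
 * Typical xfs_ilock_nowait() trylock pattern (xfs_release() later in
 * this file uses this exact shape to avoid deadlocking against the
 * mmap_lock):
 *
 *	if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
 *		error = xfs_free_eofblocks(ip);
 *		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 *	}
 */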
274 
275 /*
276  * xfs_iunlock() is used to drop the inode locks acquired with
277  * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
278  * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
279  * that we know which locks to drop.
280  *
281  * ip -- the inode being unlocked
282  * lock_flags -- this parameter indicates which of the inode's locks
283  *       are to be unlocked.  See the comment for xfs_ilock() for a list
284  *	 of valid values for this parameter.
285  *
286  */
287 void
288 xfs_iunlock(
289 	xfs_inode_t		*ip,
290 	uint			lock_flags)
291 {
292 	/*
293 	 * You can't set both SHARED and EXCL for the same lock,
294 	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
295 	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
296 	 */
297 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
298 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
299 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
300 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
301 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
302 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
303 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
304 	ASSERT(lock_flags != 0);
305 
306 	if (lock_flags & XFS_IOLOCK_EXCL)
307 		up_write(&VFS_I(ip)->i_rwsem);
308 	else if (lock_flags & XFS_IOLOCK_SHARED)
309 		up_read(&VFS_I(ip)->i_rwsem);
310 
311 	if (lock_flags & XFS_MMAPLOCK_EXCL)
312 		mrunlock_excl(&ip->i_mmaplock);
313 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
314 		mrunlock_shared(&ip->i_mmaplock);
315 
316 	if (lock_flags & XFS_ILOCK_EXCL)
317 		mrunlock_excl(&ip->i_lock);
318 	else if (lock_flags & XFS_ILOCK_SHARED)
319 		mrunlock_shared(&ip->i_lock);
320 
321 	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
322 }
323 
324 /*
325  * Give up write locks.  The I/O lock cannot be held nested
326  * if it is being demoted.
327  */
328 void
329 xfs_ilock_demote(
330 	xfs_inode_t		*ip,
331 	uint			lock_flags)
332 {
333 	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
334 	ASSERT((lock_flags &
335 		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
336 
337 	if (lock_flags & XFS_ILOCK_EXCL)
338 		mrdemote(&ip->i_lock);
339 	if (lock_flags & XFS_MMAPLOCK_EXCL)
340 		mrdemote(&ip->i_mmaplock);
341 	if (lock_flags & XFS_IOLOCK_EXCL)
342 		downgrade_write(&VFS_I(ip)->i_rwsem);
343 
344 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
345 }
346 
347 #if defined(DEBUG) || defined(XFS_WARN)
348 int
349 xfs_isilocked(
350 	xfs_inode_t		*ip,
351 	uint			lock_flags)
352 {
353 	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
354 		if (!(lock_flags & XFS_ILOCK_SHARED))
355 			return !!ip->i_lock.mr_writer;
356 		return rwsem_is_locked(&ip->i_lock.mr_lock);
357 	}
358 
359 	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
360 		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
361 			return !!ip->i_mmaplock.mr_writer;
362 		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
363 	}
364 
365 	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
366 		if (!(lock_flags & XFS_IOLOCK_SHARED))
367 			return !debug_locks ||
368 				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
369 		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
370 	}
371 
372 	ASSERT(0);
373 	return 0;
374 }
375 #endif
376 
377 /*
378  * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
379  * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
380  * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
381  * errors and warnings.
382  */
383 #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
384 static bool
385 xfs_lockdep_subclass_ok(
386 	int subclass)
387 {
388 	return subclass < MAX_LOCKDEP_SUBCLASSES;
389 }
390 #else
391 #define xfs_lockdep_subclass_ok(subclass)	(true)
392 #endif
393 
394 /*
395  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
396  * value. This can be called for any type of inode lock combination, including
397  * parent locking. Care must be taken to ensure we don't overrun the subclass
398  * storage fields in the class mask we build.
399  */
400 static inline int
401 xfs_lock_inumorder(int lock_mode, int subclass)
402 {
403 	int	class = 0;
404 
405 	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
406 			      XFS_ILOCK_RTSUM)));
407 	ASSERT(xfs_lockdep_subclass_ok(subclass));
408 
409 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
410 		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
411 		class += subclass << XFS_IOLOCK_SHIFT;
412 	}
413 
414 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
415 		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
416 		class += subclass << XFS_MMAPLOCK_SHIFT;
417 	}
418 
419 	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
420 		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
421 		class += subclass << XFS_ILOCK_SHIFT;
422 	}
423 
424 	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
425 }
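
/*
 * Example: when xfs_lock_inodes() below locks the i-th inode of an
 * ordered set with XFS_ILOCK_EXCL, it passes subclass i, so the value
 * built by xfs_lock_inumorder() is XFS_ILOCK_EXCL | (i << XFS_ILOCK_SHIFT)
 * and lockdep sees a distinct subclass for every inode in the set.
 */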
426 
427 /*
428  * The following routine will lock n inodes in exclusive mode.  We assume the
429  * caller calls us with the inodes in i_ino order.
430  *
431  * We need to detect deadlock where an inode that we lock is in the AIL and we
432  * start waiting for another inode that is locked by a thread in a long running
433  * transaction (such as truncate). This can result in deadlock since the long
434  * running trans might need to wait for the inode we just locked in order to
435  * push the tail and free space in the log.
436  *
437  * xfs_lock_inodes() can only be used to lock one type of lock at a time -
438  * the iolock, the mmaplock or the ilock, but never more than one type. If we
439  * lock more than one at a time, lockdep will report false positives saying we
440  * have violated locking orders.
441  */
442 static void
443 xfs_lock_inodes(
444 	struct xfs_inode	**ips,
445 	int			inodes,
446 	uint			lock_mode)
447 {
448 	int			attempts = 0, i, j, try_lock;
449 	struct xfs_log_item	*lp;
450 
451 	/*
452 	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
453 	 * support an arbitrary depth of locking here, but absolute limits on
454 	 * inodes depend on the type of locking and the limits placed by
455 	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
456 	 * the asserts.
457 	 */
458 	ASSERT(ips && inodes >= 2 && inodes <= 5);
459 	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
460 			    XFS_ILOCK_EXCL));
461 	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
462 			      XFS_ILOCK_SHARED)));
463 	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
464 		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
465 	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
466 		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
467 
468 	if (lock_mode & XFS_IOLOCK_EXCL) {
469 		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
470 	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
471 		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
472 
473 	try_lock = 0;
474 	i = 0;
475 again:
476 	for (; i < inodes; i++) {
477 		ASSERT(ips[i]);
478 
479 		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
480 			continue;
481 
482 		/*
483 		 * If try_lock is not set yet, make sure all locked inodes are
484 		 * not in the AIL.  If any are, set try_lock to be used later.
485 		 */
486 		if (!try_lock) {
487 			for (j = (i - 1); j >= 0 && !try_lock; j--) {
488 				lp = &ips[j]->i_itemp->ili_item;
489 				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
490 					try_lock++;
491 			}
492 		}
493 
494 		/*
495 		 * If any of the inodes we have already locked is in the AIL,
496 		 * we must TRY to get the second and subsequent locks. If
497 		 * we can't get one of them, we must release all we have
498 		 * and try again.
499 		 */
500 		if (!try_lock) {
501 			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
502 			continue;
503 		}
504 
505 		/* try_lock means we have an inode locked that is in the AIL. */
506 		ASSERT(i != 0);
507 		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
508 			continue;
509 
510 		/*
511 		 * Unlock all previously locked inodes and try again.  xfs_iunlock will try
512 		 * to push the tail if the inode is in the AIL.
513 		 */
514 		attempts++;
515 		for (j = i - 1; j >= 0; j--) {
516 			/*
517 			 * Check to see if we've already unlocked this one: it is
518 			 * not the first one going back and the inode ptr is the
519 			 * same.
520 			 */
521 			if (j != (i - 1) && ips[j] == ips[j + 1])
522 				continue;
523 
524 			xfs_iunlock(ips[j], lock_mode);
525 		}
526 
527 		if ((attempts % 5) == 0) {
528 			delay(1); /* Don't just spin the CPU */
529 		}
530 		i = 0;
531 		try_lock = 0;
532 		goto again;
533 	}
534 }
535 
536 /*
537  * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
538  * the mmaplock or the ilock, never both at once. If we lock
539  * more than one at a time, lockdep will report false positives saying we have
540  * violated locking orders.  The iolock must be double-locked separately since
541  * we use i_rwsem for that.  We now support taking one lock EXCL and the other
542  * SHARED.
543  */
544 void
545 xfs_lock_two_inodes(
546 	struct xfs_inode	*ip0,
547 	uint			ip0_mode,
548 	struct xfs_inode	*ip1,
549 	uint			ip1_mode)
550 {
553 	int			attempts = 0;
554 	struct xfs_log_item	*lp;
555 
556 	ASSERT(hweight32(ip0_mode) == 1);
557 	ASSERT(hweight32(ip1_mode) == 1);
558 	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
559 	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
560 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
561 	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
562 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
563 	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
564 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
565 	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
566 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
567 	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
568 
569 	ASSERT(ip0->i_ino != ip1->i_ino);
570 
571 	if (ip0->i_ino > ip1->i_ino) {
572 		swap(ip0, ip1);
573 		swap(ip0_mode, ip1_mode);
578 	}
579 
580  again:
581 	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
582 
583 	/*
584 	 * If the first lock we have locked is in the AIL, we must TRY to get
585 	 * the second lock. If we can't get it, we must release the first one
586 	 * and try again.
587 	 */
588 	lp = &ip0->i_itemp->ili_item;
589 	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
590 		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
591 			xfs_iunlock(ip0, ip0_mode);
592 			if ((++attempts % 5) == 0)
593 				delay(1); /* Don't just spin the CPU */
594 			goto again;
595 		}
596 	} else {
597 		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
598 	}
599 }
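
/*
 * Example usage of xfs_lock_two_inodes() (see xfs_link() later in this
 * file): lock the source inode and the target directory in inode-number
 * order before joining both to the transaction:
 *
 *	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
 */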
600 
601 void
602 __xfs_iflock(
603 	struct xfs_inode	*ip)
604 {
605 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
606 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
607 
608 	do {
609 		prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
610 		if (xfs_isiflocked(ip))
611 			io_schedule();
612 	} while (!xfs_iflock_nowait(ip));
613 
614 	finish_wait(wq, &wait.wq_entry);
615 }
616 
617 STATIC uint
618 _xfs_dic2xflags(
619 	uint16_t		di_flags,
620 	uint64_t		di_flags2,
621 	bool			has_attr)
622 {
623 	uint			flags = 0;
624 
625 	if (di_flags & XFS_DIFLAG_ANY) {
626 		if (di_flags & XFS_DIFLAG_REALTIME)
627 			flags |= FS_XFLAG_REALTIME;
628 		if (di_flags & XFS_DIFLAG_PREALLOC)
629 			flags |= FS_XFLAG_PREALLOC;
630 		if (di_flags & XFS_DIFLAG_IMMUTABLE)
631 			flags |= FS_XFLAG_IMMUTABLE;
632 		if (di_flags & XFS_DIFLAG_APPEND)
633 			flags |= FS_XFLAG_APPEND;
634 		if (di_flags & XFS_DIFLAG_SYNC)
635 			flags |= FS_XFLAG_SYNC;
636 		if (di_flags & XFS_DIFLAG_NOATIME)
637 			flags |= FS_XFLAG_NOATIME;
638 		if (di_flags & XFS_DIFLAG_NODUMP)
639 			flags |= FS_XFLAG_NODUMP;
640 		if (di_flags & XFS_DIFLAG_RTINHERIT)
641 			flags |= FS_XFLAG_RTINHERIT;
642 		if (di_flags & XFS_DIFLAG_PROJINHERIT)
643 			flags |= FS_XFLAG_PROJINHERIT;
644 		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
645 			flags |= FS_XFLAG_NOSYMLINKS;
646 		if (di_flags & XFS_DIFLAG_EXTSIZE)
647 			flags |= FS_XFLAG_EXTSIZE;
648 		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
649 			flags |= FS_XFLAG_EXTSZINHERIT;
650 		if (di_flags & XFS_DIFLAG_NODEFRAG)
651 			flags |= FS_XFLAG_NODEFRAG;
652 		if (di_flags & XFS_DIFLAG_FILESTREAM)
653 			flags |= FS_XFLAG_FILESTREAM;
654 	}
655 
656 	if (di_flags2 & XFS_DIFLAG2_ANY) {
657 		if (di_flags2 & XFS_DIFLAG2_DAX)
658 			flags |= FS_XFLAG_DAX;
659 		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
660 			flags |= FS_XFLAG_COWEXTSIZE;
661 	}
662 
663 	if (has_attr)
664 		flags |= FS_XFLAG_HASATTR;
665 
666 	return flags;
667 }
668 
669 uint
670 xfs_ip2xflags(
671 	struct xfs_inode	*ip)
672 {
673 	struct xfs_icdinode	*dic = &ip->i_d;
674 
675 	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
676 }
677 
678 /*
679  * Looks up an inode from "name". If ci_name is not NULL, then a CI match
680  * is allowed, otherwise it has to be an exact match. If a CI match is found,
681  * ci_name->name will point to the actual name (caller must free) or
682  * will be set to NULL if an exact match is found.
683  */
684 int
685 xfs_lookup(
686 	xfs_inode_t		*dp,
687 	struct xfs_name		*name,
688 	xfs_inode_t		**ipp,
689 	struct xfs_name		*ci_name)
690 {
691 	xfs_ino_t		inum;
692 	int			error;
693 
694 	trace_xfs_lookup(dp, name);
695 
696 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
697 		return -EIO;
698 
699 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
700 	if (error)
701 		goto out_unlock;
702 
703 	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
704 	if (error)
705 		goto out_free_name;
706 
707 	return 0;
708 
709 out_free_name:
710 	if (ci_name)
711 		kmem_free(ci_name->name);
712 out_unlock:
713 	*ipp = NULL;
714 	return error;
715 }
716 
717 /*
718  * Allocate an inode on disk and return a copy of its in-core version.
719  * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
720  * appropriately within the inode.  The uid and gid for the inode are
721  * set according to the contents of the given cred structure.
722  *
723  * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
724  * has a free inode available, call xfs_iget() to obtain the in-core
725  * version of the allocated inode.  Finally, fill in the inode and
726  * log its initial contents.  In this case, ialloc_context would be
727  * set to NULL.
728  *
729  * If xfs_dialloc() does not have an available inode, it will replenish
730  * its supply by doing an allocation. Since we can only do one
731  * allocation within a transaction without deadlocks, we must commit
732  * the current transaction before returning the inode itself.
733  * In this case, therefore, we will set ialloc_context and return.
734  * The caller should then commit the current transaction, start a new
735  * transaction, and call xfs_ialloc() again to actually get the inode.
736  *
737  * To ensure that some other process does not grab the inode that
738  * was allocated during the first call to xfs_ialloc(), this routine
739  * also returns the [locked] bp pointing to the head of the freelist
740  * as ialloc_context.  The caller should hold this buffer across
741  * the commit and pass it back into this routine on the second call.
742  *
743  * If we are allocating quota inodes, we do not have a parent inode
744  * to attach to or associate with (i.e. pip == NULL) because they
745  * are not linked into the directory structure - they are attached
746  * directly to the superblock - and so have no parent.
747  */
748 static int
749 xfs_ialloc(
750 	xfs_trans_t	*tp,
751 	xfs_inode_t	*pip,
752 	umode_t		mode,
753 	xfs_nlink_t	nlink,
754 	dev_t		rdev,
755 	prid_t		prid,
756 	xfs_buf_t	**ialloc_context,
757 	xfs_inode_t	**ipp)
758 {
759 	struct xfs_mount *mp = tp->t_mountp;
760 	xfs_ino_t	ino;
761 	xfs_inode_t	*ip;
762 	uint		flags;
763 	int		error;
764 	struct timespec64 tv;
765 	struct inode	*inode;
766 
767 	/*
768 	 * Call the space management code to pick
769 	 * the on-disk inode to be allocated.
770 	 */
771 	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
772 			    ialloc_context, &ino);
773 	if (error)
774 		return error;
775 	if (*ialloc_context || ino == NULLFSINO) {
776 		*ipp = NULL;
777 		return 0;
778 	}
779 	ASSERT(*ialloc_context == NULL);
780 
781 	/*
782 	 * Protect against obviously corrupt allocation btree records. Later
783 	 * xfs_iget checks will catch re-allocation of other active in-memory
784 	 * and on-disk inodes. If we don't catch reallocating the parent inode
785 	 * here we will deadlock in xfs_iget() so we have to do these checks
786 	 * first.
787 	 */
788 	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
789 		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
790 		return -EFSCORRUPTED;
791 	}
792 
793 	/*
794 	 * Get the in-core inode with the lock held exclusively.
795 	 * This is because we're setting fields here we need
796 	 * to prevent others from looking at until we're done.
797 	 */
798 	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
799 			 XFS_ILOCK_EXCL, &ip);
800 	if (error)
801 		return error;
802 	ASSERT(ip != NULL);
803 	inode = VFS_I(ip);
804 	inode->i_mode = mode;
805 	set_nlink(inode, nlink);
806 	inode->i_uid = current_fsuid();
807 	inode->i_rdev = rdev;
808 	ip->i_d.di_projid = prid;
809 
810 	if (pip && XFS_INHERIT_GID(pip)) {
811 		inode->i_gid = VFS_I(pip)->i_gid;
812 		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
813 			inode->i_mode |= S_ISGID;
814 	} else {
815 		inode->i_gid = current_fsgid();
816 	}
817 
818 	/*
819 	 * If the group ID of the new file does not match the effective group
820 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
821 	 * (but only if the irix_sgid_inherit compatibility variable is set).
822 	 */
823 	if (irix_sgid_inherit &&
824 	    (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
825 		inode->i_mode &= ~S_ISGID;
826 
827 	ip->i_d.di_size = 0;
828 	ip->i_df.if_nextents = 0;
829 	ASSERT(ip->i_d.di_nblocks == 0);
830 
831 	tv = current_time(inode);
832 	inode->i_mtime = tv;
833 	inode->i_atime = tv;
834 	inode->i_ctime = tv;
835 
836 	ip->i_d.di_extsize = 0;
837 	ip->i_d.di_dmevmask = 0;
838 	ip->i_d.di_dmstate = 0;
839 	ip->i_d.di_flags = 0;
840 
841 	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
842 		inode_set_iversion(inode, 1);
843 		ip->i_d.di_flags2 = 0;
844 		ip->i_d.di_cowextsize = 0;
845 		ip->i_d.di_crtime = tv;
846 	}
847 
848 	flags = XFS_ILOG_CORE;
849 	switch (mode & S_IFMT) {
850 	case S_IFIFO:
851 	case S_IFCHR:
852 	case S_IFBLK:
853 	case S_IFSOCK:
854 		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
855 		ip->i_df.if_flags = 0;
856 		flags |= XFS_ILOG_DEV;
857 		break;
858 	case S_IFREG:
859 	case S_IFDIR:
860 		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
861 			uint		di_flags = 0;
862 
863 			if (S_ISDIR(mode)) {
864 				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
865 					di_flags |= XFS_DIFLAG_RTINHERIT;
866 				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
867 					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
868 					ip->i_d.di_extsize = pip->i_d.di_extsize;
869 				}
870 				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
871 					di_flags |= XFS_DIFLAG_PROJINHERIT;
872 			} else if (S_ISREG(mode)) {
873 				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
874 					di_flags |= XFS_DIFLAG_REALTIME;
875 				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
876 					di_flags |= XFS_DIFLAG_EXTSIZE;
877 					ip->i_d.di_extsize = pip->i_d.di_extsize;
878 				}
879 			}
880 			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
881 			    xfs_inherit_noatime)
882 				di_flags |= XFS_DIFLAG_NOATIME;
883 			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
884 			    xfs_inherit_nodump)
885 				di_flags |= XFS_DIFLAG_NODUMP;
886 			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
887 			    xfs_inherit_sync)
888 				di_flags |= XFS_DIFLAG_SYNC;
889 			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
890 			    xfs_inherit_nosymlinks)
891 				di_flags |= XFS_DIFLAG_NOSYMLINKS;
892 			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
893 			    xfs_inherit_nodefrag)
894 				di_flags |= XFS_DIFLAG_NODEFRAG;
895 			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
896 				di_flags |= XFS_DIFLAG_FILESTREAM;
897 
898 			ip->i_d.di_flags |= di_flags;
899 		}
900 		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY)) {
901 			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
902 				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
903 				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
904 			}
905 			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
906 				ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
907 		}
908 		/* FALLTHROUGH */
909 	case S_IFLNK:
910 		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
911 		ip->i_df.if_flags = XFS_IFEXTENTS;
912 		ip->i_df.if_bytes = 0;
913 		ip->i_df.if_u1.if_root = NULL;
914 		break;
915 	default:
916 		ASSERT(0);
917 	}
918 
919 	/*
920 	 * Log the new values stuffed into the inode.
921 	 */
922 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
923 	xfs_trans_log_inode(tp, ip, flags);
924 
925 	/* now that we have an i_mode we can setup the inode structure */
926 	xfs_setup_inode(ip);
927 
928 	*ipp = ip;
929 	return 0;
930 }
931 
932 /*
933  * Allocates a new inode from disk and returns a pointer to the
934  * incore copy. This routine will internally commit the current
935  * transaction and allocate a new one if the Space Manager needed
936  * to do an allocation to replenish the inode free-list.
937  *
938  * This routine is designed to be called from xfs_create and
939  * xfs_create_dir.
940  *
941  */
942 int
943 xfs_dir_ialloc(
944 	xfs_trans_t	**tpp,		/* input: current transaction;
945 					   output: may be a new transaction. */
946 	xfs_inode_t	*dp,		/* directory within which to
947 					   allocate the inode. */
948 	umode_t		mode,
949 	xfs_nlink_t	nlink,
950 	dev_t		rdev,
951 	prid_t		prid,		/* project id */
952 	xfs_inode_t	**ipp)		/* pointer to inode; it will be
953 					   locked. */
954 {
955 	xfs_trans_t	*tp;
956 	xfs_inode_t	*ip;
957 	xfs_buf_t	*ialloc_context = NULL;
958 	int		code;
959 	void		*dqinfo;
960 	uint		tflags;
961 
962 	tp = *tpp;
963 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
964 
965 	/*
966 	 * xfs_ialloc will return a pointer to an incore inode if
967 	 * the Space Manager has an available inode on the free
968 	 * list. Otherwise, it will do an allocation and replenish
969 	 * the freelist.  Since we can only do one allocation per
970 	 * transaction without deadlocks, we will need to commit the
971 	 * current transaction and start a new one.  We will then
972 	 * need to call xfs_ialloc again to get the inode.
973 	 *
974 	 * If xfs_ialloc did an allocation to replenish the freelist,
975 	 * it returns the bp containing the head of the freelist as
976 	 * ialloc_context. We will hold a lock on it across the
977 	 * transaction commit so that no other process can steal
978 	 * the inode(s) that we've just allocated.
979 	 */
980 	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
981 			&ip);
982 
983 	/*
984 	 * Return an error if we were unable to allocate a new inode.
985 	 * This should only happen if we run out of space on disk or
986 	 * encounter a disk error.
987 	 */
988 	if (code) {
989 		*ipp = NULL;
990 		return code;
991 	}
992 	if (!ialloc_context && !ip) {
993 		*ipp = NULL;
994 		return -ENOSPC;
995 	}
996 
997 	/*
998 	 * If the AGI buffer is non-NULL, then we were unable to get an
999 	 * inode in one operation.  We need to commit the current
1000 	 * transaction and call xfs_ialloc() again.  It is guaranteed
1001 	 * to succeed the second time.
1002 	 */
1003 	if (ialloc_context) {
1004 		/*
1005 		 * Normally, xfs_trans_commit releases all the locks.
1006 		 * We call bhold to hang on to the ialloc_context across
1007 		 * the commit.  Holding this buffer prevents any other
1008 		 * processes from doing any allocations in this
1009 		 * allocation group.
1010 		 */
1011 		xfs_trans_bhold(tp, ialloc_context);
1012 
1013 		/*
1014 		 * We want the quota changes to be associated with the next
1015 		 * transaction, NOT this one. So, detach the dqinfo from this
1016 		 * and attach it to the next transaction.
1017 		 */
1018 		dqinfo = NULL;
1019 		tflags = 0;
1020 		if (tp->t_dqinfo) {
1021 			dqinfo = (void *)tp->t_dqinfo;
1022 			tp->t_dqinfo = NULL;
1023 			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
1024 			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
1025 		}
1026 
1027 		code = xfs_trans_roll(&tp);
1028 
1029 		/*
1030 		 * Re-attach the quota info that we detached from the previous transaction.
1031 		 */
1032 		if (dqinfo) {
1033 			tp->t_dqinfo = dqinfo;
1034 			tp->t_flags |= tflags;
1035 		}
1036 
1037 		if (code) {
1038 			xfs_buf_relse(ialloc_context);
1039 			*tpp = tp;
1040 			*ipp = NULL;
1041 			return code;
1042 		}
1043 		xfs_trans_bjoin(tp, ialloc_context);
1044 
1045 		/*
1046 		 * Call ialloc again. Since we've locked out all
1047 		 * other allocations in this allocation group,
1048 		 * this call should always succeed.
1049 		 */
1050 		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
1051 				  &ialloc_context, &ip);
1052 
1053 		/*
1054 		 * If we get an error at this point, return to the caller
1055 		 * so that the current transaction can be aborted.
1056 		 */
1057 		if (code) {
1058 			*tpp = tp;
1059 			*ipp = NULL;
1060 			return code;
1061 		}
1062 		ASSERT(!ialloc_context && ip);
1063 
1064 	}
1065 
1066 	*ipp = ip;
1067 	*tpp = tp;
1068 
1069 	return 0;
1070 }
1071 
1072 /*
1073  * Decrement the link count on an inode & log the change.  If this causes the
1074  * link count to go to zero, move the inode to the AGI unlinked list so it can
1075  * be freed when the last active reference goes away via xfs_inactive().
1076  */
1077 static int			/* error */
1078 xfs_droplink(
1079 	xfs_trans_t *tp,
1080 	xfs_inode_t *ip)
1081 {
1082 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1083 
1084 	drop_nlink(VFS_I(ip));
1085 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1086 
1087 	if (VFS_I(ip)->i_nlink)
1088 		return 0;
1089 
1090 	return xfs_iunlink(tp, ip);
1091 }
1092 
1093 /*
1094  * Increment the link count on an inode & log the change.
1095  */
1096 static void
1097 xfs_bumplink(
1098 	xfs_trans_t *tp,
1099 	xfs_inode_t *ip)
1100 {
1101 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1102 
1103 	inc_nlink(VFS_I(ip));
1104 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1105 }
1106 
1107 int
1108 xfs_create(
1109 	xfs_inode_t		*dp,
1110 	struct xfs_name		*name,
1111 	umode_t			mode,
1112 	dev_t			rdev,
1113 	xfs_inode_t		**ipp)
1114 {
1115 	int			is_dir = S_ISDIR(mode);
1116 	struct xfs_mount	*mp = dp->i_mount;
1117 	struct xfs_inode	*ip = NULL;
1118 	struct xfs_trans	*tp = NULL;
1119 	int			error;
1120 	bool                    unlock_dp_on_error = false;
1121 	prid_t			prid;
1122 	struct xfs_dquot	*udqp = NULL;
1123 	struct xfs_dquot	*gdqp = NULL;
1124 	struct xfs_dquot	*pdqp = NULL;
1125 	struct xfs_trans_res	*tres;
1126 	uint			resblks;
1127 
1128 	trace_xfs_create(dp, name);
1129 
1130 	if (XFS_FORCED_SHUTDOWN(mp))
1131 		return -EIO;
1132 
1133 	prid = xfs_get_initial_prid(dp);
1134 
1135 	/*
1136 	 * Make sure that we have allocated dquot(s) on disk.
1137 	 */
1138 	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
1139 					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1140 					&udqp, &gdqp, &pdqp);
1141 	if (error)
1142 		return error;
1143 
1144 	if (is_dir) {
1145 		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1146 		tres = &M_RES(mp)->tr_mkdir;
1147 	} else {
1148 		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1149 		tres = &M_RES(mp)->tr_create;
1150 	}
1151 
1152 	/*
1153 	 * Initially assume that the file does not exist and
1154 	 * reserve the resources for that case.  If that is not
1155 	 * the case we'll drop the one we have and get a more
1156 	 * appropriate transaction later.
1157 	 */
1158 	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1159 	if (error == -ENOSPC) {
1160 		/* flush outstanding delalloc blocks and retry */
1161 		xfs_flush_inodes(mp);
1162 		error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1163 	}
1164 	if (error)
1165 		goto out_release_inode;
1166 
1167 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1168 	unlock_dp_on_error = true;
1169 
1170 	/*
1171 	 * Reserve disk quota and the inode.
1172 	 */
1173 	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1174 						pdqp, resblks, 1, 0);
1175 	if (error)
1176 		goto out_trans_cancel;
1177 
1178 	/*
1179 	 * A newly created regular or special file just has one directory
1180 	 * entry pointing to it, but a directory also has the "." entry
1181 	 * pointing to itself.
1182 	 */
1183 	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
1184 	if (error)
1185 		goto out_trans_cancel;
1186 
1187 	/*
1188 	 * Now we join the directory inode to the transaction.  We do not do it
1189 	 * earlier because xfs_dir_ialloc might commit the previous transaction
1190 	 * (and release all the locks).  An error from here on will result in
1191 	 * the transaction cancel unlocking dp so don't do it explicitly in the
1192 	 * error path.
1193 	 */
1194 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1195 	unlock_dp_on_error = false;
1196 
1197 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1198 					resblks - XFS_IALLOC_SPACE_RES(mp));
1199 	if (error) {
1200 		ASSERT(error != -ENOSPC);
1201 		goto out_trans_cancel;
1202 	}
1203 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1204 	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1205 
1206 	if (is_dir) {
1207 		error = xfs_dir_init(tp, ip, dp);
1208 		if (error)
1209 			goto out_trans_cancel;
1210 
1211 		xfs_bumplink(tp, dp);
1212 	}
1213 
1214 	/*
1215 	 * If this is a synchronous mount, make sure that the
1216 	 * create transaction goes to disk before returning to
1217 	 * the user.
1218 	 */
1219 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1220 		xfs_trans_set_sync(tp);
1221 
1222 	/*
1223 	 * Attach the dquot(s) to the inodes and modify them incore.
1224 	 * The ids of the inode couldn't have changed since the new
1225 	 * inode has been locked ever since it was created.
1226 	 */
1227 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1228 
1229 	error = xfs_trans_commit(tp);
1230 	if (error)
1231 		goto out_release_inode;
1232 
1233 	xfs_qm_dqrele(udqp);
1234 	xfs_qm_dqrele(gdqp);
1235 	xfs_qm_dqrele(pdqp);
1236 
1237 	*ipp = ip;
1238 	return 0;
1239 
1240  out_trans_cancel:
1241 	xfs_trans_cancel(tp);
1242  out_release_inode:
1243 	/*
1244 	 * Wait until after the current transaction is aborted to finish the
1245 	 * setup of the inode and release the inode.  This prevents recursive
1246 	 * transactions and deadlocks from xfs_inactive.
1247 	 */
1248 	if (ip) {
1249 		xfs_finish_inode_setup(ip);
1250 		xfs_irele(ip);
1251 	}
1252 
1253 	xfs_qm_dqrele(udqp);
1254 	xfs_qm_dqrele(gdqp);
1255 	xfs_qm_dqrele(pdqp);
1256 
1257 	if (unlock_dp_on_error)
1258 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1259 	return error;
1260 }
1261 
1262 int
1263 xfs_create_tmpfile(
1264 	struct xfs_inode	*dp,
1265 	umode_t			mode,
1266 	struct xfs_inode	**ipp)
1267 {
1268 	struct xfs_mount	*mp = dp->i_mount;
1269 	struct xfs_inode	*ip = NULL;
1270 	struct xfs_trans	*tp = NULL;
1271 	int			error;
1272 	prid_t                  prid;
1273 	struct xfs_dquot	*udqp = NULL;
1274 	struct xfs_dquot	*gdqp = NULL;
1275 	struct xfs_dquot	*pdqp = NULL;
1276 	struct xfs_trans_res	*tres;
1277 	uint			resblks;
1278 
1279 	if (XFS_FORCED_SHUTDOWN(mp))
1280 		return -EIO;
1281 
1282 	prid = xfs_get_initial_prid(dp);
1283 
1284 	/*
1285 	 * Make sure that we have allocated dquot(s) on disk.
1286 	 */
1287 	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
1288 				XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1289 				&udqp, &gdqp, &pdqp);
1290 	if (error)
1291 		return error;
1292 
1293 	resblks = XFS_IALLOC_SPACE_RES(mp);
1294 	tres = &M_RES(mp)->tr_create_tmpfile;
1295 
1296 	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1297 	if (error)
1298 		goto out_release_inode;
1299 
1300 	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1301 						pdqp, resblks, 1, 0);
1302 	if (error)
1303 		goto out_trans_cancel;
1304 
1305 	error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
1306 	if (error)
1307 		goto out_trans_cancel;
1308 
1309 	if (mp->m_flags & XFS_MOUNT_WSYNC)
1310 		xfs_trans_set_sync(tp);
1311 
1312 	/*
1313 	 * Attach the dquot(s) to the inodes and modify them incore.
1314 	 * The ids of the inode couldn't have changed since the new
1315 	 * inode has been locked ever since it was created.
1316 	 */
1317 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1318 
1319 	error = xfs_iunlink(tp, ip);
1320 	if (error)
1321 		goto out_trans_cancel;
1322 
1323 	error = xfs_trans_commit(tp);
1324 	if (error)
1325 		goto out_release_inode;
1326 
1327 	xfs_qm_dqrele(udqp);
1328 	xfs_qm_dqrele(gdqp);
1329 	xfs_qm_dqrele(pdqp);
1330 
1331 	*ipp = ip;
1332 	return 0;
1333 
1334  out_trans_cancel:
1335 	xfs_trans_cancel(tp);
1336  out_release_inode:
1337 	/*
1338 	 * Wait until after the current transaction is aborted to finish the
1339 	 * setup of the inode and release the inode.  This prevents recursive
1340 	 * transactions and deadlocks from xfs_inactive.
1341 	 */
1342 	if (ip) {
1343 		xfs_finish_inode_setup(ip);
1344 		xfs_irele(ip);
1345 	}
1346 
1347 	xfs_qm_dqrele(udqp);
1348 	xfs_qm_dqrele(gdqp);
1349 	xfs_qm_dqrele(pdqp);
1350 
1351 	return error;
1352 }
1353 
1354 int
1355 xfs_link(
1356 	xfs_inode_t		*tdp,
1357 	xfs_inode_t		*sip,
1358 	struct xfs_name		*target_name)
1359 {
1360 	xfs_mount_t		*mp = tdp->i_mount;
1361 	xfs_trans_t		*tp;
1362 	int			error;
1363 	int			resblks;
1364 
1365 	trace_xfs_link(tdp, target_name);
1366 
1367 	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1368 
1369 	if (XFS_FORCED_SHUTDOWN(mp))
1370 		return -EIO;
1371 
1372 	error = xfs_qm_dqattach(sip);
1373 	if (error)
1374 		goto std_return;
1375 
1376 	error = xfs_qm_dqattach(tdp);
1377 	if (error)
1378 		goto std_return;
1379 
1380 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1381 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
1382 	if (error == -ENOSPC) {
1383 		resblks = 0;
1384 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1385 	}
1386 	if (error)
1387 		goto std_return;
1388 
1389 	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1390 
1391 	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1392 	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1393 
1394 	/*
1395 	 * If we are using project inheritance, we only allow hard link
1396 	 * creation in our tree when the project IDs are the same; else
1397 	 * the tree quota mechanism could be circumvented.
1398 	 */
1399 	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1400 		     tdp->i_d.di_projid != sip->i_d.di_projid)) {
1401 		error = -EXDEV;
1402 		goto error_return;
1403 	}
1404 
1405 	if (!resblks) {
1406 		error = xfs_dir_canenter(tp, tdp, target_name);
1407 		if (error)
1408 			goto error_return;
1409 	}
1410 
1411 	/*
1412 	 * Handle initial link state of O_TMPFILE inode
1413 	 */
1414 	if (VFS_I(sip)->i_nlink == 0) {
1415 		error = xfs_iunlink_remove(tp, sip);
1416 		if (error)
1417 			goto error_return;
1418 	}
1419 
1420 	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1421 				   resblks);
1422 	if (error)
1423 		goto error_return;
1424 	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1425 	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1426 
1427 	xfs_bumplink(tp, sip);
1428 
1429 	/*
1430 	 * If this is a synchronous mount, make sure that the
1431 	 * link transaction goes to disk before returning to
1432 	 * the user.
1433 	 */
1434 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1435 		xfs_trans_set_sync(tp);
1436 
1437 	return xfs_trans_commit(tp);
1438 
1439  error_return:
1440 	xfs_trans_cancel(tp);
1441  std_return:
1442 	return error;
1443 }
1444 
1445 /* Clear the reflink flag and the cowblocks tag if possible. */
1446 static void
1447 xfs_itruncate_clear_reflink_flags(
1448 	struct xfs_inode	*ip)
1449 {
1450 	struct xfs_ifork	*dfork;
1451 	struct xfs_ifork	*cfork;
1452 
1453 	if (!xfs_is_reflink_inode(ip))
1454 		return;
1455 	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1456 	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1457 	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1458 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1459 	if (cfork->if_bytes == 0)
1460 		xfs_inode_clear_cowblocks_tag(ip);
1461 }
1462 
1463 /*
1464  * Free up the underlying blocks past new_size.  The new size must be smaller
1465  * than the current size.  This routine can be used both for the attribute and
1466  * data fork, and does not modify the inode size, which is left to the caller.
1467  *
1468  * The transaction passed to this routine must have made a permanent log
1469  * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1470  * given transaction and start new ones, so make sure everything involved in
1471  * the transaction is tidy before calling here.  Some transaction will be
1472  * returned to the caller to be committed.  The incoming transaction must
1473  * already include the inode, and both inode locks must be held exclusively.
1474  * The inode must also be "held" within the transaction.  On return the inode
1475  * will be "held" within the returned transaction.  This routine does NOT
1476  * require any disk space to be reserved for it within the transaction.
1477  *
1478  * If we get an error, we must return with the inode locked and linked into the
1479  * current transaction. This keeps things simple for the higher level code,
1480  * because it always knows that the inode is locked and held in the transaction
1481  * that returns to it whether errors occur or not.  We don't mark the inode
1482  * dirty on error so that transactions can be easily aborted if possible.
1483  */
1484 int
1485 xfs_itruncate_extents_flags(
1486 	struct xfs_trans	**tpp,
1487 	struct xfs_inode	*ip,
1488 	int			whichfork,
1489 	xfs_fsize_t		new_size,
1490 	int			flags)
1491 {
1492 	struct xfs_mount	*mp = ip->i_mount;
1493 	struct xfs_trans	*tp = *tpp;
1494 	xfs_fileoff_t		first_unmap_block;
1495 	xfs_filblks_t		unmap_len;
1496 	int			error = 0;
1497 
1498 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1499 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1500 	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1501 	ASSERT(new_size <= XFS_ISIZE(ip));
1502 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1503 	ASSERT(ip->i_itemp != NULL);
1504 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1505 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1506 
1507 	trace_xfs_itruncate_extents_start(ip, new_size);
1508 
1509 	flags |= xfs_bmapi_aflag(whichfork);
1510 
1511 	/*
1512 	 * Since it is possible for space to become allocated beyond
1513 	 * the end of the file (in a crash where the space is allocated
1514 	 * but the inode size is not yet updated), simply remove any
1515 	 * blocks which show up between the new EOF and the maximum
1516 	 * possible file size.
1517 	 *
1518 	 * We have to free all the blocks to the bmbt maximum offset, even if
1519 	 * the page cache can't scale that far.
1520 	 */
1521 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1522 	if (first_unmap_block >= XFS_MAX_FILEOFF) {
1523 		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1524 		return 0;
1525 	}
1526 
1527 	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
1528 	while (unmap_len > 0) {
1529 		ASSERT(tp->t_firstblock == NULLFSBLOCK);
1530 		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
1531 				flags, XFS_ITRUNC_MAX_EXTENTS);
1532 		if (error)
1533 			goto out;
1534 
1535 		/*
1536 		 * Duplicate the transaction that has the permanent
1537 		 * reservation and commit the old transaction.
1538 		 */
1539 		error = xfs_defer_finish(&tp);
1540 		if (error)
1541 			goto out;
1542 
1543 		error = xfs_trans_roll_inode(&tp, ip);
1544 		if (error)
1545 			goto out;
1546 	}
1547 
1548 	if (whichfork == XFS_DATA_FORK) {
1549 		/* Remove all pending CoW reservations. */
1550 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1551 				first_unmap_block, XFS_MAX_FILEOFF, true);
1552 		if (error)
1553 			goto out;
1554 
1555 		xfs_itruncate_clear_reflink_flags(ip);
1556 	}
1557 
1558 	/*
1559 	 * Always re-log the inode so that our permanent transaction can keep
1560 	 * on rolling it forward in the log.
1561 	 */
1562 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1563 
1564 	trace_xfs_itruncate_extents_end(ip, new_size);
1565 
1566 out:
1567 	*tpp = tp;
1568 	return error;
1569 }
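
/*
 * Caller pattern for xfs_itruncate_extents_flags() above;
 * xfs_inactive_truncate() below is the in-file example (error handling
 * elided in this sketch):
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */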
1570 
1571 int
1572 xfs_release(
1573 	xfs_inode_t	*ip)
1574 {
1575 	xfs_mount_t	*mp = ip->i_mount;
1576 	int		error;
1577 
1578 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1579 		return 0;
1580 
1581 	/* If this is a read-only mount, don't do this (would generate I/O) */
1582 	if (mp->m_flags & XFS_MOUNT_RDONLY)
1583 		return 0;
1584 
1585 	if (!XFS_FORCED_SHUTDOWN(mp)) {
1586 		int truncated;
1587 
1588 		/*
1589 		 * If we previously truncated this file and removed old data
1590 		 * in the process, we want to initiate "early" writeout on
1591 		 * the last close.  This is an attempt to combat the notorious
1592 		 * NULL files problem which is particularly noticeable from a
1593 		 * truncate down, buffered (re-)write (delalloc), followed by
1594 		 * a crash.  What we are effectively doing here is
1595 		 * significantly reducing the time window where we'd otherwise
1596 		 * be exposed to that problem.
1597 		 */
1598 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1599 		if (truncated) {
1600 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1601 			if (ip->i_delayed_blks > 0) {
1602 				error = filemap_flush(VFS_I(ip)->i_mapping);
1603 				if (error)
1604 					return error;
1605 			}
1606 		}
1607 	}
1608 
1609 	if (VFS_I(ip)->i_nlink == 0)
1610 		return 0;
1611 
1612 	if (xfs_can_free_eofblocks(ip, false)) {
1613 
1614 		/*
1615 		 * If the inode is being opened, written and closed frequently
1616 		 * and we have delayed allocation blocks outstanding (e.g.
1617 		 * streaming writes from the NFS server), truncating the blocks
1618 		 * past EOF will cause fragmentation to occur.
1619 		 *
1620 		 * In this case don't do the truncation, but we have to be
1621 		 * careful how we detect this case. Blocks beyond EOF show up as
1622 		 * i_delayed_blks even when the inode is clean, so we need to
1623 		 * truncate them away first before checking for a dirty release.
1624 		 * Hence on the first dirty close we will still remove the
1625 		 * speculative allocation, but after that we will leave it in
1626 		 * place.
1627 		 */
1628 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1629 			return 0;
1630 		/*
1631 		 * If we can't get the iolock just skip truncating the blocks
1632 		 * past EOF because we could deadlock with the mmap_lock
1633 		 * otherwise. We'll get another chance to drop them once the
1634 		 * last reference to the inode is dropped, so we'll never leak
1635 		 * blocks permanently.
1636 		 */
1637 		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1638 			error = xfs_free_eofblocks(ip);
1639 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1640 			if (error)
1641 				return error;
1642 		}
1643 
1644 		/* delalloc blocks after truncation means it really is dirty */
1645 		if (ip->i_delayed_blks)
1646 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1647 	}
1648 	return 0;
1649 }
1650 
1651 /*
1652  * xfs_inactive_truncate
1653  *
1654  * Called to perform a truncate when an inode becomes unlinked.
1655  */
1656 STATIC int
1657 xfs_inactive_truncate(
1658 	struct xfs_inode *ip)
1659 {
1660 	struct xfs_mount	*mp = ip->i_mount;
1661 	struct xfs_trans	*tp;
1662 	int			error;
1663 
1664 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1665 	if (error) {
1666 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1667 		return error;
1668 	}
1669 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1670 	xfs_trans_ijoin(tp, ip, 0);
1671 
1672 	/*
1673 	 * Log the inode size first to prevent stale data exposure in the event
1674 	 * of a system crash before the truncate completes. See the related
1675 	 * comment in xfs_vn_setattr_size() for details.
1676 	 */
1677 	ip->i_d.di_size = 0;
1678 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1679 
1680 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1681 	if (error)
1682 		goto error_trans_cancel;
1683 
1684 	ASSERT(ip->i_df.if_nextents == 0);
1685 
1686 	error = xfs_trans_commit(tp);
1687 	if (error)
1688 		goto error_unlock;
1689 
1690 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1691 	return 0;
1692 
1693 error_trans_cancel:
1694 	xfs_trans_cancel(tp);
1695 error_unlock:
1696 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1697 	return error;
1698 }
1699 
1700 /*
1701  * xfs_inactive_ifree()
1702  *
1703  * Perform the inode free when an inode is unlinked.
1704  */
1705 STATIC int
1706 xfs_inactive_ifree(
1707 	struct xfs_inode *ip)
1708 {
1709 	struct xfs_mount	*mp = ip->i_mount;
1710 	struct xfs_trans	*tp;
1711 	int			error;
1712 
1713 	/*
1714 	 * We try to use a per-AG reservation for any block needed by the finobt
1715 	 * tree, but as the finobt feature predates the per-AG reservation
1716 	 * support a degraded file system might not have enough space for the
1717 	 * reservation at mount time.  In that case try to dip into the reserved
1718 	 * pool and pray.
1719 	 *
1720 	 * Send a warning if the reservation does happen to fail, as the inode
1721 	 * now remains allocated and sits on the unlinked list until the fs is
1722 	 * repaired.
1723 	 */
1724 	if (unlikely(mp->m_finobt_nores)) {
1725 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1726 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1727 				&tp);
1728 	} else {
1729 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1730 	}
1731 	if (error) {
1732 		if (error == -ENOSPC) {
1733 			xfs_warn_ratelimited(mp,
1734 			"Failed to remove inode(s) from unlinked list. "
1735 			"Please free space, unmount and run xfs_repair.");
1736 		} else {
1737 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
1738 		}
1739 		return error;
1740 	}
1741 
1742 	/*
1743 	 * We do not hold the inode locked across the entire rolling transaction
1744 	 * here. We only need to hold it for the first transaction that
1745 	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1746 	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1747 	 * here breaks the relationship between cluster buffer invalidation and
1748 	 * stale inode invalidation on cluster buffer item journal commit
1749 	 * completion, and can result in leaving dirty stale inodes hanging
1750 	 * around in memory.
1751 	 *
1752 	 * We have no need for serialising this inode operation against other
1753 	 * operations - we freed the inode and hence reallocation is required
1754 	 * and that will serialise on reallocating the space the deferops need
1755 	 * to free. Hence we can unlock the inode on the first commit of
1756 	 * the transaction rather than roll it right through the deferops. This
1757 	 * avoids relogging the XFS_ISTALE inode.
1758 	 *
1759 	 * We check that xfs_ifree() hasn't grown an internal transaction roll
1760 	 * by asserting that the inode is still locked when it returns.
1761 	 */
1762 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1763 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1764 
1765 	error = xfs_ifree(tp, ip);
1766 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1767 	if (error) {
1768 		/*
1769 		 * If we fail to free the inode, shut down.  The cancel
1770 		 * might do that, but we need to make sure.  Otherwise the
1771 		 * inode might be lost for a long time or forever.
1772 		 */
1773 		if (!XFS_FORCED_SHUTDOWN(mp)) {
1774 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1775 				__func__, error);
1776 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1777 		}
1778 		xfs_trans_cancel(tp);
1779 		return error;
1780 	}
1781 
1782 	/*
1783 	 * Credit the quota account(s). The inode is gone.
1784 	 */
1785 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1786 
1787 	/*
1788 	 * Just ignore errors at this point.  There is nothing we can do except
1789 	 * to try to keep going. Make sure it's not a silent error.
1790 	 */
1791 	error = xfs_trans_commit(tp);
1792 	if (error)
1793 		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1794 			__func__, error);
1795 
1796 	return 0;
1797 }
1798 
1799 /*
1800  * xfs_inactive
1801  *
1802  * This is called when the reference count for the vnode
1803  * goes to zero.  If the file has been unlinked, then it must
1804  * now be truncated.  Also, we clear all of the read-ahead state
1805  * kept for the inode here since the file is now closed.
1806  */
1807 void
1808 xfs_inactive(
1809 	xfs_inode_t	*ip)
1810 {
1811 	struct xfs_mount	*mp;
1812 	int			error;
1813 	int			truncate = 0;
1814 
1815 	/*
1816 	 * If the inode is already free, then there can be nothing
1817 	 * to clean up here.
1818 	 */
1819 	if (VFS_I(ip)->i_mode == 0) {
1820 		ASSERT(ip->i_df.if_broot_bytes == 0);
1821 		return;
1822 	}
1823 
1824 	mp = ip->i_mount;
1825 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1826 
1827 	/* If this is a read-only mount, don't do this (would generate I/O) */
1828 	if (mp->m_flags & XFS_MOUNT_RDONLY)
1829 		return;
1830 
1831 	/* Try to clean out the cow blocks if there are any. */
1832 	if (xfs_inode_has_cow_data(ip))
1833 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1834 
1835 	if (VFS_I(ip)->i_nlink != 0) {
1836 		/*
1837 		 * force is true because we are evicting an inode from the
1838 		 * cache. Post-eof blocks must be freed, lest we end up with
1839 		 * broken free space accounting.
1840 		 *
1841 		 * Note: don't bother with iolock here since lockdep complains
1842 		 * about acquiring it in reclaim context. We have the only
1843 		 * reference to the inode at this point anyway.
1844 		 */
1845 		if (xfs_can_free_eofblocks(ip, true))
1846 			xfs_free_eofblocks(ip);
1847 
1848 		return;
1849 	}
1850 
1851 	if (S_ISREG(VFS_I(ip)->i_mode) &&
1852 	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1853 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1854 		truncate = 1;
1855 
1856 	error = xfs_qm_dqattach(ip);
1857 	if (error)
1858 		return;
1859 
1860 	if (S_ISLNK(VFS_I(ip)->i_mode))
1861 		error = xfs_inactive_symlink(ip);
1862 	else if (truncate)
1863 		error = xfs_inactive_truncate(ip);
1864 	if (error)
1865 		return;
1866 
1867 	/*
1868 	 * If there are attributes associated with the file then blow them away
1869 	 * now.  The code calls a routine that recursively deconstructs the
1870 	 * attribute fork. It also blows away the in-core attribute fork.
1871 	 */
1872 	if (XFS_IFORK_Q(ip)) {
1873 		error = xfs_attr_inactive(ip);
1874 		if (error)
1875 			return;
1876 	}
1877 
1878 	ASSERT(!ip->i_afp);
1879 	ASSERT(ip->i_d.di_forkoff == 0);
1880 
1881 	/*
1882 	 * Free the inode.
1883 	 */
1884 	error = xfs_inactive_ifree(ip);
1885 	if (error)
1886 		return;
1887 
1888 	/*
1889 	 * Release the dquots held by inode, if any.
1890 	 */
1891 	xfs_qm_dqdetach(ip);
1892 }
1893 
1894 /*
1895  * In-Core Unlinked List Lookups
1896  * =============================
1897  *
1898  * Every inode is supposed to be reachable from some other piece of metadata
1899  * with the exception of the root directory.  Inodes with a connection to a
1900  * file descriptor but not linked from anywhere in the on-disk directory tree
1901  * are collectively known as unlinked inodes, though the filesystem itself
1902  * maintains links to these inodes so that on-disk metadata are consistent.
1903  *
1904  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1905  * header contains a number of buckets that point to an inode, and each inode
1906  * record has a pointer to the next inode in the hash chain.  This
1907  * singly-linked list causes scaling problems in the iunlink remove function
1908  * because we must walk that list to find the inode that points to the inode
1909  * being removed from the unlinked hash bucket list.
1910  *
1911  * What if we modelled the unlinked list as a collection of records capturing
1912  * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
1913  * have a fast way to look up unlinked list predecessors, which avoids the
1914  * slow list walk.  That's exactly what we do here (in-core) with a per-AG
1915  * rhashtable.
1916  *
1917  * Because this is a backref cache, we ignore operational failures since the
1918  * iunlink code can fall back to the slow bucket walk.  The only errors that
1919  * should bubble out are for obviously incorrect situations.
1920  *
1921  * All users of the backref cache MUST hold the AGI buffer lock to serialize
1922  * access or have otherwise provided for concurrency control.
1923  */
1924 
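/*
 * Worked example (illustrative): if AGI bucket 0 holds the on-disk
 * chain 12 -> 8 -> 3 -> NULLAGINO, the backref cache contains the
 * records {X=12, Y=8} and {X=8, Y=3}, indexed on Y.  Removing inode 8
 * can then look up Y=8 to find its predecessor X=12 directly instead
 * of walking the chain from the bucket head.
 */
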
1925 /* Capture a "X.next_unlinked = Y" relationship. */
1926 struct xfs_iunlink {
1927 	struct rhash_head	iu_rhash_head;
1928 	xfs_agino_t		iu_agino;		/* X */
1929 	xfs_agino_t		iu_next_unlinked;	/* Y */
1930 };
1931 
1932 /* Unlinked list predecessor lookup hashtable construction */
1933 static int
1934 xfs_iunlink_obj_cmpfn(
1935 	struct rhashtable_compare_arg	*arg,
1936 	const void			*obj)
1937 {
1938 	const xfs_agino_t		*key = arg->key;
1939 	const struct xfs_iunlink	*iu = obj;
1940 
1941 	if (iu->iu_next_unlinked != *key)
1942 		return 1;
1943 	return 0;
1944 }
1945 
1946 static const struct rhashtable_params xfs_iunlink_hash_params = {
1947 	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
1948 	.key_len		= sizeof(xfs_agino_t),
1949 	.key_offset		= offsetof(struct xfs_iunlink,
1950 					   iu_next_unlinked),
1951 	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
1952 	.automatic_shrinking	= true,
1953 	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
1954 };
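
/*
 * Hypothetical helper, shown only to illustrate the bucket selection
 * that xfs_iunlink() and xfs_iunlink_remove() open-code below: every
 * unlinked inode hashes into one of the XFS_AGI_UNLINKED_BUCKETS (64)
 * singly-linked chains in the AGI by its AG inode number.
 */
static inline short
xfs_iunlink_bucket_index(
	xfs_agino_t		agino)
{
	return agino % XFS_AGI_UNLINKED_BUCKETS;
}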
1955 
1956 /*
1957  * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
1958  * relation is found.
1959  */
1960 static xfs_agino_t
1961 xfs_iunlink_lookup_backref(
1962 	struct xfs_perag	*pag,
1963 	xfs_agino_t		agino)
1964 {
1965 	struct xfs_iunlink	*iu;
1966 
1967 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1968 			xfs_iunlink_hash_params);
1969 	return iu ? iu->iu_agino : NULLAGINO;
1970 }
1971 
1972 /*
1973  * Take ownership of an iunlink cache entry and insert it into the hash table.
1974  * If successful, the entry will be owned by the cache; if not, it is freed.
1975  * Either way, the caller does not own @iu after this call.
1976  */
1977 static int
1978 xfs_iunlink_insert_backref(
1979 	struct xfs_perag	*pag,
1980 	struct xfs_iunlink	*iu)
1981 {
1982 	int			error;
1983 
1984 	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
1985 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
1986 	/*
1987 	 * Fail loudly if there already was an entry because that's a sign of
1988 	 * corruption of in-memory data.  Also fail loudly if we see an error
1989 	 * code we didn't anticipate from the rhashtable code.  Currently we
1990 	 * only anticipate ENOMEM.
1991 	 */
1992 	if (error) {
1993 		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
1994 		kmem_free(iu);
1995 	}
1996 	/*
1997 	 * Absorb any runtime errors that aren't a result of corruption because
1998 	 * this is a cache and we can always fall back to bucket list scanning.
1999 	 */
2000 	if (error != 0 && error != -EEXIST)
2001 		error = 0;
2002 	return error;
2003 }
2004 
2005 /* Remember that @prev_agino.next_unlinked = @this_agino. */
2006 static int
2007 xfs_iunlink_add_backref(
2008 	struct xfs_perag	*pag,
2009 	xfs_agino_t		prev_agino,
2010 	xfs_agino_t		this_agino)
2011 {
2012 	struct xfs_iunlink	*iu;
2013 
2014 	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
2015 		return 0;
2016 
2017 	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
2018 	iu->iu_agino = prev_agino;
2019 	iu->iu_next_unlinked = this_agino;
2020 
2021 	return xfs_iunlink_insert_backref(pag, iu);
2022 }
2023 
2024 /*
2025  * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
2026  * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
2027  * wasn't any such entry then we don't bother.
2028  */
2029 static int
2030 xfs_iunlink_change_backref(
2031 	struct xfs_perag	*pag,
2032 	xfs_agino_t		agino,
2033 	xfs_agino_t		next_unlinked)
2034 {
2035 	struct xfs_iunlink	*iu;
2036 	int			error;
2037 
2038 	/* Look up the old entry; if there wasn't one then exit. */
2039 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
2040 			xfs_iunlink_hash_params);
2041 	if (!iu)
2042 		return 0;
2043 
2044 	/*
2045 	 * Remove the entry.  This shouldn't ever return an error, but if we
2046 	 * couldn't remove the old entry we don't want to add it again to the
2047 	 * hash table, and if the entry disappeared on us then someone's
2048 	 * violated the locking rules and we need to fail loudly.  Either way
2049 	 * we cannot remove the inode because internal state is or would have
2050 	 * been corrupt.
2051 	 */
2052 	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
2053 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
2054 	if (error)
2055 		return error;
2056 
2057 	/* If there is no new next entry just free our item and return. */
2058 	if (next_unlinked == NULLAGINO) {
2059 		kmem_free(iu);
2060 		return 0;
2061 	}
2062 
2063 	/* Update the entry and re-add it to the hash table. */
2064 	iu->iu_next_unlinked = next_unlinked;
2065 	return xfs_iunlink_insert_backref(pag, iu);
2066 }
2067 
2068 /* Set up the in-core predecessor structures. */
2069 int
2070 xfs_iunlink_init(
2071 	struct xfs_perag	*pag)
2072 {
2073 	return rhashtable_init(&pag->pagi_unlinked_hash,
2074 			&xfs_iunlink_hash_params);
2075 }
2076 
2077 /* Free the in-core predecessor structures. */
2078 static void
2079 xfs_iunlink_free_item(
2080 	void			*ptr,
2081 	void			*arg)
2082 {
2083 	struct xfs_iunlink	*iu = ptr;
2084 	bool			*freed_anything = arg;
2085 
2086 	*freed_anything = true;
2087 	kmem_free(iu);
2088 }
2089 
2090 void
2091 xfs_iunlink_destroy(
2092 	struct xfs_perag	*pag)
2093 {
2094 	bool			freed_anything = false;
2095 
2096 	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
2097 			xfs_iunlink_free_item, &freed_anything);
2098 
2099 	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
2100 }
2101 
2102 /*
2103  * Point the AGI unlinked bucket at an inode and log the results.  The caller
2104  * is responsible for validating the old value.
2105  */
2106 STATIC int
2107 xfs_iunlink_update_bucket(
2108 	struct xfs_trans	*tp,
2109 	xfs_agnumber_t		agno,
2110 	struct xfs_buf		*agibp,
2111 	unsigned int		bucket_index,
2112 	xfs_agino_t		new_agino)
2113 {
2114 	struct xfs_agi		*agi = agibp->b_addr;
2115 	xfs_agino_t		old_value;
2116 	int			offset;
2117 
2118 	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
2119 
2120 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2121 	trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
2122 			old_value, new_agino);
2123 
2124 	/*
2125 	 * We should never find the head of the list already set to the value
2126 	 * passed in because either we're adding or removing ourselves from the
2127 	 * head of the list.
2128 	 */
2129 	if (old_value == new_agino) {
2130 		xfs_buf_mark_corrupt(agibp);
2131 		return -EFSCORRUPTED;
2132 	}
2133 
2134 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
2135 	offset = offsetof(struct xfs_agi, agi_unlinked) +
2136 			(sizeof(xfs_agino_t) * bucket_index);
2137 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
2138 	return 0;
2139 }
2140 
2141 /* Set an on-disk inode's next_unlinked pointer. */
2142 STATIC void
2143 xfs_iunlink_update_dinode(
2144 	struct xfs_trans	*tp,
2145 	xfs_agnumber_t		agno,
2146 	xfs_agino_t		agino,
2147 	struct xfs_buf		*ibp,
2148 	struct xfs_dinode	*dip,
2149 	struct xfs_imap		*imap,
2150 	xfs_agino_t		next_agino)
2151 {
2152 	struct xfs_mount	*mp = tp->t_mountp;
2153 	int			offset;
2154 
2155 	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2156 
2157 	trace_xfs_iunlink_update_dinode(mp, agno, agino,
2158 			be32_to_cpu(dip->di_next_unlinked), next_agino);
2159 
2160 	dip->di_next_unlinked = cpu_to_be32(next_agino);
2161 	offset = imap->im_boffset +
2162 			offsetof(struct xfs_dinode, di_next_unlinked);
2163 
2164 	/* need to recalc the inode CRC if appropriate */
2165 	xfs_dinode_calc_crc(mp, dip);
2166 	xfs_trans_inode_buf(tp, ibp);
2167 	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2168 }
2169 
2170 /* Set an in-core inode's unlinked pointer and return the old value. */
2171 STATIC int
2172 xfs_iunlink_update_inode(
2173 	struct xfs_trans	*tp,
2174 	struct xfs_inode	*ip,
2175 	xfs_agnumber_t		agno,
2176 	xfs_agino_t		next_agino,
2177 	xfs_agino_t		*old_next_agino)
2178 {
2179 	struct xfs_mount	*mp = tp->t_mountp;
2180 	struct xfs_dinode	*dip;
2181 	struct xfs_buf		*ibp;
2182 	xfs_agino_t		old_value;
2183 	int			error;
2184 
2185 	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2186 
2187 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0);
2188 	if (error)
2189 		return error;
2190 
2191 	/* Make sure the old pointer isn't garbage. */
2192 	old_value = be32_to_cpu(dip->di_next_unlinked);
2193 	if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2194 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2195 				sizeof(*dip), __this_address);
2196 		error = -EFSCORRUPTED;
2197 		goto out;
2198 	}
2199 
2200 	/*
2201 	 * Since we're updating a linked list, we should never find that the
2202 	 * current pointer is the same as the new value, unless we're
2203 	 * terminating the list.
2204 	 */
2205 	*old_next_agino = old_value;
2206 	if (old_value == next_agino) {
2207 		if (next_agino != NULLAGINO) {
2208 			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2209 					dip, sizeof(*dip), __this_address);
2210 			error = -EFSCORRUPTED;
2211 		}
2212 		goto out;
2213 	}
2214 
2215 	/* Ok, update the new pointer. */
2216 	xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2217 			ibp, dip, &ip->i_imap, next_agino);
2218 	return 0;
2219 out:
2220 	xfs_trans_brelse(tp, ibp);
2221 	return error;
2222 }
2223 
2224 /*
2225  * This is called when the inode's link count has gone to 0 or we are creating
2226  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
2227  *
2228  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
2229  * list when the inode is freed.
2230  */
2231 STATIC int
2232 xfs_iunlink(
2233 	struct xfs_trans	*tp,
2234 	struct xfs_inode	*ip)
2235 {
2236 	struct xfs_mount	*mp = tp->t_mountp;
2237 	struct xfs_agi		*agi;
2238 	struct xfs_buf		*agibp;
2239 	xfs_agino_t		next_agino;
2240 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2241 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2242 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2243 	int			error;
2244 
2245 	ASSERT(VFS_I(ip)->i_nlink == 0);
2246 	ASSERT(VFS_I(ip)->i_mode != 0);
2247 	trace_xfs_iunlink(ip);
2248 
2249 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2250 	error = xfs_read_agi(mp, tp, agno, &agibp);
2251 	if (error)
2252 		return error;
2253 	agi = agibp->b_addr;
2254 
2255 	/*
2256 	 * Get the index into the agi hash table for the list this inode will
2257 	 * go on.  Make sure the pointer isn't garbage and that this inode
2258 	 * isn't already on the list.
2259 	 */
2260 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2261 	if (next_agino == agino ||
2262 	    !xfs_verify_agino_or_null(mp, agno, next_agino)) {
2263 		xfs_buf_mark_corrupt(agibp);
2264 		return -EFSCORRUPTED;
2265 	}
2266 
2267 	if (next_agino != NULLAGINO) {
2268 		xfs_agino_t		old_agino;
2269 
2270 		/*
2271 		 * There is already another inode in the bucket, so point this
2272 		 * inode to the current head of the list.
2273 		 */
2274 		error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2275 				&old_agino);
2276 		if (error)
2277 			return error;
2278 		ASSERT(old_agino == NULLAGINO);
2279 
2280 		/*
2281 		 * agino has been unlinked, add a backref from the next inode
2282 		 * back to agino.
2283 		 */
2284 		error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino);
2285 		if (error)
2286 			return error;
2287 	}
2288 
2289 	/* Point the head of the list to point to this inode. */
2290 	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
2291 }
2292 
2293 /* Return the imap, dinode pointer, and buffer for an inode. */
2294 STATIC int
2295 xfs_iunlink_map_ino(
2296 	struct xfs_trans	*tp,
2297 	xfs_agnumber_t		agno,
2298 	xfs_agino_t		agino,
2299 	struct xfs_imap		*imap,
2300 	struct xfs_dinode	**dipp,
2301 	struct xfs_buf		**bpp)
2302 {
2303 	struct xfs_mount	*mp = tp->t_mountp;
2304 	int			error;
2305 
2306 	imap->im_blkno = 0;
2307 	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2308 	if (error) {
2309 		xfs_warn(mp, "%s: xfs_imap returned error %d.",
2310 				__func__, error);
2311 		return error;
2312 	}
2313 
2314 	error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0);
2315 	if (error) {
2316 		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2317 				__func__, error);
2318 		return error;
2319 	}
2320 
2321 	return 0;
2322 }
2323 
2324 /*
2325  * Walk the unlinked chain from @head_agino until we find the inode that
2326  * points to @target_agino.  Return the inode number, map, dinode pointer,
2327  * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2328  *
2329  * @tp, @agno, @pag, @head_agino, and @target_agino are input parameters.
2330  * @agino, @imap, @dipp, and @bpp are all output parameters.
2331  *
2332  * Do not call this function if @target_agino is the head of the list.
2333  */
2334 STATIC int
2335 xfs_iunlink_map_prev(
2336 	struct xfs_trans	*tp,
2337 	xfs_agnumber_t		agno,
2338 	xfs_agino_t		head_agino,
2339 	xfs_agino_t		target_agino,
2340 	xfs_agino_t		*agino,
2341 	struct xfs_imap		*imap,
2342 	struct xfs_dinode	**dipp,
2343 	struct xfs_buf		**bpp,
2344 	struct xfs_perag	*pag)
2345 {
2346 	struct xfs_mount	*mp = tp->t_mountp;
2347 	xfs_agino_t		next_agino;
2348 	int			error;
2349 
2350 	ASSERT(head_agino != target_agino);
2351 	*bpp = NULL;
2352 
2353 	/* See if our backref cache can find it faster. */
2354 	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
2355 	if (*agino != NULLAGINO) {
2356 		error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
2357 		if (error)
2358 			return error;
2359 
2360 		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2361 			return 0;
2362 
2363 		/*
2364 		 * If we get here the cache contents were corrupt, so drop the
2365 		 * buffer and fall back to walking the bucket list.
2366 		 */
2367 		xfs_trans_brelse(tp, *bpp);
2368 		*bpp = NULL;
2369 		WARN_ON_ONCE(1);
2370 	}
2371 
2372 	trace_xfs_iunlink_map_prev_fallback(mp, agno);
2373 
2374 	/* Otherwise, walk the entire bucket until we find it. */
2375 	next_agino = head_agino;
2376 	while (next_agino != target_agino) {
2377 		xfs_agino_t	unlinked_agino;
2378 
2379 		if (*bpp)
2380 			xfs_trans_brelse(tp, *bpp);
2381 
2382 		*agino = next_agino;
2383 		error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2384 				bpp);
2385 		if (error)
2386 			return error;
2387 
2388 		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2389 		/*
2390 		 * Make sure this pointer is valid and isn't an obvious
2391 		 * infinite loop.
2392 		 */
2393 		if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2394 		    next_agino == unlinked_agino) {
2395 			XFS_CORRUPTION_ERROR(__func__,
2396 					XFS_ERRLEVEL_LOW, mp,
2397 					*dipp, sizeof(**dipp));
2398 			error = -EFSCORRUPTED;
2399 			return error;
2400 		}
2401 		next_agino = unlinked_agino;
2402 	}
2403 
2404 	return 0;
2405 }
2406 
2407 /*
2408  * Pull the on-disk inode from the AGI unlinked list.
2409  */
2410 STATIC int
2411 xfs_iunlink_remove(
2412 	struct xfs_trans	*tp,
2413 	struct xfs_inode	*ip)
2414 {
2415 	struct xfs_mount	*mp = tp->t_mountp;
2416 	struct xfs_agi		*agi;
2417 	struct xfs_buf		*agibp;
2418 	struct xfs_buf		*last_ibp;
2419 	struct xfs_dinode	*last_dip = NULL;
2420 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2421 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2422 	xfs_agino_t		next_agino;
2423 	xfs_agino_t		head_agino;
2424 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2425 	int			error;
2426 
2427 	trace_xfs_iunlink_remove(ip);
2428 
2429 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2430 	error = xfs_read_agi(mp, tp, agno, &agibp);
2431 	if (error)
2432 		return error;
2433 	agi = agibp->b_addr;
2434 
2435 	/*
2436 	 * Get the index into the agi hash table for the list this inode will
2437 	 * go on.  Make sure the head pointer isn't garbage.
2438 	 */
2439 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2440 	if (!xfs_verify_agino(mp, agno, head_agino)) {
2441 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2442 				agi, sizeof(*agi));
2443 		return -EFSCORRUPTED;
2444 	}
2445 
2446 	/*
2447 	 * Set our inode's next_unlinked pointer to NULL and then return
2448 	 * the old pointer value so that we can update whatever was previous
2449 	 * to us in the list to point to whatever was next in the list.
2450 	 */
2451 	error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2452 	if (error)
2453 		return error;
2454 
2455 	/*
2456 	 * If there was a backref pointing from the next inode back to this
2457 	 * one, remove it because we've removed this inode from the list.
2458 	 *
2459 	 * Later, if this inode was in the middle of the list, we'll update
2460 	 * the backref that pointed at this inode to point at our old next.
2461 	 */
2462 	if (next_agino != NULLAGINO) {
2463 		error = xfs_iunlink_change_backref(agibp->b_pag, next_agino,
2464 				NULLAGINO);
2465 		if (error)
2466 			return error;
2467 	}
2468 
2469 	if (head_agino != agino) {
2470 		struct xfs_imap	imap;
2471 		xfs_agino_t	prev_agino;
2472 
2473 		/* We need to search the list for the inode being freed. */
2474 		error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
2475 				&prev_agino, &imap, &last_dip, &last_ibp,
2476 				agibp->b_pag);
2477 		if (error)
2478 			return error;
2479 
2480 		/* Point the previous inode on the list to the next inode. */
2481 		xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2482 				last_dip, &imap, next_agino);
2483 
2484 		/*
2485 		 * Now we deal with the backref for this inode.  If this inode
2486 		 * pointed at a real inode, change the backref that pointed to
2487 		 * us to point to our old next.  If this inode was the end of
2488 		 * the list, delete the backref that pointed to us.  Note that
2489 		 * change_backref takes care of deleting the backref if
2490 		 * next_agino is NULLAGINO.
2491 		 */
2492 		return xfs_iunlink_change_backref(agibp->b_pag, agino,
2493 				next_agino);
2494 	}
2495 
2496 	/* Point the head of the list to the next unlinked inode. */
2497 	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2498 			next_agino);
2499 }
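
/*
 * Worked removal cases for the function above, on an illustrative
 * bucket chain 12 -> 8 -> 3: removing inode 8 finds its predecessor 12
 * via xfs_iunlink_map_prev() (through the backref cache when it is
 * warm) and rewrites 12's next_unlinked to 3; removing the head inode
 * 12 instead repoints the AGI bucket itself at 8.
 */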
2500 
2501 /*
2502  * Look up the inode number specified and, if it is not already marked
2503  * XFS_ISTALE, mark it stale.  We should only find clean inodes in this
2504  * lookup that aren't already stale.
2505  */
2506 static void
2507 xfs_ifree_mark_inode_stale(
2508 	struct xfs_buf		*bp,
2509 	struct xfs_inode	*free_ip,
2510 	xfs_ino_t		inum)
2511 {
2512 	struct xfs_mount	*mp = bp->b_mount;
2513 	struct xfs_perag	*pag = bp->b_pag;
2514 	struct xfs_inode_log_item *iip;
2515 	struct xfs_inode	*ip;
2516 
2517 retry:
2518 	rcu_read_lock();
2519 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2520 
2521 	/* Inode not in memory, nothing to do */
2522 	if (!ip) {
2523 		rcu_read_unlock();
2524 		return;
2525 	}
2526 
2527 	/*
2528 	 * Because this is an RCU-protected lookup, we could find a recently
2529 	 * freed or even reallocated inode during the lookup. We need to check
2530 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2531 	 * valid, the wrong inode or stale.
2532 	 */
2533 	spin_lock(&ip->i_flags_lock);
2534 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) {
2535 		spin_unlock(&ip->i_flags_lock);
2536 		rcu_read_unlock();
2537 		return;
2538 	}
2539 
2540 	/*
2541 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2542 	 * other inodes that we did not find in the list attached to the buffer
2543 	 * and are not already marked stale. If we can't lock it, back off and
2544 	 * retry.
2545 	 */
2546 	if (ip != free_ip) {
2547 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2548 			spin_unlock(&ip->i_flags_lock);
2549 			rcu_read_unlock();
2550 			delay(1);
2551 			goto retry;
2552 		}
2553 	}
2554 	ip->i_flags |= XFS_ISTALE;
2555 	spin_unlock(&ip->i_flags_lock);
2556 	rcu_read_unlock();
2557 
2558 	/*
2559 	 * If we can't get the flush lock, the inode is already attached to
2560 	 * the buffer.  All we need to do here is mark the inode stale so
2561 	 * buffer IO completion will remove it from the AIL.
2562 	 */
2563 	iip = ip->i_itemp;
2564 	if (!xfs_iflock_nowait(ip)) {
2565 		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2566 		ASSERT(iip->ili_last_fields);
2567 		goto out_iunlock;
2568 	}
2569 
2570 	/*
2571 	 * Inodes not attached to the buffer can be released immediately.
2572 	 * Everything else has to go through xfs_iflush_abort() on journal
2573 	 * commit as the flock synchronises removal of the inode from the
2574 	 * cluster buffer against inode reclaim.
2575 	 */
2576 	if (!iip || list_empty(&iip->ili_item.li_bio_list)) {
2577 		xfs_ifunlock(ip);
2578 		goto out_iunlock;
2579 	}
2580 
2581 	/* we have a dirty inode in memory that has not yet been flushed. */
2582 	spin_lock(&iip->ili_lock);
2583 	iip->ili_last_fields = iip->ili_fields;
2584 	iip->ili_fields = 0;
2585 	iip->ili_fsync_fields = 0;
2586 	spin_unlock(&iip->ili_lock);
2587 	ASSERT(iip->ili_last_fields);
2588 
2589 out_iunlock:
2590 	if (ip != free_ip)
2591 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2592 }
2593 
2594 /*
2595  * A big issue when freeing the inode cluster is that we _cannot_ skip any
2596  * inodes that are in memory - they all must be marked stale and attached to
2597  * the cluster buffer.
2598  */
2599 STATIC int
2600 xfs_ifree_cluster(
2601 	struct xfs_inode	*free_ip,
2602 	struct xfs_trans	*tp,
2603 	struct xfs_icluster	*xic)
2604 {
2605 	struct xfs_mount	*mp = free_ip->i_mount;
2606 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2607 	struct xfs_buf		*bp;
2608 	xfs_daddr_t		blkno;
2609 	xfs_ino_t		inum = xic->first_ino;
2610 	int			nbufs;
2611 	int			i, j;
2612 	int			ioffset;
2613 	int			error;
2614 
2615 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2616 
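	/*
	 * Illustrative geometry (assumed defaults, not derived from this
	 * function): with 4k blocks, 512-byte inodes and 8k inode
	 * clusters, a 64-inode chunk spans 8 blocks, each cluster covers
	 * 2 blocks and 16 inodes, so nbufs works out to 4 cluster
	 * buffers per chunk.
	 */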
2617 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2618 		/*
2619 		 * The allocation bitmap tells us which inodes of the chunk were
2620 		 * physically allocated. Skip the cluster if an inode falls into
2621 		 * a sparse region.
2622 		 */
2623 		ioffset = inum - xic->first_ino;
2624 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2625 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2626 			continue;
2627 		}
2628 
2629 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2630 					 XFS_INO_TO_AGBNO(mp, inum));
2631 
2632 		/*
2633 		 * We obtain and lock the backing buffer first in the process
2634 		 * here, as we have to ensure that any dirty inode that we
2635 		 * can't get the flush lock on is attached to the buffer.
2636 		 * If we scan the in-memory inodes first, then buffer IO can
2637 		 * complete before we get a lock on it, and hence we may fail
2638 		 * to mark all the active inodes on the buffer stale.
2639 		 */
2640 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2641 				mp->m_bsize * igeo->blocks_per_cluster,
2642 				XBF_UNMAPPED, &bp);
2643 		if (error)
2644 			return error;
2645 
2646 		/*
2647 		 * This buffer may not have been correctly initialised as we
2648 		 * didn't read it from disk. That's not important because we are
2649 		 * only using to mark the buffer as stale in the log, and to
2650 		 * only using it to mark the buffer as stale in the log, and to
2651 		 * attach stale cached inodes on it. That means it will never be
2652 		 * dispatched for IO. If it is, we want to know about it, and we
2653 		 * want it to fail. We can achieve this by adding a write
2654 		 */
2655 		bp->b_ops = &xfs_inode_buf_ops;
2656 
2657 		/*
2658 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2659 		 * too. This requires lookups, and will skip inodes that we've
2660 		 * already marked XFS_ISTALE.
2661 		 */
2662 		for (i = 0; i < igeo->inodes_per_cluster; i++)
2663 			xfs_ifree_mark_inode_stale(bp, free_ip, inum + i);
2664 
2665 		xfs_trans_stale_inode_buf(tp, bp);
2666 		xfs_trans_binval(tp, bp);
2667 	}
2668 	return 0;
2669 }
2670 
2671 /*
2672  * This is called to return an inode to the inode free list.
2673  * The inode should already be truncated to 0 length and have
2674  * no pages associated with it.  This routine also assumes that
2675  * the inode is already a part of the transaction.
2676  *
2677  * The on-disk copy of the inode will have been added to the list
2678  * of unlinked inodes in the AGI. We need to remove the inode from
2679  * that list atomically with respect to freeing it here.
2680  */
2681 int
2682 xfs_ifree(
2683 	struct xfs_trans	*tp,
2684 	struct xfs_inode	*ip)
2685 {
2686 	int			error;
2687 	struct xfs_icluster	xic = { 0 };
2688 	struct xfs_inode_log_item *iip = ip->i_itemp;
2689 
2690 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2691 	ASSERT(VFS_I(ip)->i_nlink == 0);
2692 	ASSERT(ip->i_df.if_nextents == 0);
2693 	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2694 	ASSERT(ip->i_d.di_nblocks == 0);
2695 
2696 	/*
2697 	 * Pull the on-disk inode from the AGI unlinked list.
2698 	 */
2699 	error = xfs_iunlink_remove(tp, ip);
2700 	if (error)
2701 		return error;
2702 
2703 	error = xfs_difree(tp, ip->i_ino, &xic);
2704 	if (error)
2705 		return error;
2706 
2707 	/*
2708 	 * Free any local-format data sitting around before we reset the
2709 	 * data fork to extents format.  Note that the attr fork data has
2710 	 * already been freed by xfs_attr_inactive.
2711 	 */
2712 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2713 		kmem_free(ip->i_df.if_u1.if_data);
2714 		ip->i_df.if_u1.if_data = NULL;
2715 		ip->i_df.if_bytes = 0;
2716 	}
2717 
2718 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2719 	ip->i_d.di_flags = 0;
2720 	ip->i_d.di_flags2 = 0;
2721 	ip->i_d.di_dmevmask = 0;
2722 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2723 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2724 
2725 	/* Don't attempt to replay owner changes for a deleted inode */
2726 	spin_lock(&iip->ili_lock);
2727 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2728 	spin_unlock(&iip->ili_lock);
2729 
2730 	/*
2731 	 * Bump the generation count so no one will be confused
2732 	 * by reincarnations of this inode.
2733 	 */
2734 	VFS_I(ip)->i_generation++;
2735 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2736 
2737 	if (xic.deleted)
2738 		error = xfs_ifree_cluster(ip, tp, &xic);
2739 
2740 	return error;
2741 }
2742 
2743 /*
2744  * This is called to unpin an inode.  The caller must have the inode locked
2745  * in at least shared mode so that the buffer cannot be subsequently pinned
2746  * once someone is waiting for it to be unpinned.
2747  */
2748 static void
2749 xfs_iunpin(
2750 	struct xfs_inode	*ip)
2751 {
2752 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2753 
2754 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2755 
2756 	/* Give the log a push to start the unpinning I/O */
2757 	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2759 }
2760 
2761 static void
2762 __xfs_iunpin_wait(
2763 	struct xfs_inode	*ip)
2764 {
2765 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2766 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2767 
2768 	xfs_iunpin(ip);
2769 
2770 	do {
2771 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2772 		if (xfs_ipincount(ip))
2773 			io_schedule();
2774 	} while (xfs_ipincount(ip));
2775 	finish_wait(wq, &wait.wq_entry);
2776 }
2777 
2778 void
2779 xfs_iunpin_wait(
2780 	struct xfs_inode	*ip)
2781 {
2782 	if (xfs_ipincount(ip))
2783 		__xfs_iunpin_wait(ip);
2784 }
2785 
2786 /*
2787  * Removing an inode from the namespace involves removing the directory entry
2788  * and dropping the link count on the inode. Removing the directory entry can
2789  * result in locking an AGF (directory blocks were freed) and removing a link
2790  * count can result in placing the inode on an unlinked list which results in
2791  * locking an AGI.
2792  *
2793  * The big problem here is that we have an ordering constraint on AGF and AGI
2794  * locking - inode allocation locks the AGI, then can allocate a new extent for
2795  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2796  * removes the inode from the unlinked list, requiring that we lock the AGI
2797  * first, and then freeing the inode can result in an inode chunk being freed
2798  * and hence freeing disk space requiring that we lock an AGF.
2799  *
2800  * Hence the ordering that is imposed by other parts of the code is AGI before
2801  * AGF. This means we cannot remove the directory entry before we drop the inode
2802  * reference count and put it on the unlinked list as this results in a lock
2803  * order of AGF then AGI, and this can deadlock against inode allocation and
2804  * freeing. Therefore we must drop the link counts before we remove the
2805  * directory entry.
2806  *
2807  * This is still safe from a transactional point of view - it is not until we
2808  * get to xfs_defer_finish() that we have the possibility of multiple
2809  * transactions in this operation. Hence as long as we remove the directory
2810  * entry and drop the link count in the first transaction of the remove
2811  * operation, there are no transactional constraints on the ordering here.
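 *
 * Schematically, the two orderings that must line up look like this
 * (illustrative only):
 *
 *	inode allocation:	AGI (inode chunk)   ->  AGF (new extent)
 *	xfs_remove():		AGI (unlinked list) ->  AGF (dir blocks freed)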
2812  */
2813 int
2814 xfs_remove(
2815 	xfs_inode_t             *dp,
2816 	struct xfs_name		*name,
2817 	xfs_inode_t		*ip)
2818 {
2819 	xfs_mount_t		*mp = dp->i_mount;
2820 	xfs_trans_t             *tp = NULL;
2821 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2822 	int                     error = 0;
2823 	uint			resblks;
2824 
2825 	trace_xfs_remove(dp, name);
2826 
2827 	if (XFS_FORCED_SHUTDOWN(mp))
2828 		return -EIO;
2829 
2830 	error = xfs_qm_dqattach(dp);
2831 	if (error)
2832 		goto std_return;
2833 
2834 	error = xfs_qm_dqattach(ip);
2835 	if (error)
2836 		goto std_return;
2837 
2838 	/*
2839 	 * We try to get the real space reservation first, allowing
2840 	 * for directory btree deletion(s) implying possible bmap
2841 	 * insert(s).  If we can't get the space reservation then we
2842 	 * use 0 instead, and avoid the bmap btree insert(s) in the
2843 	 * directory code entirely: if a bmap insert would otherwise
2844 	 * be needed, the directory code trims the last block from
2845 	 * the directory instead.
2846 	 */
2847 	resblks = XFS_REMOVE_SPACE_RES(mp);
2848 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2849 	if (error == -ENOSPC) {
2850 		resblks = 0;
2851 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2852 				&tp);
2853 	}
2854 	if (error) {
2855 		ASSERT(error != -ENOSPC);
2856 		goto std_return;
2857 	}
2858 
2859 	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2860 
2861 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2862 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2863 
2864 	/*
2865 	 * If we're removing a directory perform some additional validation.
2866 	 */
2867 	if (is_dir) {
2868 		ASSERT(VFS_I(ip)->i_nlink >= 2);
2869 		if (VFS_I(ip)->i_nlink != 2) {
2870 			error = -ENOTEMPTY;
2871 			goto out_trans_cancel;
2872 		}
2873 		if (!xfs_dir_isempty(ip)) {
2874 			error = -ENOTEMPTY;
2875 			goto out_trans_cancel;
2876 		}
2877 
2878 		/* Drop the link from ip's "..".  */
2879 		error = xfs_droplink(tp, dp);
2880 		if (error)
2881 			goto out_trans_cancel;
2882 
2883 		/* Drop the "." link from ip to self.  */
2884 		error = xfs_droplink(tp, ip);
2885 		if (error)
2886 			goto out_trans_cancel;
2887 	} else {
2888 		/*
2889 		 * When removing a non-directory we need to log the parent
2890 		 * inode here.  For a directory this is done implicitly
2891 		 * by the xfs_droplink call for the ".." entry.
2892 		 */
2893 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2894 	}
2895 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2896 
2897 	/* Drop the link from dp to ip. */
2898 	error = xfs_droplink(tp, ip);
2899 	if (error)
2900 		goto out_trans_cancel;
2901 
2902 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2903 	if (error) {
2904 		ASSERT(error != -ENOENT);
2905 		goto out_trans_cancel;
2906 	}
2907 
2908 	/*
2909 	 * If this is a synchronous mount, make sure that the
2910 	 * remove transaction goes to disk before returning to
2911 	 * the user.
2912 	 */
2913 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2914 		xfs_trans_set_sync(tp);
2915 
2916 	error = xfs_trans_commit(tp);
2917 	if (error)
2918 		goto std_return;
2919 
2920 	if (is_dir && xfs_inode_is_filestream(ip))
2921 		xfs_filestream_deassociate(ip);
2922 
2923 	return 0;
2924 
2925  out_trans_cancel:
2926 	xfs_trans_cancel(tp);
2927  std_return:
2928 	return error;
2929 }
2930 
2931 /*
2932  * Enter all inodes for a rename transaction into a sorted array.
2933  */
2934 #define __XFS_SORT_INODES	5
2935 STATIC void
2936 xfs_sort_for_rename(
2937 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2938 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2939 	struct xfs_inode	*ip1,	/* in: inode of old entry */
2940 	struct xfs_inode	*ip2,	/* in: inode of new entry */
2941 	struct xfs_inode	*wip,	/* in: whiteout inode */
2942 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2943 	int			*num_inodes)  /* in/out: inodes in array */
2944 {
2945 	int			i, j;
2946 
2947 	ASSERT(*num_inodes == __XFS_SORT_INODES);
2948 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2949 
2950 	/*
2951 	 * i_tab contains a list of pointers to inodes.  We initialize
2952 	 * the table here & we'll sort it.  We will then use it to
2953 	 * order the acquisition of the inode locks.
2954 	 *
2955 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2956 	 */
2957 	i = 0;
2958 	i_tab[i++] = dp1;
2959 	i_tab[i++] = dp2;
2960 	i_tab[i++] = ip1;
2961 	if (ip2)
2962 		i_tab[i++] = ip2;
2963 	if (wip)
2964 		i_tab[i++] = wip;
2965 	*num_inodes = i;
2966 
2967 	/*
2968 	 * Sort the elements via bubble sort.  (Remember, there are at
2969 	 * most 5 elements to sort, so this is adequate.)
2970 	 */
2971 	for (i = 0; i < *num_inodes; i++) {
2972 		for (j = 1; j < *num_inodes; j++) {
2973 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2974 				struct xfs_inode *temp = i_tab[j];
2975 				i_tab[j] = i_tab[j-1];
2976 				i_tab[j-1] = temp;
2977 			}
2978 		}
2979 	}
2980 }
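
/*
 * For example (illustrative inode numbers): sorting dp1 = 20, dp2 = 5,
 * ip1 = 9 with no target or whiteout inode yields num_inodes = 3 and
 * i_tab = { 5, 9, 20 }, the ascending i_ino order that
 * xfs_lock_inodes() expects in xfs_rename() below.
 */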
2981 
2982 static int
2983 xfs_finish_rename(
2984 	struct xfs_trans	*tp)
2985 {
2986 	/*
2987 	 * If this is a synchronous mount, make sure that the rename transaction
2988 	 * goes to disk before returning to the user.
2989 	 */
2990 	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2991 		xfs_trans_set_sync(tp);
2992 
2993 	return xfs_trans_commit(tp);
2994 }
2995 
2996 /*
2997  * xfs_cross_rename()
2998  *
2999  * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
3000  */
3001 STATIC int
3002 xfs_cross_rename(
3003 	struct xfs_trans	*tp,
3004 	struct xfs_inode	*dp1,
3005 	struct xfs_name		*name1,
3006 	struct xfs_inode	*ip1,
3007 	struct xfs_inode	*dp2,
3008 	struct xfs_name		*name2,
3009 	struct xfs_inode	*ip2,
3010 	int			spaceres)
3011 {
3012 	int		error = 0;
3013 	int		ip1_flags = 0;
3014 	int		ip2_flags = 0;
3015 	int		dp2_flags = 0;
3016 
3017 	/* Swap inode number for dirent in first parent */
3018 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
3019 	if (error)
3020 		goto out_trans_abort;
3021 
3022 	/* Swap inode number for dirent in second parent */
3023 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
3024 	if (error)
3025 		goto out_trans_abort;
3026 
3027 	/*
3028 	 * If we're renaming one or more directories across different parents,
3029 	 * update the respective ".." entries (and link counts) to match the new
3030 	 * parents.
3031 	 */
3032 	if (dp1 != dp2) {
3033 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3034 
3035 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
3036 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
3037 						dp1->i_ino, spaceres);
3038 			if (error)
3039 				goto out_trans_abort;
3040 
3041 			/* transfer ip2 ".." reference to dp1 */
3042 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
3043 				error = xfs_droplink(tp, dp2);
3044 				if (error)
3045 					goto out_trans_abort;
3046 				xfs_bumplink(tp, dp1);
3047 			}
3048 
3049 			/*
3050 			 * Although ip1 isn't changed here, userspace needs
3051 			 * to be warned about the change, so that applications
3052 			 * relying on it (like backup ones) will properly
3053 			 * notice the change.
3054 			 */
3055 			ip1_flags |= XFS_ICHGTIME_CHG;
3056 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3057 		}
3058 
3059 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
3060 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
3061 						dp2->i_ino, spaceres);
3062 			if (error)
3063 				goto out_trans_abort;
3064 
3065 			/* transfer ip1 ".." reference to dp2 */
3066 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3067 				error = xfs_droplink(tp, dp1);
3068 				if (error)
3069 					goto out_trans_abort;
3070 				xfs_bumplink(tp, dp2);
3071 			}
3072 
3073 			/*
3074 			 * Although ip2 isn't changed here, userspace needs
3075 			 * to be warned about the change, so that applications
3076 			 * relying on it (like backup ones) will properly
3077 			 * notice the change.
3078 			 */
3079 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3080 			ip2_flags |= XFS_ICHGTIME_CHG;
3081 		}
3082 	}
3083 
3084 	if (ip1_flags) {
3085 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
3086 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3087 	}
3088 	if (ip2_flags) {
3089 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
3090 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3091 	}
3092 	if (dp2_flags) {
3093 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
3094 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3095 	}
3096 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3097 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3098 	return xfs_finish_rename(tp);
3099 
3100 out_trans_abort:
3101 	xfs_trans_cancel(tp);
3102 	return error;
3103 }
3104 
3105 /*
3106  * xfs_rename_alloc_whiteout()
3107  *
3108  * Return a referenced, unlinked, unlocked inode that can be used as a
3109  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
3110  * crash between allocating the inode and linking it into the rename
3111  * transaction, recovery will free the inode and we won't leak it.
3112  */
3113 static int
3114 xfs_rename_alloc_whiteout(
3115 	struct xfs_inode	*dp,
3116 	struct xfs_inode	**wip)
3117 {
3118 	struct xfs_inode	*tmpfile;
3119 	int			error;
3120 
3121 	error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
3122 	if (error)
3123 		return error;
3124 
3125 	/*
3126 	 * Prepare the tmpfile inode as if it were created through the VFS.
3127 	 * Complete the inode setup and flag it as linkable.  nlink is already
3128 	 * zero, so we can skip the drop_nlink.
3129 	 */
3130 	xfs_setup_iops(tmpfile);
3131 	xfs_finish_inode_setup(tmpfile);
3132 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
3133 
3134 	*wip = tmpfile;
3135 	return 0;
3136 }
3137 
3138 /*
3139  * xfs_rename
3140  */
3141 int
3142 xfs_rename(
3143 	struct xfs_inode	*src_dp,
3144 	struct xfs_name		*src_name,
3145 	struct xfs_inode	*src_ip,
3146 	struct xfs_inode	*target_dp,
3147 	struct xfs_name		*target_name,
3148 	struct xfs_inode	*target_ip,
3149 	unsigned int		flags)
3150 {
3151 	struct xfs_mount	*mp = src_dp->i_mount;
3152 	struct xfs_trans	*tp;
3153 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
3154 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
3155 	struct xfs_buf		*agibp;
3156 	int			num_inodes = __XFS_SORT_INODES;
3157 	bool			new_parent = (src_dp != target_dp);
3158 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3159 	int			spaceres;
3160 	int			error;
3161 
3162 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3163 
3164 	if ((flags & RENAME_EXCHANGE) && !target_ip)
3165 		return -EINVAL;
3166 
3167 	/*
3168 	 * If we are doing a whiteout operation, allocate the whiteout inode
3169 	 * we will be placing at the target and ensure the type is set
3170 	 * appropriately.
3171 	 */
3172 	if (flags & RENAME_WHITEOUT) {
3173 		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3174 		error = xfs_rename_alloc_whiteout(target_dp, &wip);
3175 		if (error)
3176 			return error;
3177 
3178 		/* setup target dirent info as whiteout */
3179 		src_name->type = XFS_DIR3_FT_CHRDEV;
3180 	}
3181 
3182 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3183 				inodes, &num_inodes);
3184 
3185 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3186 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3187 	if (error == -ENOSPC) {
3188 		spaceres = 0;
3189 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3190 				&tp);
3191 	}
3192 	if (error)
3193 		goto out_release_wip;
3194 
3195 	/*
3196 	 * Attach the dquots to the inodes
3197 	 */
3198 	error = xfs_qm_vop_rename_dqattach(inodes);
3199 	if (error)
3200 		goto out_trans_cancel;
3201 
3202 	/*
3203 	 * Lock all the participating inodes. Depending upon whether
3204 	 * the target_name exists in the target directory, and
3205 	 * whether the target directory is the same as the source
3206 	 * directory, we can lock from 2 to 5 inodes.
3207 	 */
3208 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3209 
3210 	/*
3211 	 * Join all the inodes to the transaction. From this point on,
3212 	 * we can rely on either trans_commit or trans_cancel to unlock
3213 	 * them.
3214 	 */
3215 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3216 	if (new_parent)
3217 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3218 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3219 	if (target_ip)
3220 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3221 	if (wip)
3222 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3223 
3224 	/*
3225 	 * If we are using project inheritance, we only allow renames
3226 	 * into our tree when the project IDs are the same; else the
3227 	 * tree quota mechanism would be circumvented.
3228 	 */
3229 	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3230 		     target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
3231 		error = -EXDEV;
3232 		goto out_trans_cancel;
3233 	}
3234 
3235 	/* RENAME_EXCHANGE is unique from here on. */
3236 	if (flags & RENAME_EXCHANGE)
3237 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3238 					target_dp, target_name, target_ip,
3239 					spaceres);
3240 
3241 	/*
3242 	 * Check for expected errors before we dirty the transaction
3243 	 * so we can return an error without a transaction abort.
3244 	 */
3245 	if (target_ip == NULL) {
3246 		/*
3247 		 * If there's no space reservation, check that the entry will
3248 		 * fit before actually inserting it.
3249 		 */
3250 		if (!spaceres) {
3251 			error = xfs_dir_canenter(tp, target_dp, target_name);
3252 			if (error)
3253 				goto out_trans_cancel;
3254 		}
3255 	} else {
3256 		/*
3257 		 * If target exists and it's a directory, check whether
3258 		 * it can be destroyed.
3259 		 */
3260 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3261 		    (!xfs_dir_isempty(target_ip) ||
3262 		     (VFS_I(target_ip)->i_nlink > 2))) {
3263 			error = -EEXIST;
3264 			goto out_trans_cancel;
3265 		}
3266 	}
3267 
3268 	/*
3269 	 * Directory entry creation below may acquire the AGF. Remove
3270 	 * the whiteout from the unlinked list first to preserve correct
3271 	 * AGI/AGF locking order. This dirties the transaction so failures
3272 	 * after this point will abort and log recovery will clean up the
3273 	 * mess.
3274 	 *
3275 	 * For whiteouts, we need to bump the link count on the whiteout
3276 	 * inode.  After this point we have a real link; clear the tmpfile
3277 	 * state flag from the inode so it doesn't accidentally get misused
3278 	 * in future.
3279 	 */
3280 	if (wip) {
3281 		ASSERT(VFS_I(wip)->i_nlink == 0);
3282 		error = xfs_iunlink_remove(tp, wip);
3283 		if (error)
3284 			goto out_trans_cancel;
3285 
3286 		xfs_bumplink(tp, wip);
3287 		VFS_I(wip)->i_state &= ~I_LINKABLE;
3288 	}
3289 
3290 	/*
3291 	 * Set up the target.
3292 	 */
3293 	if (target_ip == NULL) {
3294 		/*
3295 		 * If target does not exist and the rename crosses
3296 		 * directories, adjust the target directory link count
3297 		 * to account for the ".." reference from the new entry.
3298 		 */
3299 		error = xfs_dir_createname(tp, target_dp, target_name,
3300 					   src_ip->i_ino, spaceres);
3301 		if (error)
3302 			goto out_trans_cancel;
3303 
3304 		xfs_trans_ichgtime(tp, target_dp,
3305 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3306 
3307 		if (new_parent && src_is_directory) {
3308 			xfs_bumplink(tp, target_dp);
3309 		}
3310 	} else { /* target_ip != NULL */
3311 		/*
3312 		 * Link the source inode under the target name.
3313 		 * If the source inode is a directory and we are moving
3314 		 * it across directories, its ".." entry will be
3315 		 * inconsistent until we replace that down below.
3316 		 *
3317 		 * In case there is already an entry with the same
3318 		 * name at the destination directory, remove it first.
3319 		 */
3320 
3321 		/*
3322 		 * Check whether the replace operation will need to allocate
3323 		 * blocks.  This happens when the shortform directory lacks
3324 		 * space and we have to convert it to a block format directory.
3325 		 * When more blocks are necessary, we must lock the AGI first
3326 		 * to preserve locking order (AGI -> AGF).
3327 		 */
3328 		if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
3329 			error = xfs_read_agi(mp, tp,
3330 					XFS_INO_TO_AGNO(mp, target_ip->i_ino),
3331 					&agibp);
3332 			if (error)
3333 				goto out_trans_cancel;
3334 		}
3335 
3336 		error = xfs_dir_replace(tp, target_dp, target_name,
3337 					src_ip->i_ino, spaceres);
3338 		if (error)
3339 			goto out_trans_cancel;
3340 
3341 		xfs_trans_ichgtime(tp, target_dp,
3342 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3343 
3344 		/*
3345 		 * Decrement the link count on the target since the target
3346 		 * dir no longer points to it.
3347 		 */
3348 		error = xfs_droplink(tp, target_ip);
3349 		if (error)
3350 			goto out_trans_cancel;
3351 
3352 		if (src_is_directory) {
3353 			/*
3354 			 * Drop the link from the old "." entry.
3355 			 */
3356 			error = xfs_droplink(tp, target_ip);
3357 			if (error)
3358 				goto out_trans_cancel;
3359 		}
3360 	} /* target_ip != NULL */
3361 
3362 	/*
3363 	 * Remove the source.
3364 	 */
3365 	if (new_parent && src_is_directory) {
3366 		/*
3367 		 * Rewrite the ".." entry to point to the new
3368 		 * directory.
3369 		 */
3370 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3371 					target_dp->i_ino, spaceres);
3372 		ASSERT(error != -EEXIST);
3373 		if (error)
3374 			goto out_trans_cancel;
3375 	}
3376 
3377 	/*
3378 	 * We always want to hit the ctime on the source inode.
3379 	 *
3380 	 * This isn't strictly required by the standards since the source
3381 	 * inode isn't really being changed, but old unix file systems did
3382 	 * it and some incremental backup programs won't work without it.
3383 	 */
3384 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3385 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3386 
3387 	/*
3388 	 * Adjust the link count on src_dp.  This is necessary when
3389 	 * renaming a directory, either within one parent when
3390 	 * the target existed, or across two parent directories.
3391 	 */
3392 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3393 
3394 		/*
3395 		 * Decrement link count on src_directory since the
3396 		 * entry that's moved no longer points to it.
3397 		 */
3398 		error = xfs_droplink(tp, src_dp);
3399 		if (error)
3400 			goto out_trans_cancel;
3401 	}
3402 
3403 	/*
3404 	 * For whiteouts, we only need to update the source dirent with the
3405 	 * inode number of the whiteout inode rather than removing it
3406 	 * altogether.
3407 	 */
3408 	if (wip) {
3409 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3410 					spaceres);
3411 	} else
3412 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3413 					   spaceres);
3414 	if (error)
3415 		goto out_trans_cancel;
3416 
3417 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3418 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3419 	if (new_parent)
3420 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3421 
3422 	error = xfs_finish_rename(tp);
3423 	if (wip)
3424 		xfs_irele(wip);
3425 	return error;
3426 
3427 out_trans_cancel:
3428 	xfs_trans_cancel(tp);
3429 out_release_wip:
3430 	if (wip)
3431 		xfs_irele(wip);
3432 	return error;
3433 }
3434 
3435 static int
3436 xfs_iflush(
3437 	struct xfs_inode	*ip,
3438 	struct xfs_buf		*bp)
3439 {
3440 	struct xfs_inode_log_item *iip = ip->i_itemp;
3441 	struct xfs_dinode	*dip;
3442 	struct xfs_mount	*mp = ip->i_mount;
3443 	int			error;
3444 
3445 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3446 	ASSERT(xfs_isiflocked(ip));
3447 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3448 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3449 	ASSERT(iip->ili_item.li_buf == bp);
3450 
3451 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3452 
3453 	/*
3454 	 * We don't flush the inode if any of the following checks fail, but we
3455 	 * do still update the log item and attach to the backing buffer as if
3456 	 * the flush happened. This is a formality to facilitate predictable
3457 	 * error handling as the caller will shutdown and fail the buffer.
3458 	 */
3459 	error = -EFSCORRUPTED;
3460 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3461 			       mp, XFS_ERRTAG_IFLUSH_1)) {
3462 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3463 			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3464 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3465 		goto flush_out;
3466 	}
3467 	if (S_ISREG(VFS_I(ip)->i_mode)) {
3468 		if (XFS_TEST_ERROR(
3469 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3470 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3471 		    mp, XFS_ERRTAG_IFLUSH_3)) {
3472 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3473 				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
3474 				__func__, ip->i_ino, ip);
3475 			goto flush_out;
3476 		}
3477 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3478 		if (XFS_TEST_ERROR(
3479 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3480 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3481 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3482 		    mp, XFS_ERRTAG_IFLUSH_4)) {
3483 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3484 				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
3485 				__func__, ip->i_ino, ip);
3486 			goto flush_out;
3487 		}
3488 	}
3489 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
3490 				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3491 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3492 			"%s: detected corrupt incore inode %Lu, "
3493 			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3494 			__func__, ip->i_ino,
3495 			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
3496 			ip->i_d.di_nblocks, ip);
3497 		goto flush_out;
3498 	}
3499 	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3500 				mp, XFS_ERRTAG_IFLUSH_6)) {
3501 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3502 			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3503 			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3504 		goto flush_out;
3505 	}
3506 
3507 	/*
3508 	 * Inode item log recovery for v2 inodes is dependent on the
3509 	 * di_flushiter count for correct sequencing: recovery skips log
3510 	 * records whose flush iteration is older than the on-disk inode's.
3511 	 * We bump the count on each flush so such stale records can be
3512 	 * detected. This is redundant because we now log every change, so it
3513 	 * cannot happen, but we still need to do it to stay backwards
3514 	 * compatible with old kernels that predate logging all inode changes.
3515 	 */
3516 	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
3517 		ip->i_d.di_flushiter++;
3518 
3519 	/*
3520 	 * If there are inline format data / attr forks attached to this inode,
3521 	 * make sure they are not corrupt.
3522 	 */
3523 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3524 	    xfs_ifork_verify_local_data(ip))
3525 		goto flush_out;
3526 	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
3527 	    xfs_ifork_verify_local_attr(ip))
3528 		goto flush_out;
3529 
3530 	/*
3531 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3532 	 * copy out the core of the inode, because if the inode is dirty at all
3533 	 * the core must be.
3534 	 */
3535 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3536 
3537 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3538 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3539 		ip->i_d.di_flushiter = 0;
3540 
3541 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3542 	if (XFS_IFORK_Q(ip))
3543 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3544 
3545 	/*
3546 	 * We've recorded everything logged in the inode, so we'd like to clear
3547 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3548 	 * However, we can't stop logging all this information until the data
3549 	 * we've copied into the disk buffer is written to disk.  If we did we
3550 	 * might overwrite the copy of the inode in the log with all the data
3551 	 * after re-logging only part of it, and in the face of a crash we
3552 	 * wouldn't have all the data we need to recover.
3553 	 *
3554 	 * What we do is move the bits to the ili_last_fields field.  When
3555 	 * logging the inode, these bits are moved back to the ili_fields field.
3556 	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3557 	 * know that the information those bits represent is permanently on
3558 	 * disk.  As long as the flush completes before the inode is logged
3559 	 * again, then both ili_fields and ili_last_fields will be cleared.
3560 	 */
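	/*
	 * Illustrative timeline (an editorial sketch of the rules above,
	 * not normative):
	 *
	 *   xfs_trans_log_inode()  ili_fields |= XFS_ILOG_CORE
	 *   xfs_iflush()           ili_last_fields = ili_fields, ili_fields = 0
	 *   <buffer write completes>
	 *   xfs_iflush_done()      ili_last_fields = 0
	 *
	 * If the inode is logged again before the flush completes, the bits
	 * move from ili_last_fields back into ili_fields, so nothing is lost.
	 */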
3561 	error = 0;
3562 flush_out:
3563 	spin_lock(&iip->ili_lock);
3564 	iip->ili_last_fields = iip->ili_fields;
3565 	iip->ili_fields = 0;
3566 	iip->ili_fsync_fields = 0;
3567 	spin_unlock(&iip->ili_lock);
3568 
3569 	/*
3570 	 * Store the current LSN of the inode so that we can tell whether the
3571 	 * item has moved in the AIL from xfs_iflush_done().
3572 	 */
3573 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3574 				&iip->ili_item.li_lsn);
3575 
3576 	/* generate the checksum. */
3577 	xfs_dinode_calc_crc(mp, dip);
3578 	return error;
3579 }
3580 
3581 /*
3582  * Non-blocking flush of dirty inode metadata into the backing buffer.
3583  *
3584  * The caller must have a reference to the inode and hold the cluster buffer
3585  * locked. The function will walk all the inodes attached to the cluster
3586  * buffer that it can lock without blocking, and flush them to the buffer.
3587  *
3588  * On successful flushing of at least one inode, the caller must write out the
3589  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned
3590  * and the caller needs to release the buffer. On failure, the filesystem will
3591  * be shut down, the buffer will have been unlocked and released, and
3592  * -EFSCORRUPTED will be returned.
3593  */
3594 int
3595 xfs_iflush_cluster(
3596 	struct xfs_buf		*bp)
3597 {
3598 	struct xfs_mount	*mp = bp->b_mount;
3599 	struct xfs_log_item	*lip, *n;
3600 	struct xfs_inode	*ip;
3601 	struct xfs_inode_log_item *iip;
3602 	int			clcount = 0;
3603 	int			error = 0;
3604 
3605 	/*
3606 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3607 	 * can remove itself from the list.
3608 	 */
3609 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3610 		iip = (struct xfs_inode_log_item *)lip;
3611 		ip = iip->ili_inode;
3612 
3613 		/*
3614 		 * Quick and dirty check to avoid locks if possible.
3615 		 */
3616 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLOCK))
3617 			continue;
3618 		if (xfs_ipincount(ip))
3619 			continue;
3620 
3621 		/*
3622 		 * The inode is still attached to the buffer, which means it is
3623 		 * dirty but reclaim might try to grab it. Check carefully for
3624 		 * that, and grab the ilock while still holding the i_flags_lock
3625 		 * to guarantee reclaim will not be able to reclaim this inode
3626 		 * once we drop the i_flags_lock.
3627 		 */
3628 		spin_lock(&ip->i_flags_lock);
3629 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3630 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLOCK)) {
3631 			spin_unlock(&ip->i_flags_lock);
3632 			continue;
3633 		}
3634 
3635 		/*
3636 		 * ILOCK will pin the inode against reclaim and prevent
3637 		 * concurrent transactions modifying the inode while we are
3638 		 * flushing the inode.
3639 		 */
3640 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3641 			spin_unlock(&ip->i_flags_lock);
3642 			continue;
3643 		}
3644 		spin_unlock(&ip->i_flags_lock);
3645 
3646 		/*
3647 		 * Skip inodes that are already flush locked as they have
3648 		 * already been written to the buffer.
3649 		 */
3650 		if (!xfs_iflock_nowait(ip)) {
3651 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3652 			continue;
3653 		}
3654 
3655 		/*
3656 		 * Abort flushing this inode if we are shut down because the
3657 		 * inode may not currently be in the AIL. This can occur when
3658 		 * log I/O failure unpins the inode without inserting into the
3659 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
3660 		 * that otherwise looks like it should be flushed.
3661 		 */
3662 		if (XFS_FORCED_SHUTDOWN(mp)) {
3663 			xfs_iunpin_wait(ip);
3664 			/* xfs_iflush_abort() drops the flush lock */
3665 			xfs_iflush_abort(ip);
3666 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3667 			error = -EIO;
3668 			continue;
3669 		}
3670 
3671 		/* don't block waiting on a log force to unpin dirty inodes */
3672 		if (xfs_ipincount(ip)) {
3673 			xfs_ifunlock(ip);
3674 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3675 			continue;
3676 		}
3677 
3678 		if (!xfs_inode_clean(ip))
3679 			error = xfs_iflush(ip, bp);
3680 		else
3681 			xfs_ifunlock(ip);
3682 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
3683 		if (error)
3684 			break;
3685 		clcount++;
3686 	}
3687 
3688 	if (error) {
3689 		bp->b_flags |= XBF_ASYNC;
3690 		xfs_buf_ioend_fail(bp);
3691 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3692 		return error;
3693 	}
3694 
3695 	if (!clcount)
3696 		return -EAGAIN;
3697 
3698 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
3699 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3700 	return 0;
3701 
3702 }
3703 
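/*
 * A minimal caller sketch for xfs_iflush_cluster(), following the contract
 * described in the comment above.  This is an editorial illustration, not
 * kernel code; xfs_example_write_buf() is a hypothetical stand-in for
 * however the real caller writes out and releases the buffer (e.g. delwri
 * queueing).
 */
#if 0	/* illustrative only */
static int
xfs_example_push_cluster(
	struct xfs_buf		*bp)
{
	int			error;

	error = xfs_iflush_cluster(bp);
	if (error == -EAGAIN) {
		/* Nothing was flushed; we still own the buffer, so drop it. */
		xfs_buf_relse(bp);
		return 0;
	}
	if (error) {
		/* Filesystem is shut down; the buffer is already released. */
		return error;
	}
	/* At least one inode was flushed: write out and release the buffer. */
	return xfs_example_write_buf(bp);
}
#endif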
3704 /* Release an inode. */
3705 void
3706 xfs_irele(
3707 	struct xfs_inode	*ip)
3708 {
3709 	trace_xfs_irele(ip, _RET_IP_);
3710 	iput(VFS_I(ip));
3711 }
3712 
3713 /*
3714  * Ensure all committed transactions touching the inode are written to the log.
3715  */
3716 int
3717 xfs_log_force_inode(
3718 	struct xfs_inode	*ip)
3719 {
3720 	xfs_lsn_t		lsn = 0;
3721 
3722 	xfs_ilock(ip, XFS_ILOCK_SHARED);
3723 	if (xfs_ipincount(ip))
3724 		lsn = ip->i_itemp->ili_last_lsn;
3725 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3726 
3727 	if (!lsn)
3728 		return 0;
3729 	return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
3730 }
3731 
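/*
 * Usage sketch for xfs_log_force_inode() (editorial illustration): an
 * fsync-style path would write the data back first and then force the log
 * so that committed but still pinned inode changes become durable.
 * xfs_example_write_data() is a hypothetical placeholder.
 */
#if 0	/* illustrative only */
static int
xfs_example_fsync_metadata(
	struct xfs_inode	*ip)
{
	int			error;

	error = xfs_example_write_data(ip);
	if (error)
		return error;
	return xfs_log_force_inode(ip);
}
#endif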
3732 /*
3733  * Grab the exclusive iolock for a data copy from src to dest, making sure
3734  * to abide by the vfs locking order (the lowest pointer value goes first,
3735  * so one global order prevents ABBA deadlocks) and to break the layout
3736  * leases before proceeding.  The loop is needed because we cannot call the
3737  * blocking break_layout() with the iolocks held, and must back out both locks.
3738  */
3739 static int
3740 xfs_iolock_two_inodes_and_break_layout(
3741 	struct inode		*src,
3742 	struct inode		*dest)
3743 {
3744 	int			error;
3745 
3746 	if (src > dest)
3747 		swap(src, dest);
3748 
3749 retry:
3750 	/* Wait to break both inodes' layouts before we start locking. */
3751 	error = break_layout(src, true);
3752 	if (error)
3753 		return error;
3754 	if (src != dest) {
3755 		error = break_layout(dest, true);
3756 		if (error)
3757 			return error;
3758 	}
3759 
3760 	/* Lock one inode and make sure nobody got in and leased it. */
3761 	inode_lock(src);
3762 	error = break_layout(src, false);
3763 	if (error) {
3764 		inode_unlock(src);
3765 		if (error == -EWOULDBLOCK)
3766 			goto retry;
3767 		return error;
3768 	}
3769 
3770 	if (src == dest)
3771 		return 0;
3772 
3773 	/* Lock the other inode and make sure nobody got in and leased it. */
3774 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3775 	error = break_layout(dest, false);
3776 	if (error) {
3777 		inode_unlock(src);
3778 		inode_unlock(dest);
3779 		if (error == -EWOULDBLOCK)
3780 			goto retry;
3781 		return error;
3782 	}
3783 
3784 	return 0;
3785 }
3786 
3787 /*
3788  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3789  * mmap activity.
3790  */
3791 int
3792 xfs_ilock2_io_mmap(
3793 	struct xfs_inode	*ip1,
3794 	struct xfs_inode	*ip2)
3795 {
3796 	int			ret;
3797 
3798 	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3799 	if (ret)
3800 		return ret;
3801 	if (ip1 == ip2)
3802 		xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3803 	else
3804 		xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
3805 				    ip2, XFS_MMAPLOCK_EXCL);
3806 	return 0;
3807 }
3808 
3809 /* Unlock both inodes to allow IO and mmap activity. */
3810 void
3811 xfs_iunlock2_io_mmap(
3812 	struct xfs_inode	*ip1,
3813 	struct xfs_inode	*ip2)
3814 {
3815 	bool			same_inode = (ip1 == ip2);
3816 
3817 	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3818 	if (!same_inode)
3819 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3820 	inode_unlock(VFS_I(ip2));
3821 	if (!same_inode)
3822 		inode_unlock(VFS_I(ip1));
3823 }
3824
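/*
 * Pairing sketch for the two helpers above (editorial illustration):
 * xfs_example_copy_work() is a hypothetical placeholder for the remap or
 * copy operation performed while file IO and mmap activity are locked out.
 */
#if 0	/* illustrative only */
static int
xfs_example_two_inode_op(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			error;

	error = xfs_ilock2_io_mmap(ip1, ip2);
	if (error)
		return error;

	error = xfs_example_copy_work(ip1, ip2);

	xfs_iunlock2_io_mmap(ip1, ip2);
	return error;
}
#endif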