/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);

STATIC int xfs_iunlink_remove(xfs_trans_t *, xfs_inode_t *);
/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
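
/*
 * Illustrative usage of the wrappers above (a sketch, not code from this
 * file): the returned lock mode must be handed back to xfs_iunlock(),
 * because the wrapper may have taken the lock exclusively.
 *
 *	uint lock_mode = xfs_ilock_data_map_shared(ip);
 *	... read the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */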

/*
 * The xfs inode contains two locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The two locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
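
/*
 * Example (illustrative only, not from this file): both locks can be
 * taken in the required IO-lock-first order with a single call, then
 * dropped with the same flags.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the file data and its metadata ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */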

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
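
/*
 * Illustrative trylock pattern (a sketch, not from this file): callers
 * that must not sleep back off when the locks are contended.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		... back off, requeue the work, and retry later ...
 *	} else {
 *		... critical section ...
 *		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	}
 */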

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
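
/*
 * Illustrative demotion pattern (a sketch, not from this file): take the
 * lock exclusively for a short setup phase, then downgrade to shared so
 * concurrent readers can proceed during the longer read-only phase.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... exclusive setup ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... read-only work under XFS_ILOCK_SHARED ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */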

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif

#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with
 * a different value
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;

	return lock_mode;
}

/*
 * The following routine will lock n inodes in exclusive mode.
 * We assume the caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock
 * is in the AIL and we start waiting for another inode that is locked
 * by a thread in a long-running transaction (such as truncate). This can
 * result in deadlock since the long-running transaction might need to wait
 * for the inode we just locked in order to push the tail and free space
 * in the log.
 */
void
xfs_lock_inodes(
	xfs_inode_t	**ips,
	int		inodes,
	uint		lock_mode)
{
	int		attempts = 0, i, j, try_lock;
	xfs_log_item_t	*lp;

	ASSERT(ips && (inodes >= 2)); /* we need at least two */

	try_lock = 0;
	i = 0;

again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i-1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes
		 * are not in the AIL.
		 * If any are, set try_lock to be used later.
		 */

		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = (xfs_log_item_t *)ips[j]->i_itemp;
				if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
					try_lock++;
				}
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */

		if (try_lock) {
			/* try_lock must be 0 if i is 0. */
			/*
			 * try_lock means we have an inode locked
			 * that is in the AIL.
			 */
			ASSERT(i != 0);
			if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
				attempts++;

				/*
				 * Unlock all previous guys and try again.
				 * xfs_iunlock will try to push the tail
				 * if the inode is in the AIL.
				 */

				for (j = i - 1; j >= 0; j--) {

					/*
					 * Check to see if we've already
					 * unlocked this one.
					 * Not the first one going back,
					 * and the inode ptr is the same.
					 */
					if ((j != (i - 1)) && ips[j] ==
								ips[j+1])
						continue;

					xfs_iunlock(ips[j], lock_mode);
				}

				if ((attempts % 5) == 0) {
					delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
					xfs_lock_delays++;
#endif
				}
				i = 0;
				try_lock = 0;
				goto again;
			}
		} else {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
		}
	}

#ifdef DEBUG
	if (attempts) {
		if (attempts < 5)
			xfs_small_retries++;
		else if (attempts < 100)
			xfs_middle_retries++;
		else
			xfs_lots_retries++;
	} else {
		xfs_locked_n++;
	}
#endif
}

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock
 * at a time - the iolock or the ilock, but not both at once. If
 * we lock both at once, lockdep will report false positives saying
 * we have violated locking orders.
 */
void
xfs_lock_two_inodes(
	xfs_inode_t		*ip0,
	xfs_inode_t		*ip1,
	uint			lock_mode)
{
	xfs_inode_t		*temp;
	int			attempts = 0;
	xfs_log_item_t		*lp;

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
		ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = (xfs_log_item_t *)ip0->i_itemp;
	if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
			xfs_iunlock(ip0, lock_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
	}
}
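
/*
 * Illustrative usage (a sketch; see xfs_link() below for a real caller):
 * the two inodes may be passed in either order, since the routine sorts
 * them by i_ino before locking.
 *
 *	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
 */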

void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}

STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;
	uint			lock_mode;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return XFS_ERROR(EIO);

	lock_mode = xfs_ilock_data_map_shared(dp);
	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	xfs_iunlock(dp, lock_mode);

	if (error)
		goto out;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out:
	*ipp = NULL;
	return error;
}
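
/*
 * Illustrative caller-side flow for xfs_lookup() (a sketch, not from this
 * file): translate a name in directory dp into a referenced inode.  The
 * xname variable below is an assumed, caller-built struct xfs_name.
 *
 *	struct xfs_inode *ip;
 *	int error = xfs_lookup(dp, &xname, &ip, NULL);
 *	if (!error) {
 *		... use ip ...
 *		IRELE(ip);
 *	}
 */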

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	timespec_t	tv;
	int		filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
	ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&mp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		ASSERT(ip->i_d.di_ino == ino);
		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
		ip->i_d.di_crc = 0;
		ip->i_d.di_changecount = 1;
		ip->i_d.di_lsn = 0;
		ip->i_d.di_flags2 = 0;
		memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
		ip->i_d.di_crtime = ip->i_d.di_mtime;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}
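
/*
 * Illustrative two-phase caller loop for xfs_ialloc() (a condensed sketch
 * of the protocol described above; xfs_dir_ialloc() below is the real
 * implementation):
 *
 *	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
 *			  &ialloc_context, &ip);
 *	if (ialloc_context) {
 *		... bhold the buffer, commit tp, reserve a new transaction,
 *		    rejoin the buffer, then call xfs_ialloc() again ...
 *	}
 */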

/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory within which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,		/* project id */
	int		okalloc,	/* ok to allocate new space */
	xfs_inode_t	**ipp,		/* pointer to inode; it will be
					   locked. */
	int		*committed)
{
	xfs_trans_t	*tp;
	xfs_trans_t	*ntp;
	xfs_inode_t	*ip;
	xfs_buf_t	*ialloc_context = NULL;
	int		code;
	void		*dqinfo;
	uint		tflags;

	tp = *tpp;
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context. We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
			  &ialloc_context, &ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return XFS_ERROR(ENOSPC);
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		struct xfs_trans_res tres;

		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);
		/*
		 * Save the log reservation so we can use
		 * it in the next transaction.
		 */
		tres.tr_logres = xfs_trans_get_log_res(tp);
		tres.tr_logcount = xfs_trans_get_log_count(tp);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		ntp = xfs_trans_dup(tp);
		code = xfs_trans_commit(tp, 0);
		tp = ntp;
		if (committed != NULL) {
			*committed = 1;
		}
		/*
		 * If we get an error during the commit processing,
		 * release the buffer that is still held and return
		 * to the caller.
		 */
		if (code) {
			xfs_buf_relse(ialloc_context);
			if (dqinfo) {
				tp->t_dqinfo = dqinfo;
				xfs_trans_free_dqinfo(tp);
			}
			*tpp = ntp;
			*ipp = NULL;
			return code;
		}

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup().
		 */
		xfs_log_ticket_put(tp->t_ticket);
		tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
		code = xfs_trans_reserve(tp, &tres, 0, 0);

		/*
		 * Re-attach the quota info that we detached from the previous
		 * transaction.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = ntp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  okalloc, &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);

	} else {
		if (committed != NULL)
			*committed = 0;
	}

	*ipp = ip;
	*tpp = tp;

	return 0;
}

/*
 * Decrement the link count on an inode & log the change.
 * If this causes the link count to go to zero, initiate the
 * logging activity required to truncate a file.
 */
int				/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	int	error;

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	ASSERT(ip->i_d.di_nlink > 0);
	ip->i_d.di_nlink--;
	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = 0;
	if (ip->i_d.di_nlink == 0) {
		/*
		 * We're dropping the last link to this file.
		 * Move the on-disk inode to the AGI unlinked list.
		 * From xfs_inactive() we will pull the inode from
		 * the list and free it.
		 */
		error = xfs_iunlink(tp, ip);
	}
	return error;
}

/*
 * This gets called when the inode's version needs to be changed from 1 to 2.
 * Currently this happens when the nlink field overflows the old 16-bit value
 * or when chproj is called to change the project for the first time.
 * As a side effect the superblock version will also be bumped
 * to contain the NLINK bit.
 */
void
xfs_bump_ino_vers2(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_version == 1);

	ip->i_d.di_version = 2;
	ip->i_d.di_onlink = 0;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
	mp = tp->t_mountp;
	if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			xfs_sb_version_addnlink(&mp->m_sb);
			spin_unlock(&mp->m_sb_lock);
			xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
		} else {
			spin_unlock(&mp->m_sb_lock);
		}
	}
	/* Caller must log the inode */
}

/*
 * Increment the link count on an inode & log the change.
 */
int
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	ASSERT(ip->i_d.di_nlink > 0 || (VFS_I(ip)->i_state & I_LINKABLE));
	ip->i_d.di_nlink++;
	inc_nlink(VFS_I(ip));
	if ((ip->i_d.di_version == 1) &&
	    (ip->i_d.di_nlink > XFS_MAXLINK_1)) {
		/*
		 * The inode has increased its number of links beyond
		 * what can fit in an old format inode.  It now needs
		 * to be converted to a version 2 inode with a 32 bit
		 * link count.  If this is the first inode in the file
		 * system to do this, then we need to bump the superblock
		 * version number as well.
		 */
		xfs_bump_ino_vers2(tp, ip);
	}

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return 0;
}

int
xfs_create(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	xfs_dev_t		rdev,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	bool			unlock_dp_on_error = false;
	uint			cancel_flags;
	int			committed;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	tres;
	uint			resblks;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
					xfs_kgid_to_gid(current_fsgid()), prid,
					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
					&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		rdev = 0;
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres.tr_logres = M_RES(mp)->tr_mkdir.tr_logres;
		tres.tr_logcount = XFS_MKDIR_LOG_COUNT;
		tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres.tr_logres = M_RES(mp)->tr_create.tr_logres;
		tres.tr_logcount = XFS_CREATE_LOG_COUNT;
		tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
	}

	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(tp, &tres, resblks, 0);
	if (error == ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_reserve(tp, &tres, resblks, 0);
	}
	if (error == ENOSPC) {
		/* No space at all so try a "no-allocation" reservation */
		resblks = 0;
		error = xfs_trans_reserve(tp, &tres, 0, 0);
	}
	if (error) {
		cancel_flags = 0;
		goto out_trans_cancel;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	xfs_bmap_init(&free_list, &first_block);

	/*
	 * Reserve disk quota and the inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_canenter(tp, dp, name, resblks);
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
			       prid, resblks > 0, &ip, &committed);
	if (error) {
		if (error == ENOSPC)
			goto out_trans_cancel;
		goto out_trans_abort;
	}

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					&first_block, &free_list, resblks ?
					resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
	if (error) {
		ASSERT(error != ENOSPC);
		goto out_trans_abort;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_bmap_cancel;

		error = xfs_bumplink(tp, dp);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
 out_trans_abort:
	cancel_flags |= XFS_TRANS_ABORT;
 out_trans_cancel:
	xfs_trans_cancel(tp, cancel_flags);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to
	 * release the inode.  This prevents recursive transactions
	 * and deadlocks from xfs_inactive.
	 */
	if (ip)
		IRELE(ip);

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	struct xfs_inode	*dp,
	struct dentry		*dentry,
	umode_t			mode)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	uint			cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
				xfs_kgid_to_gid(current_fsgid()), prid,
				XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
				&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE_TMPFILE);

	tres = &M_RES(mp)->tr_create_tmpfile;
	error = xfs_trans_reserve(tp, tres, resblks, 0);
	if (error == ENOSPC) {
		/* No space at all so try a "no-allocation" reservation */
		resblks = 0;
		error = xfs_trans_reserve(tp, tres, 0, 0);
	}
	if (error) {
		cancel_flags = 0;
		goto out_trans_cancel;
	}

	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
				prid, resblks > 0, &ip, NULL);
	if (error) {
		if (error == ENOSPC)
			goto out_trans_cancel;
		goto out_trans_abort;
	}

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	ip->i_d.di_nlink--;
	d_tmpfile(dentry, VFS_I(ip));
	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_abort;

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return 0;

 out_trans_abort:
	cancel_flags |= XFS_TRANS_ABORT;
 out_trans_cancel:
	xfs_trans_cancel(tp, cancel_flags);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to
	 * release the inode.  This prevents recursive transactions
	 * and deadlocks from xfs_inactive.
	 */
	if (ip)
		IRELE(ip);

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	int			cancel_flags;
	int			committed;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(sip->i_d.di_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(sip, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp, 0);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
	if (error == ENOSPC) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
	}
	if (error) {
		cancel_flags = 0;
		goto error_return;
	}

	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
		error = XFS_ERROR(EXDEV);
		goto error_return;
	}

	error = xfs_dir_canenter(tp, tdp, target_name, resblks);
	if (error)
		goto error_return;

	xfs_bmap_init(&free_list, &first_block);

	if (sip->i_d.di_nlink == 0) {
		error = xfs_iunlink_remove(tp, sip);
		if (error)
			goto abort_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
					&first_block, &free_list, resblks);
	if (error)
		goto abort_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	error = xfs_bumplink(tp, sip);
	if (error)
		goto abort_return;

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		goto abort_return;
	}

	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

 abort_return:
	cancel_flags |= XFS_TRANS_ABORT;
 error_return:
	xfs_trans_cancel(tp, cancel_flags);
 std_return:
	return error;
}

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  A transaction (possibly a
 * new one) will be returned to the caller to be committed.  The incoming
 * transaction must already include the inode, and both inode locks must be
 * held exclusively.  The inode must also be "held" within the transaction.
 * On return the inode will be "held" within the returned transaction.
 * This routine does NOT require any disk space to be reserved for it
 * within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into
 * the current transaction. This keeps things simple for the higher level
 * code, because it always knows that the inode is locked and held in the
 * transaction that returns to it whether errors occur or not.  We don't
 * mark the inode dirty on error so that transactions can be easily aborted
 * if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			committed;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (i.e. it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (committed)
			xfs_trans_ijoin(tp, ip, 0);
		if (error)
			goto out_bmap_cancel;

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		tp = ntp;

		xfs_trans_ijoin(tp, ip, 0);

		if (error)
			goto out;

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup().
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}
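
/*
 * Illustrative caller contract for xfs_itruncate_extents() (a sketch;
 * xfs_inactive_truncate() below is a real caller): the inode must be
 * joined to a permanent transaction, and the possibly-rolled transaction
 * handed back must still be committed or cancelled by the caller.
 *
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	if (!error)
 *		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 */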

int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error;

	if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we are using filestreams, and we have an unlinked
		 * file that we are processing the last close on, then nothing
		 * will be able to reopen and write to this file. Purge this
		 * inode from the filestreams cache so that it doesn't delay
		 * teardown of the inode.
		 */
		if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
			xfs_filestream_deassociate(ip);

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) {
				error = -filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (ip->i_d.di_nlink == 0)
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {

		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_sem
		 * otherwise.  We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 *
		 * Further, if the inode is being opened, written and closed
		 * frequently and we have delayed allocation blocks
		 * outstanding (e.g. streaming writes from the NFS server),
		 * truncating the blocks past EOF will cause fragmentation to
		 * occur.
		 *
		 * In this case don't do the truncation, either, but we have to
		 * be careful how we detect this case. Blocks beyond EOF show
		 * up as i_delayed_blks even when the inode is clean, so we
		 * need to truncate them away first before checking for a dirty
		 * release. Hence on the first dirty close we will still remove
		 * the speculative allocation, but after that we will leave it
		 * in place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;

		error = xfs_free_eofblocks(mp, ip, true);
		if (error && error != EAGAIN)
			return error;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
}

/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_setattr_size() for details.
	 */
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_d.di_nextents == 0);

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
1822 
1823 /*
1824  * xfs_inactive_ifree()
1825  *
1826  * Perform the inode free when an inode is unlinked.
1827  */
1828 STATIC int
1829 xfs_inactive_ifree(
1830 	struct xfs_inode *ip)
1831 {
1832 	xfs_bmap_free_t		free_list;
1833 	xfs_fsblock_t		first_block;
1834 	int			committed;
1835 	struct xfs_mount	*mp = ip->i_mount;
1836 	struct xfs_trans	*tp;
1837 	int			error;
1838 
1839 	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
1840 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
1841 	if (error) {
1842 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1843 		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
1844 		return error;
1845 	}
1846 
1847 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1848 	xfs_trans_ijoin(tp, ip, 0);
1849 
1850 	xfs_bmap_init(&free_list, &first_block);
1851 	error = xfs_ifree(tp, ip, &free_list);
1852 	if (error) {
1853 		/*
1854 		 * If we fail to free the inode, shut down.  The cancel
1855 		 * might do that for us, but we need to make sure.  Otherwise the
1856 		 * inode might be lost for a long time or forever.
1857 		 */
1858 		if (!XFS_FORCED_SHUTDOWN(mp)) {
1859 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1860 				__func__, error);
1861 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1862 		}
1863 		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
1864 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1865 		return error;
1866 	}
1867 
1868 	/*
1869 	 * Credit the quota account(s). The inode is gone.
1870 	 */
1871 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1872 
1873 	/*
1874 	 * Just ignore errors at this point.  There is nothing we can
1875 	 * do except to try to keep going. Make sure it's not a silent
1876 	 * error.
1877 	 */
1878 	error = xfs_bmap_finish(&tp,  &free_list, &committed);
1879 	if (error)
1880 		xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
1881 			__func__, error);
1882 	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1883 	if (error)
1884 		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1885 			__func__, error);
1886 
1887 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1888 	return 0;
1889 }
1890 
1891 /*
1892  * xfs_inactive
1893  *
1894  * This is called when the vnode reference count for the vnode
1895  * goes to zero.  If the file has been unlinked, then it must
1896  * now be truncated.  Also, we clear all of the read-ahead state
1897  * kept for the inode here since the file is now closed.
1898  */
1899 void
1900 xfs_inactive(
1901 	xfs_inode_t	*ip)
1902 {
1903 	struct xfs_mount	*mp;
1904 	int			error;
1905 	int			truncate = 0;
1906 
1907 	/*
1908 	 * If the inode is already free, then there can be nothing
1909 	 * to clean up here.
1910 	 */
1911 	if (ip->i_d.di_mode == 0) {
1912 		ASSERT(ip->i_df.if_real_bytes == 0);
1913 		ASSERT(ip->i_df.if_broot_bytes == 0);
1914 		return;
1915 	}
1916 
1917 	mp = ip->i_mount;
1918 
1919 	/* If this is a read-only mount, don't do this (would generate I/O) */
1920 	if (mp->m_flags & XFS_MOUNT_RDONLY)
1921 		return;
1922 
1923 	if (ip->i_d.di_nlink != 0) {
1924 		/*
1925 		 * force is true because we are evicting an inode from the
1926 		 * cache. Post-eof blocks must be freed, lest we end up with
1927 		 * broken free space accounting.
1928 		 */
1929 		if (xfs_can_free_eofblocks(ip, true))
1930 			xfs_free_eofblocks(mp, ip, false);
1931 
1932 		return;
1933 	}
1934 
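	/*
	 * A regular file still holding data, extents or delalloc blocks
	 * needs a truncate pass before the inode itself can be freed.
	 */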
1935 	if (S_ISREG(ip->i_d.di_mode) &&
1936 	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1937 	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1938 		truncate = 1;
1939 
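	/* attach dquots first so the block frees below adjust quota usage */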
1940 	error = xfs_qm_dqattach(ip, 0);
1941 	if (error)
1942 		return;
1943 
1944 	if (S_ISLNK(ip->i_d.di_mode))
1945 		error = xfs_inactive_symlink(ip);
1946 	else if (truncate)
1947 		error = xfs_inactive_truncate(ip);
1948 	if (error)
1949 		return;
1950 
1951 	/*
1952 	 * If there are attributes associated with the file then blow them away
1953 	 * now.  The code calls a routine that recursively deconstructs the
1954 	 * attribute fork.  We need to just commit the current transaction
1955 	 * because we can't use it for xfs_attr_inactive().
1956 	 */
1957 	if (ip->i_d.di_anextents > 0) {
1958 		ASSERT(ip->i_d.di_forkoff != 0);
1959 
1960 		error = xfs_attr_inactive(ip);
1961 		if (error)
1962 			return;
1963 	}
1964 
1965 	if (ip->i_afp)
1966 		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
1967 
1968 	ASSERT(ip->i_d.di_anextents == 0);
1969 
1970 	/*
1971 	 * Free the inode.
1972 	 */
1973 	error = xfs_inactive_ifree(ip);
1974 	if (error)
1975 		return;
1976 
1977 	/*
1978 	 * Release the dquots held by inode, if any.
1979 	 */
1980 	xfs_qm_dqdetach(ip);
1981 }
1982 
1983 /*
1984  * This is called when the inode's link count goes to 0.
1985  * We place the on-disk inode on a list in the AGI.  It
1986  * will be pulled from this list when the inode is freed.
1987  */
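/*
 * Illustrative example: with bucket head 75 -> 42 -> NULLAGINO, inserting
 * agino 103 copies the old head into 103's di_next_unlinked and then points
 * the bucket head at 103, giving 103 -> 75 -> 42 -> NULLAGINO.
 */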
1988 int
1989 xfs_iunlink(
1990 	xfs_trans_t	*tp,
1991 	xfs_inode_t	*ip)
1992 {
1993 	xfs_mount_t	*mp;
1994 	xfs_agi_t	*agi;
1995 	xfs_dinode_t	*dip;
1996 	xfs_buf_t	*agibp;
1997 	xfs_buf_t	*ibp;
1998 	xfs_agino_t	agino;
1999 	short		bucket_index;
2000 	int		offset;
2001 	int		error;
2002 
2003 	ASSERT(ip->i_d.di_nlink == 0);
2004 	ASSERT(ip->i_d.di_mode != 0);
2005 
2006 	mp = tp->t_mountp;
2007 
2008 	/*
2009 	 * Get the agi buffer first.  It ensures lock ordering
2010 	 * on the list.
2011 	 */
2012 	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
2013 	if (error)
2014 		return error;
2015 	agi = XFS_BUF_TO_AGI(agibp);
2016 
2017 	/*
2018 	 * Get the index into the agi hash table for the
2019 	 * list this inode will go on.
2020 	 */
2021 	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2022 	ASSERT(agino != 0);
2023 	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2024 	ASSERT(agi->agi_unlinked[bucket_index]);
2025 	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
2026 
2027 	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
2028 		/*
2029 		 * There is already another inode in the bucket we need
2030 		 * to add ourselves to.  Add us at the front of the list.
2031 		 * Here we put the head pointer into our next pointer,
2032 		 * and then we fall through to point the head at us.
2033 		 */
2034 		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2035 				       0, 0);
2036 		if (error)
2037 			return error;
2038 
2039 		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
2040 		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
2041 		offset = ip->i_imap.im_boffset +
2042 			offsetof(xfs_dinode_t, di_next_unlinked);
2043 
2044 		/* need to recalc the inode CRC if appropriate */
2045 		xfs_dinode_calc_crc(mp, dip);
2046 
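		/*
		 * Mark the buffer as an inode buffer for log recovery, then
		 * log only the next_unlinked field we just modified.
		 */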
2047 		xfs_trans_inode_buf(tp, ibp);
2048 		xfs_trans_log_buf(tp, ibp, offset,
2049 				  (offset + sizeof(xfs_agino_t) - 1));
2050 		xfs_inobp_check(mp, ibp);
2051 	}
2052 
2053 	/*
2054 	 * Point the bucket head pointer at the inode being inserted.
2055 	 */
2056 	ASSERT(agino != 0);
2057 	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
2058 	offset = offsetof(xfs_agi_t, agi_unlinked) +
2059 		(sizeof(xfs_agino_t) * bucket_index);
2060 	xfs_trans_log_buf(tp, agibp, offset,
2061 			  (offset + sizeof(xfs_agino_t) - 1));
2062 	return 0;
2063 }
2064 
2065 /*
2066  * Pull the on-disk inode from the AGI unlinked list.
2067  */
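/*
 * Illustrative example: removing agino 75 from a bucket chain
 * 103 -> 75 -> 42 rewrites 103's di_next_unlinked from 75 to 42;
 * removing the head (103) instead points the bucket head at 75.
 */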
2068 STATIC int
2069 xfs_iunlink_remove(
2070 	xfs_trans_t	*tp,
2071 	xfs_inode_t	*ip)
2072 {
2073 	xfs_ino_t	next_ino;
2074 	xfs_mount_t	*mp;
2075 	xfs_agi_t	*agi;
2076 	xfs_dinode_t	*dip;
2077 	xfs_buf_t	*agibp;
2078 	xfs_buf_t	*ibp;
2079 	xfs_agnumber_t	agno;
2080 	xfs_agino_t	agino;
2081 	xfs_agino_t	next_agino;
2082 	xfs_buf_t	*last_ibp;
2083 	xfs_dinode_t	*last_dip = NULL;
2084 	short		bucket_index;
2085 	int		offset, last_offset = 0;
2086 	int		error;
2087 
2088 	mp = tp->t_mountp;
2089 	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2090 
2091 	/*
2092 	 * Get the agi buffer first.  It ensures lock ordering
2093 	 * on the list.
2094 	 */
2095 	error = xfs_read_agi(mp, tp, agno, &agibp);
2096 	if (error)
2097 		return error;
2098 
2099 	agi = XFS_BUF_TO_AGI(agibp);
2100 
2101 	/*
2102 	 * Get the index into the agi hash table for the
2103 	 * list this inode is on.
2104 	 */
2105 	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2106 	ASSERT(agino != 0);
2107 	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2108 	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
2109 	ASSERT(agi->agi_unlinked[bucket_index]);
2110 
2111 	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2112 		/*
2113 		 * We're at the head of the list.  Get the inode's on-disk
2114 		 * buffer to see if there is anyone after us on the list.
2115 		 * Only modify our next pointer if it is not already NULLAGINO.
2116 		 * This saves us the overhead of dealing with the buffer when
2117 		 * there is no need to change it.
2118 		 */
2119 		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2120 				       0, 0);
2121 		if (error) {
2122 			xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2123 				__func__, error);
2124 			return error;
2125 		}
2126 		next_agino = be32_to_cpu(dip->di_next_unlinked);
2127 		ASSERT(next_agino != 0);
2128 		if (next_agino != NULLAGINO) {
2129 			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2130 			offset = ip->i_imap.im_boffset +
2131 				offsetof(xfs_dinode_t, di_next_unlinked);
2132 
2133 			/* need to recalc the inode CRC if appropriate */
2134 			xfs_dinode_calc_crc(mp, dip);
2135 
2136 			xfs_trans_inode_buf(tp, ibp);
2137 			xfs_trans_log_buf(tp, ibp, offset,
2138 					  (offset + sizeof(xfs_agino_t) - 1));
2139 			xfs_inobp_check(mp, ibp);
2140 		} else {
2141 			xfs_trans_brelse(tp, ibp);
2142 		}
2143 		/*
2144 		 * Point the bucket head pointer at the next inode.
2145 		 */
2146 		ASSERT(next_agino != 0);
2147 		ASSERT(next_agino != agino);
2148 		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2149 		offset = offsetof(xfs_agi_t, agi_unlinked) +
2150 			(sizeof(xfs_agino_t) * bucket_index);
2151 		xfs_trans_log_buf(tp, agibp, offset,
2152 				  (offset + sizeof(xfs_agino_t) - 1));
2153 	} else {
2154 		/*
2155 		 * We need to search the list for the inode being freed.
2156 		 */
2157 		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2158 		last_ibp = NULL;
2159 		while (next_agino != agino) {
2160 			struct xfs_imap	imap;
2161 
2162 			if (last_ibp)
2163 				xfs_trans_brelse(tp, last_ibp);
2164 
2165 			imap.im_blkno = 0;
2166 			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2167 
2168 			error = xfs_imap(mp, tp, next_ino, &imap, 0);
2169 			if (error) {
2170 				xfs_warn(mp,
2171 	"%s: xfs_imap returned error %d.",
2172 					 __func__, error);
2173 				return error;
2174 			}
2175 
2176 			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
2177 					       &last_ibp, 0, 0);
2178 			if (error) {
2179 				xfs_warn(mp,
2180 	"%s: xfs_imap_to_bp returned error %d.",
2181 					__func__, error);
2182 				return error;
2183 			}
2184 
2185 			last_offset = imap.im_boffset;
2186 			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
2187 			ASSERT(next_agino != NULLAGINO);
2188 			ASSERT(next_agino != 0);
2189 		}
2190 
2191 		/*
2192 		 * Now last_ibp points to the buffer previous to us on the
2193 		 * unlinked list.  Pull us from the list.
2194 		 */
2195 		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2196 				       0, 0);
2197 		if (error) {
2198 			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
2199 				__func__, error);
2200 			return error;
2201 		}
2202 		next_agino = be32_to_cpu(dip->di_next_unlinked);
2203 		ASSERT(next_agino != 0);
2204 		ASSERT(next_agino != agino);
2205 		if (next_agino != NULLAGINO) {
2206 			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2207 			offset = ip->i_imap.im_boffset +
2208 				offsetof(xfs_dinode_t, di_next_unlinked);
2209 
2210 			/* need to recalc the inode CRC if appropriate */
2211 			xfs_dinode_calc_crc(mp, dip);
2212 
2213 			xfs_trans_inode_buf(tp, ibp);
2214 			xfs_trans_log_buf(tp, ibp, offset,
2215 					  (offset + sizeof(xfs_agino_t) - 1));
2216 			xfs_inobp_check(mp, ibp);
2217 		} else {
2218 			xfs_trans_brelse(tp, ibp);
2219 		}
2220 		/*
2221 		 * Point the previous inode on the list to the next inode.
2222 		 */
2223 		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
2224 		ASSERT(next_agino != 0);
2225 		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2226 
2227 		/* need to recalc the inode CRC if appropriate */
2228 		xfs_dinode_calc_crc(mp, last_dip);
2229 
2230 		xfs_trans_inode_buf(tp, last_ibp);
2231 		xfs_trans_log_buf(tp, last_ibp, offset,
2232 				  (offset + sizeof(xfs_agino_t) - 1));
2233 		xfs_inobp_check(mp, last_ibp);
2234 	}
2235 	return 0;
2236 }
2237 
2238 /*
2239  * A big issue when freeing the inode cluster is that we _cannot_ skip any
2240  * inodes that are in memory - they all must be marked stale and attached to
2241  * the cluster buffer.
2242  */
2243 STATIC int
2244 xfs_ifree_cluster(
2245 	xfs_inode_t	*free_ip,
2246 	xfs_trans_t	*tp,
2247 	xfs_ino_t	inum)
2248 {
2249 	xfs_mount_t		*mp = free_ip->i_mount;
2250 	int			blks_per_cluster;
2251 	int			inodes_per_cluster;
2252 	int			nbufs;
2253 	int			i, j;
2254 	xfs_daddr_t		blkno;
2255 	xfs_buf_t		*bp;
2256 	xfs_inode_t		*ip;
2257 	xfs_inode_log_item_t	*iip;
2258 	xfs_log_item_t		*lip;
2259 	struct xfs_perag	*pag;
2260 
2261 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
2262 	blks_per_cluster = xfs_icluster_size_fsb(mp);
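	/*
	 * sb_inopblog is log2(inodes per block), so the shift below
	 * converts cluster blocks to inodes per cluster.
	 */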
2263 	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
2264 	nbufs = mp->m_ialloc_blks / blks_per_cluster;
2265 
2266 	for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
2267 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2268 					 XFS_INO_TO_AGBNO(mp, inum));
2269 
2270 		/*
2271 		 * We obtain and lock the backing buffer first in the process
2272 		 * here, as we have to ensure that any dirty inode that we
2273 		 * can't get the flush lock on is attached to the buffer.
2274 		 * If we scan the in-memory inodes first, then buffer IO can
2275 		 * complete before we get a lock on it, and hence we may fail
2276 		 * to mark all the active inodes on the buffer stale.
2277 		 */
2278 		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2279 					mp->m_bsize * blks_per_cluster,
2280 					XBF_UNMAPPED);
2281 
2282 		if (!bp)
2283 			return ENOMEM;
2284 
2285 		/*
2286 		 * This buffer may not have been correctly initialised as we
2287 		 * didn't read it from disk. That's not important because we are
2288 		 * only using to mark the buffer as stale in the log, and to
2289 		 * attach stale cached inodes on it. That means it will never be
2290 		 * dispatched for IO. If it is, we want to know about it, and we
2291 		 * want it to fail. We can acheive this by adding a write
2292 		 * verifier to the buffer.
2293 		 */
2294 		bp->b_ops = &xfs_inode_buf_ops;
2295 
2296 		/*
2297 		 * Walk the inodes already attached to the buffer and mark them
2298 		 * stale. These will all have the flush locks held, so an
2299 		 * in-memory inode walk can't lock them. By marking them all
2300 		 * stale first, we will not attempt to lock them in the loop
2301 		 * below as the XFS_ISTALE flag will be set.
2302 		 */
2303 		lip = bp->b_fspriv;
2304 		while (lip) {
2305 			if (lip->li_type == XFS_LI_INODE) {
2306 				iip = (xfs_inode_log_item_t *)lip;
2307 				ASSERT(iip->ili_logged == 1);
2308 				lip->li_cb = xfs_istale_done;
2309 				xfs_trans_ail_copy_lsn(mp->m_ail,
2310 							&iip->ili_flush_lsn,
2311 							&iip->ili_item.li_lsn);
2312 				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2313 			}
2314 			lip = lip->li_bio_list;
2315 		}
2316 
2317 
2318 		/*
2319 		 * For each inode in memory attempt to add it to the inode
2320 		 * buffer and set it up for being staled on buffer IO
2321 		 * completion.  This is safe as we've locked out tail pushing
2322 		 * and flushing by locking the buffer.
2323 		 *
2324 		 * We have already marked every inode that was part of a
2325 		 * transaction stale above, which means there is no point in
2326 		 * even trying to lock them.
2327 		 */
2328 		for (i = 0; i < inodes_per_cluster; i++) {
2329 retry:
2330 			rcu_read_lock();
2331 			ip = radix_tree_lookup(&pag->pag_ici_root,
2332 					XFS_INO_TO_AGINO(mp, (inum + i)));
2333 
2334 			/* Inode not in memory, nothing to do */
2335 			if (!ip) {
2336 				rcu_read_unlock();
2337 				continue;
2338 			}
2339 
2340 			/*
2341 			 * because this is an RCU protected lookup, we could
2342 			 * find a recently freed or even reallocated inode
2343 			 * during the lookup. We need to check under the
2344 			 * i_flags_lock for a valid inode here. Skip it if it
2345 			 * is not valid, the wrong inode or stale.
2346 			 */
2347 			spin_lock(&ip->i_flags_lock);
2348 			if (ip->i_ino != inum + i ||
2349 			    __xfs_iflags_test(ip, XFS_ISTALE)) {
2350 				spin_unlock(&ip->i_flags_lock);
2351 				rcu_read_unlock();
2352 				continue;
2353 			}
2354 			spin_unlock(&ip->i_flags_lock);
2355 
2356 			/*
2357 			 * Don't try to lock/unlock the current inode, but we
2358 			 * _cannot_ skip the other inodes that we did not find
2359 			 * in the list attached to the buffer and are not
2360 			 * already marked stale. If we can't lock it, back off
2361 			 * and retry.
2362 			 */
2363 			if (ip != free_ip &&
2364 			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2365 				rcu_read_unlock();
2366 				delay(1);
2367 				goto retry;
2368 			}
2369 			rcu_read_unlock();
2370 
2371 			xfs_iflock(ip);
2372 			xfs_iflags_set(ip, XFS_ISTALE);
2373 
2374 			/*
2375 			 * we don't need to attach clean inodes or those only
2376 			 * with unlogged changes (which we throw away, anyway).
2377 			 */
2378 			iip = ip->i_itemp;
2379 			if (!iip || xfs_inode_clean(ip)) {
2380 				ASSERT(ip != free_ip);
2381 				xfs_ifunlock(ip);
2382 				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2383 				continue;
2384 			}
2385 
2386 			iip->ili_last_fields = iip->ili_fields;
2387 			iip->ili_fields = 0;
2388 			iip->ili_logged = 1;
2389 			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2390 						&iip->ili_item.li_lsn);
2391 
2392 			xfs_buf_attach_iodone(bp, xfs_istale_done,
2393 						  &iip->ili_item);
2394 
2395 			if (ip != free_ip)
2396 				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2397 		}
2398 
2399 		xfs_trans_stale_inode_buf(tp, bp);
2400 		xfs_trans_binval(tp, bp);
2401 	}
2402 
2403 	xfs_perag_put(pag);
2404 	return 0;
2405 }
2406 
2407 /*
2408  * This is called to return an inode to the inode free list.
2409  * The inode should already be truncated to 0 length and have
2410  * no pages associated with it.  This routine also assumes that
2411  * the inode is already a part of the transaction.
2412  *
2413  * The on-disk copy of the inode will have been added to the list
2414  * of unlinked inodes in the AGI. We need to remove the inode from
2415  * that list atomically with respect to freeing it here.
2416  */
2417 int
2418 xfs_ifree(
2419 	xfs_trans_t	*tp,
2420 	xfs_inode_t	*ip,
2421 	xfs_bmap_free_t	*flist)
2422 {
2423 	int			error;
2424 	int			delete;
2425 	xfs_ino_t		first_ino;
2426 
2427 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2428 	ASSERT(ip->i_d.di_nlink == 0);
2429 	ASSERT(ip->i_d.di_nextents == 0);
2430 	ASSERT(ip->i_d.di_anextents == 0);
2431 	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
2432 	ASSERT(ip->i_d.di_nblocks == 0);
2433 
2434 	/*
2435 	 * Pull the on-disk inode from the AGI unlinked list.
2436 	 */
2437 	error = xfs_iunlink_remove(tp, ip);
2438 	if (error)
2439 		return error;
2440 
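	/*
	 * xfs_difree() sets 'delete' when the whole inode chunk is freed
	 * and returns its first inode in 'first_ino' so the in-memory
	 * cluster can be staled below.
	 */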
2441 	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2442 	if (error)
2443 		return error;
2444 
2445 	ip->i_d.di_mode = 0;		/* mark incore inode as free */
2446 	ip->i_d.di_flags = 0;
2447 	ip->i_d.di_dmevmask = 0;
2448 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2449 	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2450 	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2451 	/*
2452 	 * Bump the generation count so no one will be confused
2453 	 * by reincarnations of this inode.
2454 	 */
2455 	ip->i_d.di_gen++;
2456 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2457 
2458 	if (delete)
2459 		error = xfs_ifree_cluster(ip, tp, first_ino);
2460 
2461 	return error;
2462 }
2463 
2464 /*
2465  * This is called to unpin an inode.  The caller must have the inode locked
2466  * in at least shared mode so that the inode cannot be subsequently pinned
2467  * once someone is waiting for it to be unpinned.
2468  */
2469 static void
2470 xfs_iunpin(
2471 	struct xfs_inode	*ip)
2472 {
2473 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2474 
2475 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2476 
2477 	/* Give the log a push to start the unpinning I/O */
2478 	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
2479 
2480 }
2481 
2482 static void
2483 __xfs_iunpin_wait(
2484 	struct xfs_inode	*ip)
2485 {
2486 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2487 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2488 
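	/*
	 * Push the log to trigger the unpin; log I/O completion drops the
	 * pin count and wakes this bit waitqueue.
	 */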
2489 	xfs_iunpin(ip);
2490 
2491 	do {
2492 		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
2493 		if (xfs_ipincount(ip))
2494 			io_schedule();
2495 	} while (xfs_ipincount(ip));
2496 	finish_wait(wq, &wait.wait);
2497 }
2498 
2499 void
2500 xfs_iunpin_wait(
2501 	struct xfs_inode	*ip)
2502 {
2503 	if (xfs_ipincount(ip))
2504 		__xfs_iunpin_wait(ip);
2505 }
2506 
2507 /*
2508  * Removing an inode from the namespace involves removing the directory entry
2509  * and dropping the link count on the inode. Removing the directory entry can
2510  * result in locking an AGF (directory blocks were freed) and removing a link
2511  * count can result in placing the inode on an unlinked list which results in
2512  * locking an AGI.
2513  *
2514  * The big problem here is that we have an ordering constraint on AGF and AGI
2515  * locking - inode allocation locks the AGI, then can allocate a new extent for
2516  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2517  * removes the inode from the unlinked list, requiring that we lock the AGI
2518  * first, and then freeing the inode can result in an inode chunk being freed
2519  * and hence freeing disk space requiring that we lock an AGF.
2520  *
2521  * Hence the ordering that is imposed by other parts of the code is AGI before
2522  * AGF. This means we cannot remove the directory entry before we drop the inode
2523  * reference count and put it on the unlinked list as this results in a lock
2524  * order of AGF then AGI, and this can deadlock against inode allocation and
2525  * freeing. Therefore we must drop the link counts before we remove the
2526  * directory entry.
2527  *
2528  * This is still safe from a transactional point of view - it is not until we
2529  * get to xfs_bmap_finish() that we have the possibility of multiple
2530  * transactions in this operation. Hence as long as we remove the directory
2531  * entry and drop the link count in the first transaction of the remove
2532  * operation, there are no transactional constraints on the ordering here.
2533  */
2534 int
2535 xfs_remove(
2536 	xfs_inode_t             *dp,
2537 	struct xfs_name		*name,
2538 	xfs_inode_t		*ip)
2539 {
2540 	xfs_mount_t		*mp = dp->i_mount;
2541 	xfs_trans_t             *tp = NULL;
2542 	int			is_dir = S_ISDIR(ip->i_d.di_mode);
2543 	int                     error = 0;
2544 	xfs_bmap_free_t         free_list;
2545 	xfs_fsblock_t           first_block;
2546 	int			cancel_flags;
2547 	int			committed;
2548 	int			link_zero;
2549 	uint			resblks;
2550 	uint			log_count;
2551 
2552 	trace_xfs_remove(dp, name);
2553 
2554 	if (XFS_FORCED_SHUTDOWN(mp))
2555 		return XFS_ERROR(EIO);
2556 
2557 	error = xfs_qm_dqattach(dp, 0);
2558 	if (error)
2559 		goto std_return;
2560 
2561 	error = xfs_qm_dqattach(ip, 0);
2562 	if (error)
2563 		goto std_return;
2564 
2565 	if (is_dir) {
2566 		tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
2567 		log_count = XFS_DEFAULT_LOG_COUNT;
2568 	} else {
2569 		tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
2570 		log_count = XFS_REMOVE_LOG_COUNT;
2571 	}
2572 	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
2573 
2574 	/*
2575 	 * We try to get the real space reservation first,
2576 	 * allowing for directory btree deletion(s) implying
2577 	 * possible bmap insert(s).  If we can't get the space
2578 	 * reservation then we use 0 instead, and avoid the bmap
2579 	 * btree insert(s) in the directory code by, if the bmap
2580 	 * insert tries to happen, instead trimming the LAST
2581 	 * block from the directory.
2582 	 */
2583 	resblks = XFS_REMOVE_SPACE_RES(mp);
2584 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0);
2585 	if (error == ENOSPC) {
2586 		resblks = 0;
2587 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0);
2588 	}
2589 	if (error) {
2590 		ASSERT(error != ENOSPC);
2591 		cancel_flags = 0;
2592 		goto out_trans_cancel;
2593 	}
2594 
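	/* lock both inodes in ascending inode number order to avoid deadlock */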
2595 	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
2596 
2597 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2598 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2599 
2600 	/*
2601 	 * If we're removing a directory, perform some additional validation.
2602 	 */
2603 	cancel_flags |= XFS_TRANS_ABORT;
2604 	if (is_dir) {
2605 		ASSERT(ip->i_d.di_nlink >= 2);
2606 		if (ip->i_d.di_nlink != 2) {
2607 			error = XFS_ERROR(ENOTEMPTY);
2608 			goto out_trans_cancel;
2609 		}
2610 		if (!xfs_dir_isempty(ip)) {
2611 			error = XFS_ERROR(ENOTEMPTY);
2612 			goto out_trans_cancel;
2613 		}
2614 
2615 		/* Drop the link from ip's "..".  */
2616 		error = xfs_droplink(tp, dp);
2617 		if (error)
2618 			goto out_trans_cancel;
2619 
2620 		/* Drop the "." link from ip to self.  */
2621 		error = xfs_droplink(tp, ip);
2622 		if (error)
2623 			goto out_trans_cancel;
2624 	} else {
2625 		/*
2626 		 * When removing a non-directory we need to log the parent
2627 		 * inode here.  For a directory this is done implicitly
2628 		 * by the xfs_droplink call for the ".." entry.
2629 		 */
2630 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2631 	}
2632 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2633 
2634 	/* Drop the link from dp to ip. */
2635 	error = xfs_droplink(tp, ip);
2636 	if (error)
2637 		goto out_trans_cancel;
2638 
2639 	/* Determine if this is the last link while the inode is locked */
2640 	link_zero = (ip->i_d.di_nlink == 0);
2641 
2642 	xfs_bmap_init(&free_list, &first_block);
2643 	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
2644 					&first_block, &free_list, resblks);
2645 	if (error) {
2646 		ASSERT(error != ENOENT);
2647 		goto out_bmap_cancel;
2648 	}
2649 
2650 	/*
2651 	 * If this is a synchronous mount, make sure that the
2652 	 * remove transaction goes to disk before returning to
2653 	 * the user.
2654 	 */
2655 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2656 		xfs_trans_set_sync(tp);
2657 
2658 	error = xfs_bmap_finish(&tp, &free_list, &committed);
2659 	if (error)
2660 		goto out_bmap_cancel;
2661 
2662 	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2663 	if (error)
2664 		goto std_return;
2665 
2666 	/*
2667 	 * If we are using filestreams, kill the stream association.
2668 	 * If the file is still open it may get a new one but that
2669 	 * will get killed on last close in xfs_close() so we don't
2670 	 * have to worry about that.
2671 	 */
2672 	if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
2673 		xfs_filestream_deassociate(ip);
2674 
2675 	return 0;
2676 
2677  out_bmap_cancel:
2678 	xfs_bmap_cancel(&free_list);
2679  out_trans_cancel:
2680 	xfs_trans_cancel(tp, cancel_flags);
2681  std_return:
2682 	return error;
2683 }
2684 
2685 /*
2686  * Enter all inodes for a rename transaction into a sorted array.
2687  */
2688 STATIC void
2689 xfs_sort_for_rename(
2690 	xfs_inode_t	*dp1,	/* in: old (source) directory inode */
2691 	xfs_inode_t	*dp2,	/* in: new (target) directory inode */
2692 	xfs_inode_t	*ip1,	/* in: inode of old entry */
2693 	xfs_inode_t	*ip2,	/* in: inode of new entry, if it
2694 				   already exists, NULL otherwise. */
2695 	xfs_inode_t	**i_tab,/* out: array of inode returned, sorted */
2696 	int		*num_inodes)  /* out: number of inodes in array */
2697 {
2698 	xfs_inode_t		*temp;
2699 	int			i, j;
2700 
2701 	/*
2702 	 * i_tab contains a list of pointers to inodes.  We initialize
2703 	 * the table here & we'll sort it.  We will then use it to
2704 	 * order the acquisition of the inode locks.
2705 	 *
2706 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2707 	 */
2708 	i_tab[0] = dp1;
2709 	i_tab[1] = dp2;
2710 	i_tab[2] = ip1;
2711 	if (ip2) {
2712 		*num_inodes = 4;
2713 		i_tab[3] = ip2;
2714 	} else {
2715 		*num_inodes = 3;
2716 		i_tab[3] = NULL;
2717 	}
2718 
2719 	/*
2720 	 * Sort the elements via bubble sort.  (Remember, there are at
2721 	 * most 4 elements to sort, so this is adequate.)
2722 	 */
2723 	for (i = 0; i < *num_inodes; i++) {
2724 		for (j = 1; j < *num_inodes; j++) {
2725 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2726 				temp = i_tab[j];
2727 				i_tab[j] = i_tab[j-1];
2728 				i_tab[j-1] = temp;
2729 			}
2730 		}
2731 	}
2732 }
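/*
 * Illustrative example: if dp2 < ip1 < dp1 by inode number and there is no
 * target inode, the sort yields i_tab = { dp2, ip1, dp1 }, so the caller's
 * xfs_lock_inodes() always takes the ILOCKs in ascending inode order.
 */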
2733 
2734 /*
2735  * xfs_rename
2736  */
2737 int
2738 xfs_rename(
2739 	xfs_inode_t	*src_dp,
2740 	struct xfs_name	*src_name,
2741 	xfs_inode_t	*src_ip,
2742 	xfs_inode_t	*target_dp,
2743 	struct xfs_name	*target_name,
2744 	xfs_inode_t	*target_ip)
2745 {
2746 	xfs_trans_t	*tp = NULL;
2747 	xfs_mount_t	*mp = src_dp->i_mount;
2748 	int		new_parent;		/* moving to a new dir */
2749 	int		src_is_directory;	/* src_name is a directory */
2750 	int		error;
2751 	xfs_bmap_free_t free_list;
2752 	xfs_fsblock_t   first_block;
2753 	int		cancel_flags;
2754 	int		committed;
2755 	xfs_inode_t	*inodes[4];
2756 	int		spaceres;
2757 	int		num_inodes;
2758 
2759 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2760 
2761 	new_parent = (src_dp != target_dp);
2762 	src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
2763 
2764 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip,
2765 				inodes, &num_inodes);
2766 
2767 	xfs_bmap_init(&free_list, &first_block);
2768 	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
2769 	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
2770 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2771 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
2772 	if (error == ENOSPC) {
2773 		spaceres = 0;
2774 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
2775 	}
2776 	if (error) {
2777 		xfs_trans_cancel(tp, 0);
2778 		goto std_return;
2779 	}
2780 
2781 	/*
2782 	 * Attach the dquots to the inodes
2783 	 */
2784 	error = xfs_qm_vop_rename_dqattach(inodes);
2785 	if (error) {
2786 		xfs_trans_cancel(tp, cancel_flags);
2787 		goto std_return;
2788 	}
2789 
2790 	/*
2791 	 * Lock all the participating inodes. Depending upon whether
2792 	 * the target_name exists in the target directory, and
2793 	 * whether the target directory is the same as the source
2794 	 * directory, we can lock from 2 to 4 inodes.
2795 	 */
2796 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2797 
2798 	/*
2799 	 * Join all the inodes to the transaction. From this point on,
2800 	 * we can rely on either trans_commit or trans_cancel to unlock
2801 	 * them.
2802 	 */
2803 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2804 	if (new_parent)
2805 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2806 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2807 	if (target_ip)
2808 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2809 
2810 	/*
2811 	 * If we are using project inheritance, we only allow renames
2812 	 * into our tree when the project IDs are the same; else the
2813 	 * tree quota mechanism would be circumvented.
2814 	 */
2815 	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
2816 		     (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
2817 		error = XFS_ERROR(EXDEV);
2818 		goto error_return;
2819 	}
2820 
2821 	/*
2822 	 * Set up the target.
2823 	 */
2824 	if (target_ip == NULL) {
2825 		/*
2826 		 * If there's no space reservation, check the entry will
2827 		 * fit before actually inserting it.
2828 		 */
2829 		error = xfs_dir_canenter(tp, target_dp, target_name, spaceres);
2830 		if (error)
2831 			goto error_return;
2832 		/*
2833 		 * If target does not exist and the rename crosses
2834 		 * directories, adjust the target directory link count
2835 		 * to account for the ".." reference from the new entry.
2836 		 */
2837 		error = xfs_dir_createname(tp, target_dp, target_name,
2838 						src_ip->i_ino, &first_block,
2839 						&free_list, spaceres);
2840 		if (error == ENOSPC)
2841 			goto error_return;
2842 		if (error)
2843 			goto abort_return;
2844 
2845 		xfs_trans_ichgtime(tp, target_dp,
2846 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2847 
2848 		if (new_parent && src_is_directory) {
2849 			error = xfs_bumplink(tp, target_dp);
2850 			if (error)
2851 				goto abort_return;
2852 		}
2853 	} else { /* target_ip != NULL */
2854 		/*
2855 		 * If target exists and it's a directory, check that both
2856 		 * target and source are directories and that target can be
2857 		 * destroyed, or that neither is a directory.
2858 		 */
2859 		if (S_ISDIR(target_ip->i_d.di_mode)) {
2860 			/*
2861 			 * Make sure target dir is empty.
2862 			 */
2863 			if (!(xfs_dir_isempty(target_ip)) ||
2864 			    (target_ip->i_d.di_nlink > 2)) {
2865 				error = XFS_ERROR(EEXIST);
2866 				goto error_return;
2867 			}
2868 		}
2869 
2870 		/*
2871 		 * Link the source inode under the target name.
2872 		 * If the source inode is a directory and we are moving
2873 		 * it across directories, its ".." entry will be
2874 		 * inconsistent until we replace that down below.
2875 		 *
2876 		 * In case there is already an entry with the same
2877 		 * name at the destination directory, remove it first.
2878 		 */
2879 		error = xfs_dir_replace(tp, target_dp, target_name,
2880 					src_ip->i_ino,
2881 					&first_block, &free_list, spaceres);
2882 		if (error)
2883 			goto abort_return;
2884 
2885 		xfs_trans_ichgtime(tp, target_dp,
2886 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2887 
2888 		/*
2889 		 * Decrement the link count on the target since the target
2890 		 * dir no longer points to it.
2891 		 */
2892 		error = xfs_droplink(tp, target_ip);
2893 		if (error)
2894 			goto abort_return;
2895 
2896 		if (src_is_directory) {
2897 			/*
2898 			 * Drop the link from the old "." entry.
2899 			 */
2900 			error = xfs_droplink(tp, target_ip);
2901 			if (error)
2902 				goto abort_return;
2903 		}
2904 	} /* target_ip != NULL */
2905 
2906 	/*
2907 	 * Remove the source.
2908 	 */
2909 	if (new_parent && src_is_directory) {
2910 		/*
2911 		 * Rewrite the ".." entry to point to the new
2912 		 * directory.
2913 		 */
2914 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
2915 					target_dp->i_ino,
2916 					&first_block, &free_list, spaceres);
2917 		ASSERT(error != EEXIST);
2918 		if (error)
2919 			goto abort_return;
2920 	}
2921 
2922 	/*
2923 	 * We always want to hit the ctime on the source inode.
2924 	 *
2925 	 * This isn't strictly required by the standards since the source
2926 	 * inode isn't really being changed, but old unix file systems did
2927 	 * it and some incremental backup programs won't work without it.
2928 	 */
2929 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
2930 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
2931 
2932 	/*
2933 	 * Adjust the link count on src_dp.  This is necessary when
2934 	 * renaming a directory, either within one parent when
2935 	 * the target existed, or across two parent directories.
2936 	 */
2937 	if (src_is_directory && (new_parent || target_ip != NULL)) {
2938 
2939 		/*
2940 		 * Decrement link count on src_directory since the
2941 		 * entry that's moved no longer points to it.
2942 		 */
2943 		error = xfs_droplink(tp, src_dp);
2944 		if (error)
2945 			goto abort_return;
2946 	}
2947 
2948 	error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
2949 					&first_block, &free_list, spaceres);
2950 	if (error)
2951 		goto abort_return;
2952 
2953 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2954 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
2955 	if (new_parent)
2956 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
2957 
2958 	/*
2959 	 * If this is a synchronous mount, make sure that the
2960 	 * rename transaction goes to disk before returning to
2961 	 * the user.
2962 	 */
2963 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
2964 		xfs_trans_set_sync(tp);
2965 	}
2966 
2967 	error = xfs_bmap_finish(&tp, &free_list, &committed);
2968 	if (error) {
2969 		xfs_bmap_cancel(&free_list);
2970 		xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
2971 				 XFS_TRANS_ABORT));
2972 		goto std_return;
2973 	}
2974 
2975 	/*
2976 	 * trans_commit will unlock src_ip, target_ip & decrement
2977 	 * the vnode references.
2978 	 */
2979 	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2980 
2981  abort_return:
2982 	cancel_flags |= XFS_TRANS_ABORT;
2983  error_return:
2984 	xfs_bmap_cancel(&free_list);
2985 	xfs_trans_cancel(tp, cancel_flags);
2986  std_return:
2987 	return error;
2988 }
2989 
2990 STATIC int
2991 xfs_iflush_cluster(
2992 	xfs_inode_t	*ip,
2993 	xfs_buf_t	*bp)
2994 {
2995 	xfs_mount_t		*mp = ip->i_mount;
2996 	struct xfs_perag	*pag;
2997 	unsigned long		first_index, mask;
2998 	unsigned long		inodes_per_cluster;
2999 	int			ilist_size;
3000 	xfs_inode_t		**ilist;
3001 	xfs_inode_t		*iq;
3002 	int			nr_found;
3003 	int			clcount = 0;
3004 	int			bufwasdelwri;
3005 	int			i;
3006 
3007 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
3008 
3009 	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
3010 	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
3011 	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
3012 	if (!ilist)
3013 		goto out_put;
3014 
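	/*
	 * Round the inode down to the first inode of its cluster: mask off
	 * the low log2(inodes per cluster) bits of the agino.
	 */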
3015 	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
3016 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
3017 	rcu_read_lock();
3018 	/* really need a gang lookup range call here */
3019 	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
3020 					first_index, inodes_per_cluster);
3021 	if (nr_found == 0)
3022 		goto out_free;
3023 
3024 	for (i = 0; i < nr_found; i++) {
3025 		iq = ilist[i];
3026 		if (iq == ip)
3027 			continue;
3028 
3029 		/*
3030 		 * because this is an RCU protected lookup, we could find a
3031 		 * recently freed or even reallocated inode during the lookup.
3032 		 * We need to check under the i_flags_lock for a valid inode
3033 		 * here. Skip it if it is not valid or the wrong inode.
3034 		 */
3035 		spin_lock(&iq->i_flags_lock);
3036 		if (!iq->i_ino ||
3037 		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
3038 			spin_unlock(&iq->i_flags_lock);
3039 			continue;
3040 		}
3041 		spin_unlock(&iq->i_flags_lock);
3042 
3043 		/*
3044 		 * Do an un-protected check to see if the inode is dirty and
3045 		 * is a candidate for flushing.  These checks will be repeated
3046 		 * later after the appropriate locks are acquired.
3047 		 */
3048 		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
3049 			continue;
3050 
3051 		/*
3052 		 * Try to get locks.  If any are unavailable or it is pinned,
3053 		 * then this inode cannot be flushed and is skipped.
3054 		 */
3055 
3056 		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
3057 			continue;
3058 		if (!xfs_iflock_nowait(iq)) {
3059 			xfs_iunlock(iq, XFS_ILOCK_SHARED);
3060 			continue;
3061 		}
3062 		if (xfs_ipincount(iq)) {
3063 			xfs_ifunlock(iq);
3064 			xfs_iunlock(iq, XFS_ILOCK_SHARED);
3065 			continue;
3066 		}
3067 
3068 		/*
3069 		 * arriving here means that this inode can be flushed.  First
3070 		 * re-check that it's dirty before flushing.
3071 		 */
3072 		if (!xfs_inode_clean(iq)) {
3073 			int	error;
3074 			error = xfs_iflush_int(iq, bp);
3075 			if (error) {
3076 				xfs_iunlock(iq, XFS_ILOCK_SHARED);
3077 				goto cluster_corrupt_out;
3078 			}
3079 			clcount++;
3080 		} else {
3081 			xfs_ifunlock(iq);
3082 		}
3083 		xfs_iunlock(iq, XFS_ILOCK_SHARED);
3084 	}
3085 
3086 	if (clcount) {
3087 		XFS_STATS_INC(xs_icluster_flushcnt);
3088 		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
3089 	}
3090 
3091 out_free:
3092 	rcu_read_unlock();
3093 	kmem_free(ilist);
3094 out_put:
3095 	xfs_perag_put(pag);
3096 	return 0;
3097 
3098 
3099 cluster_corrupt_out:
3100 	/*
3101 	 * Corruption detected in the clustering loop.  Invalidate the
3102 	 * inode buffer and shut down the filesystem.
3103 	 */
3104 	rcu_read_unlock();
3105 	/*
3106 	 * Clean up the buffer.  If it was delwri, just release it --
3107 	 * brelse can handle it with no problems.  If not, shut down the
3108 	 * filesystem before releasing the buffer.
3109 	 */
3110 	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
3111 	if (bufwasdelwri)
3112 		xfs_buf_relse(bp);
3113 
3114 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3115 
3116 	if (!bufwasdelwri) {
3117 		/*
3118 		 * Just like incore_relse: if we have b_iodone functions,
3119 		 * mark the buffer as an error and call them.  Otherwise
3120 		 * mark it as stale and brelse.
3121 		 */
3122 		if (bp->b_iodone) {
3123 			XFS_BUF_UNDONE(bp);
3124 			xfs_buf_stale(bp);
3125 			xfs_buf_ioerror(bp, EIO);
3126 			xfs_buf_ioend(bp, 0);
3127 		} else {
3128 			xfs_buf_stale(bp);
3129 			xfs_buf_relse(bp);
3130 		}
3131 	}
3132 
3133 	/*
3134 	 * Unlocks the flush lock
3135 	 */
3136 	xfs_iflush_abort(iq, false);
3137 	kmem_free(ilist);
3138 	xfs_perag_put(pag);
3139 	return XFS_ERROR(EFSCORRUPTED);
3140 }
3141 
3142 /*
3143  * Flush dirty inode metadata into the backing buffer.
3144  *
3145  * The caller must have the inode lock and the inode flush lock held.  The
3146  * inode lock will still be held upon return to the caller, and the inode
3147  * flush lock will be released after the inode has reached the disk.
3148  *
3149  * The caller must write out the buffer returned in *bpp and release it.
3150  */
3151 int
3152 xfs_iflush(
3153 	struct xfs_inode	*ip,
3154 	struct xfs_buf		**bpp)
3155 {
3156 	struct xfs_mount	*mp = ip->i_mount;
3157 	struct xfs_buf		*bp;
3158 	struct xfs_dinode	*dip;
3159 	int			error;
3160 
3161 	XFS_STATS_INC(xs_iflush_count);
3162 
3163 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3164 	ASSERT(xfs_isiflocked(ip));
3165 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3166 	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3167 
3168 	*bpp = NULL;
3169 
3170 	xfs_iunpin_wait(ip);
3171 
3172 	/*
3173 	 * For stale inodes we cannot rely on the backing buffer remaining
3174 	 * stale in cache for the remaining life of the stale inode and so
3175 	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
3176 	 * inodes. We have to check this after ensuring the inode is
3177 	 * unpinned so that it is safe to reclaim the stale inode after the
3178 	 * flush call.
3179 	 */
3180 	if (xfs_iflags_test(ip, XFS_ISTALE)) {
3181 		xfs_ifunlock(ip);
3182 		return 0;
3183 	}
3184 
3185 	/*
3186 	 * This may have been unpinned because the filesystem is shutting
3187 	 * down forcibly. If that's the case we must not write this inode
3188 	 * to disk, because the log record didn't make it to disk.
3189 	 *
3190 	 * We also have to remove the log item from the AIL in this case,
3191 	 * as we wait for an empty AIL as part of the unmount process.
3192 	 */
3193 	if (XFS_FORCED_SHUTDOWN(mp)) {
3194 		error = XFS_ERROR(EIO);
3195 		goto abort_out;
3196 	}
3197 
3198 	/*
3199 	 * Get the buffer containing the on-disk inode.
3200 	 */
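	/* XBF_TRYLOCK: back off rather than block if the buffer is busy */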
3201 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3202 			       0);
3203 	if (error || !bp) {
3204 		xfs_ifunlock(ip);
3205 		return error;
3206 	}
3207 
3208 	/*
3209 	 * First flush out the inode that xfs_iflush was called with.
3210 	 */
3211 	error = xfs_iflush_int(ip, bp);
3212 	if (error)
3213 		goto corrupt_out;
3214 
3215 	/*
3216 	 * If the buffer is pinned then push on the log now so we won't
3217 	 * get stuck waiting in the write for too long.
3218 	 */
3219 	if (xfs_buf_ispinned(bp))
3220 		xfs_log_force(mp, 0);
3221 
3222 	/*
3223 	 * inode clustering:
3224 	 * see if other inodes can be gathered into this write
3225 	 */
3226 	error = xfs_iflush_cluster(ip, bp);
3227 	if (error)
3228 		goto cluster_corrupt_out;
3229 
3230 	*bpp = bp;
3231 	return 0;
3232 
3233 corrupt_out:
3234 	xfs_buf_relse(bp);
3235 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3236 cluster_corrupt_out:
3237 	error = XFS_ERROR(EFSCORRUPTED);
3238 abort_out:
3239 	/*
3240 	 * Unlocks the flush lock
3241 	 */
3242 	xfs_iflush_abort(ip, false);
3243 	return error;
3244 }
3245 
3246 STATIC int
3247 xfs_iflush_int(
3248 	struct xfs_inode	*ip,
3249 	struct xfs_buf		*bp)
3250 {
3251 	struct xfs_inode_log_item *iip = ip->i_itemp;
3252 	struct xfs_dinode	*dip;
3253 	struct xfs_mount	*mp = ip->i_mount;
3254 
3255 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3256 	ASSERT(xfs_isiflocked(ip));
3257 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3258 	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3259 	ASSERT(iip != NULL && iip->ili_fields != 0);
3260 
3261 	/* set *dip = inode's place in the buffer */
3262 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
3263 
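	/*
	 * Sanity-check the inode before writeback; XFS_TEST_ERROR also lets
	 * these failures be injected via the error tag machinery for testing.
	 */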
3264 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3265 			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3266 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3267 			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3268 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3269 		goto corrupt_out;
3270 	}
3271 	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3272 				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3273 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3274 			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3275 			__func__, ip->i_ino, ip, ip->i_d.di_magic);
3276 		goto corrupt_out;
3277 	}
3278 	if (S_ISREG(ip->i_d.di_mode)) {
3279 		if (XFS_TEST_ERROR(
3280 		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3281 		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3282 		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3283 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3284 				"%s: Bad regular inode %Lu, ptr 0x%p",
3285 				__func__, ip->i_ino, ip);
3286 			goto corrupt_out;
3287 		}
3288 	} else if (S_ISDIR(ip->i_d.di_mode)) {
3289 		if (XFS_TEST_ERROR(
3290 		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3291 		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3292 		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3293 		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3294 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3295 				"%s: Bad directory inode %Lu, ptr 0x%p",
3296 				__func__, ip->i_ino, ip);
3297 			goto corrupt_out;
3298 		}
3299 	}
3300 	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3301 				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3302 				XFS_RANDOM_IFLUSH_5)) {
3303 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3304 			"%s: detected corrupt incore inode %Lu, "
3305 			"total extents = %d, nblocks = %Ld, ptr 0x%p",
3306 			__func__, ip->i_ino,
3307 			ip->i_d.di_nextents + ip->i_d.di_anextents,
3308 			ip->i_d.di_nblocks, ip);
3309 		goto corrupt_out;
3310 	}
3311 	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3312 				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3313 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3314 			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3315 			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3316 		goto corrupt_out;
3317 	}
3318 
3319 	/*
3320 	 * Inode item log recovery for v1/v2 inodes is dependent on the
3321 	 * di_flushiter count for correct sequencing. We bump the flush
3322 	 * iteration count so we can detect flushes which postdate a log record
3323 	 * during recovery. This is redundant as we now log every change and
3324 	 * hence this can't happen, but we still need to do it to ensure
3325 	 * backwards compatibility with old kernels that predate logging all
3326 	 * inode changes.
3327 	 */
3328 	if (ip->i_d.di_version < 3)
3329 		ip->i_d.di_flushiter++;
3330 
3331 	/*
3332 	 * Copy the dirty parts of the inode into the on-disk
3333 	 * inode.  We always copy out the core of the inode,
3334 	 * because if the inode is dirty at all the core must
3335 	 * be.
3336 	 */
3337 	xfs_dinode_to_disk(dip, &ip->i_d);
3338 
3339 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3340 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3341 		ip->i_d.di_flushiter = 0;
3342 
3343 	/*
3344 	 * If this is really an old format inode and the superblock version
3345 	 * has not been updated to support only new format inodes, then
3346 	 * convert back to the old inode format.  If the superblock version
3347 	 * has been updated, then make the conversion permanent.
3348 	 */
3349 	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
3350 	if (ip->i_d.di_version == 1) {
3351 		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
3352 			/*
3353 			 * Convert it back.
3354 			 */
3355 			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3356 			dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
3357 		} else {
3358 			/*
3359 			 * The superblock version has already been bumped,
3360 			 * so just make the conversion to the new inode
3361 			 * format permanent.
3362 			 */
3363 			ip->i_d.di_version = 2;
3364 			dip->di_version = 2;
3365 			ip->i_d.di_onlink = 0;
3366 			dip->di_onlink = 0;
3367 			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3368 			memset(&(dip->di_pad[0]), 0,
3369 			      sizeof(dip->di_pad));
3370 			ASSERT(xfs_get_projid(ip) == 0);
3371 		}
3372 	}
3373 
3374 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
3375 	if (XFS_IFORK_Q(ip))
3376 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3377 	xfs_inobp_check(mp, bp);
3378 
3379 	/*
3380 	 * We've recorded everything logged in the inode, so we'd like to clear
3381 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3382 	 * However, we can't stop logging all this information until the data
3383 	 * we've copied into the disk buffer is written to disk.  If we did we
3384 	 * might overwrite the copy of the inode in the log with all the data
3385 	 * after re-logging only part of it, and in the face of a crash we
3386 	 * wouldn't have all the data we need to recover.
3387 	 *
3388 	 * What we do is move the bits to the ili_last_fields field.  When
3389 	 * logging the inode, these bits are moved back to the ili_fields field.
3390 	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3391 	 * know that the information those bits represent is permanently on
3392 	 * disk.  As long as the flush completes before the inode is logged
3393 	 * again, then both ili_fields and ili_last_fields will be cleared.
3394 	 *
3395 	 * We can play with the ili_fields bits here, because the inode lock
3396 	 * must be held exclusively in order to set bits there and the flush
3397 	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
3398 	 * done routine can tell whether or not to look in the AIL.  Also, store
3399 	 * the current LSN of the inode so that we can tell whether the item has
3400 	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
3401 	 * need the AIL lock, because it is a 64 bit value that cannot be read
3402 	 * atomically.
3403 	 */
3404 	iip->ili_last_fields = iip->ili_fields;
3405 	iip->ili_fields = 0;
3406 	iip->ili_logged = 1;
3407 
3408 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3409 				&iip->ili_item.li_lsn);
3410 
3411 	/*
3412 	 * Attach the function xfs_iflush_done to the inode's
3413 	 * buffer.  This will remove the inode from the AIL
3414 	 * and unlock the inode's flush lock when the inode is
3415 	 * completely written to disk.
3416 	 */
3417 	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
3418 
3419 	/* update the lsn in the on disk inode if required */
3420 	if (ip->i_d.di_version == 3)
3421 		dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);
3422 
3423 	/* generate the checksum. */
3424 	xfs_dinode_calc_crc(mp, dip);
3425 
3426 	ASSERT(bp->b_fspriv != NULL);
3427 	ASSERT(bp->b_iodone != NULL);
3428 	return 0;
3429 
3430 corrupt_out:
3431 	return XFS_ERROR(EFSCORRUPTED);
3432 }
3433