xref: /openbmc/linux/fs/xfs/xfs_inode.c (revision c21b37f6)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_imap.h"
25 #include "xfs_trans.h"
26 #include "xfs_trans_priv.h"
27 #include "xfs_sb.h"
28 #include "xfs_ag.h"
29 #include "xfs_dir2.h"
30 #include "xfs_dmapi.h"
31 #include "xfs_mount.h"
32 #include "xfs_bmap_btree.h"
33 #include "xfs_alloc_btree.h"
34 #include "xfs_ialloc_btree.h"
35 #include "xfs_dir2_sf.h"
36 #include "xfs_attr_sf.h"
37 #include "xfs_dinode.h"
38 #include "xfs_inode.h"
39 #include "xfs_buf_item.h"
40 #include "xfs_inode_item.h"
41 #include "xfs_btree.h"
42 #include "xfs_alloc.h"
43 #include "xfs_ialloc.h"
44 #include "xfs_bmap.h"
45 #include "xfs_rw.h"
46 #include "xfs_error.h"
47 #include "xfs_utils.h"
48 #include "xfs_dir2_trace.h"
49 #include "xfs_quota.h"
50 #include "xfs_acl.h"
51 #include "xfs_filestream.h"
52 
53 #include <linux/log2.h>
54 
55 kmem_zone_t *xfs_ifork_zone;
56 kmem_zone_t *xfs_inode_zone;
57 kmem_zone_t *xfs_chashlist_zone;
58 
59 /*
60  * Used in xfs_itruncate_finish().  This is the maximum number of extents
61  * freed from a file in a single transaction.
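 * Keeping this number small presumably bounds the amount of metadata
 * work and log space any single truncate transaction can consume.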
62  */
63 #define	XFS_ITRUNC_MAX_EXTENTS	2
64 
65 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
66 STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
67 STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
68 STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
69 
70 
71 #ifdef DEBUG
72 /*
73  * Make sure that the extents in the given memory buffer
74  * are valid.
75  */
76 STATIC void
77 xfs_validate_extents(
78 	xfs_ifork_t		*ifp,
79 	int			nrecs,
80 	int			disk,
81 	xfs_exntfmt_t		fmt)
82 {
83 	xfs_bmbt_rec_t		*ep;
84 	xfs_bmbt_irec_t		irec;
85 	xfs_bmbt_rec_t		rec;
86 	int			i;
87 
88 	for (i = 0; i < nrecs; i++) {
89 		ep = xfs_iext_get_ext(ifp, i);
90 		rec.l0 = get_unaligned((__uint64_t*)&ep->l0);
91 		rec.l1 = get_unaligned((__uint64_t*)&ep->l1);
92 		if (disk)
93 			xfs_bmbt_disk_get_all(&rec, &irec);
94 		else
95 			xfs_bmbt_get_all(&rec, &irec);
96 		if (fmt == XFS_EXTFMT_NOSTATE)
97 			ASSERT(irec.br_state == XFS_EXT_NORM);
98 	}
99 }
100 #else /* DEBUG */
101 #define xfs_validate_extents(ifp, nrecs, disk, fmt)
102 #endif /* DEBUG */
103 
104 /*
105  * Check that none of the inodes in the buffer have a next
106  * unlinked field of 0.
107  */
108 #if defined(DEBUG)
109 void
110 xfs_inobp_check(
111 	xfs_mount_t	*mp,
112 	xfs_buf_t	*bp)
113 {
114 	int		i;
115 	int		j;
116 	xfs_dinode_t	*dip;
117 
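	/*
	 * j is the number of inodes in the cluster buffer: the cluster
	 * size in bytes divided by the inode size (sb_inodelog is the
	 * log2 of the inode size).
	 */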
118 	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
119 
120 	for (i = 0; i < j; i++) {
121 		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
122 					i * mp->m_sb.sb_inodesize);
123 		if (!dip->di_next_unlinked)  {
124 			xfs_fs_cmn_err(CE_ALERT, mp,
125 				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
126 				bp);
127 			ASSERT(dip->di_next_unlinked);
128 		}
129 	}
130 }
131 #endif
132 
133 /*
134  * This routine is called to map an inode number within a file
135  * system to the buffer containing the on-disk version of the
136  * inode.  It returns a pointer to the buffer containing the
137  * on-disk inode in the bpp parameter, and in the dip parameter
138  * it returns a pointer to the on-disk inode within that buffer.
139  *
140  * If a non-zero error is returned, then the contents of bpp and
141  * dipp are undefined.
142  *
143  * Use xfs_imap() to determine the size and location of the
144  * buffer to read from disk.
145  */
146 STATIC int
147 xfs_inotobp(
148 	xfs_mount_t	*mp,
149 	xfs_trans_t	*tp,
150 	xfs_ino_t	ino,
151 	xfs_dinode_t	**dipp,
152 	xfs_buf_t	**bpp,
153 	int		*offset)
154 {
155 	int		di_ok;
156 	xfs_imap_t	imap;
157 	xfs_buf_t	*bp;
158 	int		error;
159 	xfs_dinode_t	*dip;
160 
161 	/*
162 	 * Call the space management code to find the location of the
163 	 * inode on disk.
164 	 */
165 	imap.im_blkno = 0;
166 	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
167 	if (error != 0) {
168 		cmn_err(CE_WARN,
169 	"xfs_inotobp: xfs_imap()  returned an "
170 	"error %d on %s.  Returning error.", error, mp->m_fsname);
171 		return error;
172 	}
173 
174 	/*
175 	 * If the inode number maps to a block outside the bounds of the
176 	 * file system then return an error rather than calling read_buf()
177 	 * and panicking when we get an error from the driver.
178 	 */
179 	if ((imap.im_blkno + imap.im_len) >
180 	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
181 		cmn_err(CE_WARN,
182 	"xfs_inotobp: inode number (%llu + %d) maps to a block outside the bounds "
183 	"of the file system %s.  Returning EINVAL.",
184 			(unsigned long long)imap.im_blkno,
185 			imap.im_len, mp->m_fsname);
186 		return XFS_ERROR(EINVAL);
187 	}
188 
189 	/*
190 	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
191 	 * default to just a read_buf() call.
192 	 */
193 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
194 				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
195 
196 	if (error) {
197 		cmn_err(CE_WARN,
198 	"xfs_inotobp: xfs_trans_read_buf()  returned an "
199 	"error %d on %s.  Returning error.", error, mp->m_fsname);
200 		return error;
201 	}
202 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0);
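	/*
	 * Sanity check the inode: it must carry the XFS dinode magic
	 * number and a version this code understands.
	 */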
203 	di_ok =
204 		INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
205 		XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
206 	if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
207 			XFS_RANDOM_ITOBP_INOTOBP))) {
208 		XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip);
209 		xfs_trans_brelse(tp, bp);
210 		cmn_err(CE_WARN,
211 	"xfs_inotobp: XFS_TEST_ERROR()  returned an "
212 	"error on %s.  Returning EFSCORRUPTED.",  mp->m_fsname);
213 		return XFS_ERROR(EFSCORRUPTED);
214 	}
215 
216 	xfs_inobp_check(mp, bp);
217 
218 	/*
219 	 * Set *dipp to point to the on-disk inode in the buffer.
220 	 */
221 	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
222 	*bpp = bp;
223 	*offset = imap.im_boffset;
224 	return 0;
225 }
226 
227 
228 /*
229  * This routine is called to map an inode to the buffer containing
230  * the on-disk version of the inode.  It returns a pointer to the
231  * buffer containing the on-disk inode in the bpp parameter, and in
232  * the dip parameter it returns a pointer to the on-disk inode within
233  * that buffer.
234  *
235  * If a non-zero error is returned, then the contents of bpp and
236  * dipp are undefined.
237  *
238  * If the inode is new and has not yet been initialized, use xfs_imap()
239  * to determine the size and location of the buffer to read from disk.
240  * If the inode has already been mapped to its buffer and read in once,
241  * then use the mapping information stored in the inode rather than
242  * calling xfs_imap().  This allows us to avoid the overhead of looking
243  * at the inode btree for small block file systems (see xfs_dilocate()).
244  * We can tell whether the inode has been mapped in before by comparing
245  * its disk block address to 0.  Only uninitialized inodes will have
246  * 0 for the disk block address.
247  */
248 int
249 xfs_itobp(
250 	xfs_mount_t	*mp,
251 	xfs_trans_t	*tp,
252 	xfs_inode_t	*ip,
253 	xfs_dinode_t	**dipp,
254 	xfs_buf_t	**bpp,
255 	xfs_daddr_t	bno,
256 	uint		imap_flags)
257 {
258 	xfs_imap_t	imap;
259 	xfs_buf_t	*bp;
260 	int		error;
261 	int		i;
262 	int		ni;
263 
264 	if (ip->i_blkno == (xfs_daddr_t)0) {
265 		/*
266 		 * Call the space management code to find the location of the
267 		 * inode on disk.
268 		 */
269 		imap.im_blkno = bno;
270 		if ((error = xfs_imap(mp, tp, ip->i_ino, &imap,
271 					XFS_IMAP_LOOKUP | imap_flags)))
272 			return error;
273 
274 		/*
275 		 * If the inode number maps to a block outside the bounds
276 		 * of the file system then return an error rather than
277 		 * calling read_buf and panicking when we get an error
278 		 * from the driver.
279 		 */
280 		if ((imap.im_blkno + imap.im_len) >
281 		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
282 #ifdef DEBUG
283 			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
284 					"(imap.im_blkno (0x%llx) "
285 					"+ imap.im_len (0x%llx)) > "
286 					" XFS_FSB_TO_BB(mp, "
287 					"mp->m_sb.sb_dblocks) (0x%llx)",
288 					(unsigned long long) imap.im_blkno,
289 					(unsigned long long) imap.im_len,
290 					XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
291 #endif /* DEBUG */
292 			return XFS_ERROR(EINVAL);
293 		}
294 
295 		/*
296 		 * Fill in the fields in the inode that will be used to
297 		 * map the inode to its buffer from now on.
298 		 */
299 		ip->i_blkno = imap.im_blkno;
300 		ip->i_len = imap.im_len;
301 		ip->i_boffset = imap.im_boffset;
302 	} else {
303 		/*
304 		 * We've already mapped the inode once, so just use the
305 		 * mapping that we saved the first time.
306 		 */
307 		imap.im_blkno = ip->i_blkno;
308 		imap.im_len = ip->i_len;
309 		imap.im_boffset = ip->i_boffset;
310 	}
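	/* A caller-supplied block number hint must agree with the mapping. */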
311 	ASSERT(bno == 0 || bno == imap.im_blkno);
312 
313 	/*
314 	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
315 	 * default to just a read_buf() call.
316 	 */
317 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
318 				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
319 	if (error) {
320 #ifdef DEBUG
321 		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
322 				"xfs_trans_read_buf() returned error %d, "
323 				"imap.im_blkno 0x%llx, imap.im_len 0x%llx",
324 				error, (unsigned long long) imap.im_blkno,
325 				(unsigned long long) imap.im_len);
326 #endif /* DEBUG */
327 		return error;
328 	}
329 
330 	/*
331 	 * Validate the magic number and version of every inode in the buffer
332 	 * (on DEBUG kernels), or otherwise just the first inode in the buffer.
333 	 * No validation is done here in userspace (xfs_repair).
334 	 */
335 #if !defined(__KERNEL__)
336 	ni = 0;
337 #elif defined(DEBUG)
338 	ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
339 #else	/* usual case */
340 	ni = 1;
341 #endif
342 
343 	for (i = 0; i < ni; i++) {
344 		int		di_ok;
345 		xfs_dinode_t	*dip;
346 
347 		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
348 					(i << mp->m_sb.sb_inodelog));
349 		di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
350 			    XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
351 		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
352 						XFS_ERRTAG_ITOBP_INOTOBP,
353 						XFS_RANDOM_ITOBP_INOTOBP))) {
354 			if (imap_flags & XFS_IMAP_BULKSTAT) {
355 				xfs_trans_brelse(tp, bp);
356 				return XFS_ERROR(EINVAL);
357 			}
358 #ifdef DEBUG
359 			cmn_err(CE_ALERT,
360 					"Device %s - bad inode magic/vsn "
361 					"daddr %lld #%d (magic=%x)",
362 				XFS_BUFTARG_NAME(mp->m_ddev_targp),
363 				(unsigned long long)imap.im_blkno, i,
364 				INT_GET(dip->di_core.di_magic, ARCH_CONVERT));
365 #endif
366 			XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH,
367 					     mp, dip);
368 			xfs_trans_brelse(tp, bp);
369 			return XFS_ERROR(EFSCORRUPTED);
370 		}
371 	}
372 
373 	xfs_inobp_check(mp, bp);
374 
375 	/*
376 	 * Mark the buffer as an inode buffer now that it looks good
377 	 */
378 	XFS_BUF_SET_VTYPE(bp, B_FS_INO);
379 
380 	/*
381 	 * Set *dipp to point to the on-disk inode in the buffer.
382 	 */
383 	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
384 	*bpp = bp;
385 	return 0;
386 }
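
/*
 * Typical usage of xfs_itobp() (a sketch only, not lifted from any
 * real caller; error handling trimmed):
 *
 *	xfs_dinode_t	*dip;
 *	xfs_buf_t	*bp;
 *	int		error;
 *
 *	error = xfs_itobp(mp, tp, ip, &dip, &bp, (xfs_daddr_t)0, 0);
 *	if (error)
 *		return error;
 *	... read or modify *dip ...
 *	xfs_trans_brelse(tp, bp);
 */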
387 
388 /*
389  * Move inode type and inode format specific information from the
390  * on-disk inode to the in-core inode.  For fifos, devs, and sockets
391  * this means set if_rdev to the proper value.  For files, directories,
392  * and symlinks this means to bring in the in-line data or extent
393  * pointers.  For a file in B-tree format, only the root is immediately
394  * brought in-core.  The rest will be in-lined in if_extents when it
395  * is first referenced (see xfs_iread_extents()).
396  */
397 STATIC int
398 xfs_iformat(
399 	xfs_inode_t		*ip,
400 	xfs_dinode_t		*dip)
401 {
402 	xfs_attr_shortform_t	*atp;
403 	int			size;
404 	int			error;
405 	xfs_fsize_t		di_size;

406 	ip->i_df.if_ext_max =
407 		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
408 	error = 0;
409 
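	/*
	 * The data and attribute fork extent counts together can never
	 * exceed the number of blocks the inode owns.
	 */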
410 	if (unlikely(
411 	    INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) +
412 		INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) >
413 	    INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) {
414 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
415 			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
416 			(unsigned long long)ip->i_ino,
417 			(int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT)
418 			    + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)),
419 			(unsigned long long)
420 			INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT));
421 		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
422 				     ip->i_mount, dip);
423 		return XFS_ERROR(EFSCORRUPTED);
424 	}
425 
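	/* Sanity check the attribute fork offset against the inode size. */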
426 	if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) {
427 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
428 			"corrupt dinode %Lu, forkoff = 0x%x.",
429 			(unsigned long long)ip->i_ino,
430 			(int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT)));
431 		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
432 				     ip->i_mount, dip);
433 		return XFS_ERROR(EFSCORRUPTED);
434 	}
435 
436 	switch (ip->i_d.di_mode & S_IFMT) {
437 	case S_IFIFO:
438 	case S_IFCHR:
439 	case S_IFBLK:
440 	case S_IFSOCK:
441 		if (unlikely(INT_GET(dip->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_DEV)) {
442 			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
443 					      ip->i_mount, dip);
444 			return XFS_ERROR(EFSCORRUPTED);
445 		}
446 		ip->i_d.di_size = 0;
447 		ip->i_size = 0;
448 		ip->i_df.if_u2.if_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT);
449 		break;
450 
451 	case S_IFREG:
452 	case S_IFLNK:
453 	case S_IFDIR:
454 		switch (INT_GET(dip->di_core.di_format, ARCH_CONVERT)) {
455 		case XFS_DINODE_FMT_LOCAL:
456 			/*
457 			 * no local regular files yet
458 			 */
459 			if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) {
460 				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
461 					"corrupt inode %Lu "
462 					"(local format for regular file).",
463 					(unsigned long long) ip->i_ino);
464 				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
465 						     XFS_ERRLEVEL_LOW,
466 						     ip->i_mount, dip);
467 				return XFS_ERROR(EFSCORRUPTED);
468 			}
469 
470 			di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT);
471 			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
472 				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
473 					"corrupt inode %Lu "
474 					"(bad size %Ld for local inode).",
475 					(unsigned long long) ip->i_ino,
476 					(long long) di_size);
477 				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
478 						     XFS_ERRLEVEL_LOW,
479 						     ip->i_mount, dip);
480 				return XFS_ERROR(EFSCORRUPTED);
481 			}
482 
483 			size = (int)di_size;
484 			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
485 			break;
486 		case XFS_DINODE_FMT_EXTENTS:
487 			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
488 			break;
489 		case XFS_DINODE_FMT_BTREE:
490 			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
491 			break;
492 		default:
493 			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
494 					 ip->i_mount);
495 			return XFS_ERROR(EFSCORRUPTED);
496 		}
497 		break;
498 
499 	default:
500 		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
501 		return XFS_ERROR(EFSCORRUPTED);
502 	}
503 	if (error) {
504 		return error;
505 	}
506 	if (!XFS_DFORK_Q(dip))
507 		return 0;
508 	ASSERT(ip->i_afp == NULL);
509 	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
510 	ip->i_afp->if_ext_max =
511 		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
512 	switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) {
513 	case XFS_DINODE_FMT_LOCAL:
514 		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
515 		size = be16_to_cpu(atp->hdr.totsize);
516 		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
517 		break;
518 	case XFS_DINODE_FMT_EXTENTS:
519 		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
520 		break;
521 	case XFS_DINODE_FMT_BTREE:
522 		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
523 		break;
524 	default:
525 		error = XFS_ERROR(EFSCORRUPTED);
526 		break;
527 	}
528 	if (error) {
529 		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
530 		ip->i_afp = NULL;
531 		xfs_idestroy_fork(ip, XFS_DATA_FORK);
532 	}
533 	return error;
534 }
535 
536 /*
537  * The file is in-lined in the on-disk inode.
538  * If it fits into if_inline_data, then copy
539  * it there, otherwise allocate a buffer for it
540  * and copy the data there.  Either way, set
541  * if_data to point at the data.
542  * If we allocate a buffer for the data, make
543  * sure that its size is a multiple of 4 and
544  * record the real size in if_real_bytes.
545  */
546 STATIC int
547 xfs_iformat_local(
548 	xfs_inode_t	*ip,
549 	xfs_dinode_t	*dip,
550 	int		whichfork,
551 	int		size)
552 {
553 	xfs_ifork_t	*ifp;
554 	int		real_size;
555 
556 	/*
557 	 * If the size is unreasonable, then something
558 	 * is wrong and we just bail out rather than crash in
559 	 * kmem_alloc() or memcpy() below.
560 	 */
561 	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
562 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
563 			"corrupt inode %Lu "
564 			"(bad size %d for local fork, size = %d).",
565 			(unsigned long long) ip->i_ino, size,
566 			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
567 		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
568 				     ip->i_mount, dip);
569 		return XFS_ERROR(EFSCORRUPTED);
570 	}
571 	ifp = XFS_IFORK_PTR(ip, whichfork);
572 	real_size = 0;
573 	if (size == 0)
574 		ifp->if_u1.if_data = NULL;
575 	else if (size <= sizeof(ifp->if_u2.if_inline_data))
576 		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
577 	else {
578 		real_size = roundup(size, 4);
579 		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
580 	}
581 	ifp->if_bytes = size;
582 	ifp->if_real_bytes = real_size;
583 	if (size)
584 		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
585 	ifp->if_flags &= ~XFS_IFEXTENTS;
586 	ifp->if_flags |= XFS_IFINLINE;
587 	return 0;
588 }
589 
590 /*
591  * The file consists of a set of extents all
592  * of which fit into the on-disk inode.
593  * If there are few enough extents to fit into
594  * the if_inline_ext, then copy them there.
595  * Otherwise allocate a buffer for them and copy
596  * them into it.  Either way, set if_extents
597  * to point at the extents.
598  */
599 STATIC int
600 xfs_iformat_extents(
601 	xfs_inode_t	*ip,
602 	xfs_dinode_t	*dip,
603 	int		whichfork)
604 {
605 	xfs_bmbt_rec_t	*ep, *dp;
606 	xfs_ifork_t	*ifp;
607 	int		nex;
608 	int		size;
609 	int		i;
610 
611 	ifp = XFS_IFORK_PTR(ip, whichfork);
612 	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
613 	size = nex * (uint)sizeof(xfs_bmbt_rec_t);
614 
615 	/*
616 	 * If the number of extents is unreasonable, then something
617 	 * is wrong and we just bail out rather than crash in
618 	 * kmem_alloc() or memcpy() below.
619 	 */
620 	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
621 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
622 			"corrupt inode %Lu ((a)extents = %d).",
623 			(unsigned long long) ip->i_ino, nex);
624 		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
625 				     ip->i_mount, dip);
626 		return XFS_ERROR(EFSCORRUPTED);
627 	}
628 
629 	ifp->if_real_bytes = 0;
630 	if (nex == 0)
631 		ifp->if_u1.if_extents = NULL;
632 	else if (nex <= XFS_INLINE_EXTS)
633 		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
634 	else
635 		xfs_iext_add(ifp, 0, nex);
636 
637 	ifp->if_bytes = size;
638 	if (size) {
639 		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
640 		xfs_validate_extents(ifp, nex, 1, XFS_EXTFMT_INODE(ip));
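		/*
		 * Copy each record, converting from on-disk (big-endian)
		 * to host byte order; get_unaligned() copes with the
		 * packed on-disk layout.
		 */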
641 		for (i = 0; i < nex; i++, dp++) {
642 			ep = xfs_iext_get_ext(ifp, i);
643 			ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0),
644 								ARCH_CONVERT);
645 			ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1),
646 								ARCH_CONVERT);
647 		}
648 		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
649 		if (whichfork != XFS_DATA_FORK ||
650 			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
651 				if (unlikely(xfs_check_nostate_extents(
652 				    ifp, 0, nex))) {
653 					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
654 							 XFS_ERRLEVEL_LOW,
655 							 ip->i_mount);
656 					return XFS_ERROR(EFSCORRUPTED);
657 				}
658 	}
659 	ifp->if_flags |= XFS_IFEXTENTS;
660 	return 0;
661 }
662 
663 /*
664  * The file has too many extents to fit into
665  * the inode, so they are in B-tree format.
666  * Allocate a buffer for the root of the B-tree
667  * and copy the root into it.  The if_extents
668  * field will remain NULL until all of the
669  * extents are read in (when they are needed).
670  */
671 STATIC int
672 xfs_iformat_btree(
673 	xfs_inode_t		*ip,
674 	xfs_dinode_t		*dip,
675 	int			whichfork)
676 {
677 	xfs_bmdr_block_t	*dfp;
678 	xfs_ifork_t		*ifp;
679 	/* REFERENCED */
680 	int			nrecs;
681 	int			size;
682 
683 	ifp = XFS_IFORK_PTR(ip, whichfork);
684 	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
685 	size = XFS_BMAP_BROOT_SPACE(dfp);
686 	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);
687 
688 	/*
689 	 * blow out if -- the fork has fewer extents than can fit in the
690 	 * fork (the fork shouldn't be in btree format), the root btree
691 	 * block has more records than can fit into the fork,
692 	 * or the number of extents is greater than the number of
693 	 * blocks.
694 	 */
695 	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
696 	    || XFS_BMDR_SPACE_CALC(nrecs) >
697 			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
698 	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
699 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
700 			"corrupt inode %Lu (btree).",
701 			(unsigned long long) ip->i_ino);
702 		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
703 				 ip->i_mount);
704 		return XFS_ERROR(EFSCORRUPTED);
705 	}
706 
707 	ifp->if_broot_bytes = size;
708 	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
709 	ASSERT(ifp->if_broot != NULL);
710 	/*
711 	 * Copy and convert from the on-disk structure
712 	 * to the in-memory structure.
713 	 */
714 	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
715 		ifp->if_broot, size);
716 	ifp->if_flags &= ~XFS_IFEXTENTS;
717 	ifp->if_flags |= XFS_IFBROOT;
718 
719 	return 0;
720 }
721 
722 /*
723  * xfs_xlate_dinode_core - translate an xfs_dinode_core_t between on-disk
724  * and native format
725  *
726  * buf  = on-disk representation
727  * dip  = native representation
728  * dir  = direction - +ve -> disk to native
729  *                    -ve -> native to disk
730  */
731 void
732 xfs_xlate_dinode_core(
733 	xfs_caddr_t		buf,
734 	xfs_dinode_core_t	*dip,
735 	int			dir)
736 {
737 	xfs_dinode_core_t	*buf_core = (xfs_dinode_core_t *)buf;
738 	xfs_dinode_core_t	*mem_core = (xfs_dinode_core_t *)dip;
739 	xfs_arch_t		arch = ARCH_CONVERT;
740 
741 	ASSERT(dir);
742 
743 	INT_XLATE(buf_core->di_magic, mem_core->di_magic, dir, arch);
744 	INT_XLATE(buf_core->di_mode, mem_core->di_mode, dir, arch);
745 	INT_XLATE(buf_core->di_version,	mem_core->di_version, dir, arch);
746 	INT_XLATE(buf_core->di_format, mem_core->di_format, dir, arch);
747 	INT_XLATE(buf_core->di_onlink, mem_core->di_onlink, dir, arch);
748 	INT_XLATE(buf_core->di_uid, mem_core->di_uid, dir, arch);
749 	INT_XLATE(buf_core->di_gid, mem_core->di_gid, dir, arch);
750 	INT_XLATE(buf_core->di_nlink, mem_core->di_nlink, dir, arch);
751 	INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch);
752 
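	/* di_pad is opaque padding, so copy it verbatim instead of byte-swapping. */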
753 	if (dir > 0) {
754 		memcpy(mem_core->di_pad, buf_core->di_pad,
755 			sizeof(buf_core->di_pad));
756 	} else {
757 		memcpy(buf_core->di_pad, mem_core->di_pad,
758 			sizeof(buf_core->di_pad));
759 	}
760 
761 	INT_XLATE(buf_core->di_flushiter, mem_core->di_flushiter, dir, arch);
762 
763 	INT_XLATE(buf_core->di_atime.t_sec, mem_core->di_atime.t_sec,
764 			dir, arch);
765 	INT_XLATE(buf_core->di_atime.t_nsec, mem_core->di_atime.t_nsec,
766 			dir, arch);
767 	INT_XLATE(buf_core->di_mtime.t_sec, mem_core->di_mtime.t_sec,
768 			dir, arch);
769 	INT_XLATE(buf_core->di_mtime.t_nsec, mem_core->di_mtime.t_nsec,
770 			dir, arch);
771 	INT_XLATE(buf_core->di_ctime.t_sec, mem_core->di_ctime.t_sec,
772 			dir, arch);
773 	INT_XLATE(buf_core->di_ctime.t_nsec, mem_core->di_ctime.t_nsec,
774 			dir, arch);
775 	INT_XLATE(buf_core->di_size, mem_core->di_size, dir, arch);
776 	INT_XLATE(buf_core->di_nblocks, mem_core->di_nblocks, dir, arch);
777 	INT_XLATE(buf_core->di_extsize, mem_core->di_extsize, dir, arch);
778 	INT_XLATE(buf_core->di_nextents, mem_core->di_nextents, dir, arch);
779 	INT_XLATE(buf_core->di_anextents, mem_core->di_anextents, dir, arch);
780 	INT_XLATE(buf_core->di_forkoff, mem_core->di_forkoff, dir, arch);
781 	INT_XLATE(buf_core->di_aformat, mem_core->di_aformat, dir, arch);
782 	INT_XLATE(buf_core->di_dmevmask, mem_core->di_dmevmask, dir, arch);
783 	INT_XLATE(buf_core->di_dmstate, mem_core->di_dmstate, dir, arch);
784 	INT_XLATE(buf_core->di_flags, mem_core->di_flags, dir, arch);
785 	INT_XLATE(buf_core->di_gen, mem_core->di_gen, dir, arch);
786 }
787 
788 STATIC uint
789 _xfs_dic2xflags(
790 	__uint16_t		di_flags)
791 {
792 	uint			flags = 0;
793 
794 	if (di_flags & XFS_DIFLAG_ANY) {
795 		if (di_flags & XFS_DIFLAG_REALTIME)
796 			flags |= XFS_XFLAG_REALTIME;
797 		if (di_flags & XFS_DIFLAG_PREALLOC)
798 			flags |= XFS_XFLAG_PREALLOC;
799 		if (di_flags & XFS_DIFLAG_IMMUTABLE)
800 			flags |= XFS_XFLAG_IMMUTABLE;
801 		if (di_flags & XFS_DIFLAG_APPEND)
802 			flags |= XFS_XFLAG_APPEND;
803 		if (di_flags & XFS_DIFLAG_SYNC)
804 			flags |= XFS_XFLAG_SYNC;
805 		if (di_flags & XFS_DIFLAG_NOATIME)
806 			flags |= XFS_XFLAG_NOATIME;
807 		if (di_flags & XFS_DIFLAG_NODUMP)
808 			flags |= XFS_XFLAG_NODUMP;
809 		if (di_flags & XFS_DIFLAG_RTINHERIT)
810 			flags |= XFS_XFLAG_RTINHERIT;
811 		if (di_flags & XFS_DIFLAG_PROJINHERIT)
812 			flags |= XFS_XFLAG_PROJINHERIT;
813 		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
814 			flags |= XFS_XFLAG_NOSYMLINKS;
815 		if (di_flags & XFS_DIFLAG_EXTSIZE)
816 			flags |= XFS_XFLAG_EXTSIZE;
817 		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
818 			flags |= XFS_XFLAG_EXTSZINHERIT;
819 		if (di_flags & XFS_DIFLAG_NODEFRAG)
820 			flags |= XFS_XFLAG_NODEFRAG;
821 		if (di_flags & XFS_DIFLAG_FILESTREAM)
822 			flags |= XFS_XFLAG_FILESTREAM;
823 	}
824 
825 	return flags;
826 }
827 
828 uint
829 xfs_ip2xflags(
830 	xfs_inode_t		*ip)
831 {
832 	xfs_dinode_core_t	*dic = &ip->i_d;
833 
834 	return _xfs_dic2xflags(dic->di_flags) |
835 				(XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
836 }
837 
838 uint
839 xfs_dic2xflags(
840 	xfs_dinode_core_t	*dic)
841 {
842 	return _xfs_dic2xflags(INT_GET(dic->di_flags, ARCH_CONVERT)) |
843 				(XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
844 }
845 
846 /*
847  * Given a mount structure and an inode number, return a pointer
848  * to a newly allocated in-core inode corresponding to the given
849  * inode number.
850  *
851  * Initialize the inode's attributes and extent pointers if it
852  * already has them (it will not if the inode has no links).
853  */
854 int
855 xfs_iread(
856 	xfs_mount_t	*mp,
857 	xfs_trans_t	*tp,
858 	xfs_ino_t	ino,
859 	xfs_inode_t	**ipp,
860 	xfs_daddr_t	bno,
861 	uint		imap_flags)
862 {
863 	xfs_buf_t	*bp;
864 	xfs_dinode_t	*dip;
865 	xfs_inode_t	*ip;
866 	int		error;
867 
868 	ASSERT(xfs_inode_zone != NULL);
869 
870 	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
871 	ip->i_ino = ino;
872 	ip->i_mount = mp;
873 	spin_lock_init(&ip->i_flags_lock);
874 
875 	/*
876 	 * Get pointers to the on-disk inode and the buffer containing it.
877 	 * If the inode number refers to a block outside the file system
878 	 * then xfs_itobp() will return an error, and we return that error
879 	 * as well.  i_blkno was zeroed above so that xfs_itobp() will
880 	 * know that this is a new incore inode.
881 	 */
882 	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
883 	if (error) {
884 		kmem_zone_free(xfs_inode_zone, ip);
885 		return error;
886 	}
887 
888 	/*
889 	 * Initialize the inode's trace buffers.
890 	 * Do this before xfs_iformat in case it adds entries.
891 	 */
892 #ifdef XFS_BMAP_TRACE
893 	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
894 #endif
895 #ifdef XFS_BMBT_TRACE
896 	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
897 #endif
898 #ifdef XFS_RW_TRACE
899 	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
900 #endif
901 #ifdef XFS_ILOCK_TRACE
902 	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
903 #endif
904 #ifdef XFS_DIR2_TRACE
905 	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
906 #endif
907 
908 	/*
909 	 * If we got something that isn't an inode it means someone
910 	 * (nfs or dmi) has a stale handle.
911 	 */
912 	if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) {
913 		kmem_zone_free(xfs_inode_zone, ip);
914 		xfs_trans_brelse(tp, bp);
915 #ifdef DEBUG
916 		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
917 				"dip->di_core.di_magic (0x%x) != "
918 				"XFS_DINODE_MAGIC (0x%x)",
919 				INT_GET(dip->di_core.di_magic, ARCH_CONVERT),
920 				XFS_DINODE_MAGIC);
921 #endif /* DEBUG */
922 		return XFS_ERROR(EINVAL);
923 	}
924 
925 	/*
926 	 * If the on-disk inode is already linked to a directory
927 	 * entry, copy all of the inode into the in-core inode.
928 	 * xfs_iformat() handles copying in the inode format
929 	 * specific information.
930 	 * Otherwise, just get the truly permanent information.
931 	 */
932 	if (dip->di_core.di_mode) {
933 		xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
934 		     &(ip->i_d), 1);
935 		error = xfs_iformat(ip, dip);
936 		if (error)  {
937 			kmem_zone_free(xfs_inode_zone, ip);
938 			xfs_trans_brelse(tp, bp);
939 #ifdef DEBUG
940 			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
941 					"xfs_iformat() returned error %d",
942 					error);
943 #endif /* DEBUG */
944 			return error;
945 		}
946 	} else {
947 		ip->i_d.di_magic = INT_GET(dip->di_core.di_magic, ARCH_CONVERT);
948 		ip->i_d.di_version = INT_GET(dip->di_core.di_version, ARCH_CONVERT);
949 		ip->i_d.di_gen = INT_GET(dip->di_core.di_gen, ARCH_CONVERT);
950 		ip->i_d.di_flushiter = INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT);
951 		/*
952 		 * Make sure to pull in the mode here as well in
953 		 * case the inode is released without being used.
954 		 * This ensures that xfs_inactive() will see that
955 		 * the inode is already free and not try to mess
956 		 * with the uninitialized part of it.
957 		 */
958 		ip->i_d.di_mode = 0;
959 		/*
960 		 * Initialize the per-fork minima and maxima for a new
961 		 * inode here.  xfs_iformat will do it for old inodes.
962 		 */
963 		ip->i_df.if_ext_max =
964 			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
965 	}
966 
967 	INIT_LIST_HEAD(&ip->i_reclaim);
968 
969 	/*
970 	 * The inode format changed when we moved the link count and
971 	 * made it 32 bits long.  If this is an old format inode,
972 	 * convert it in memory to look like a new one.  If it gets
973 	 * flushed to disk we will convert back before flushing or
974 	 * logging it.  We zero out the new projid field and the old link
975 	 * count field.  We'll handle clearing the pad field (the remains
976 	 * of the old uuid field) when we actually convert the inode to
977 	 * the new format. We don't change the version number so that we
978 	 * can distinguish this from a real new format inode.
979 	 */
980 	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
981 		ip->i_d.di_nlink = ip->i_d.di_onlink;
982 		ip->i_d.di_onlink = 0;
983 		ip->i_d.di_projid = 0;
984 	}
985 
986 	ip->i_delayed_blks = 0;
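	/* Prime the in-core file size from the just-read on-disk size. */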
987 	ip->i_size = ip->i_d.di_size;
988 
989 	/*
990 	 * Mark the buffer containing the inode as something to keep
991 	 * around for a while.  This helps to keep recently accessed
992 	 * meta-data in-core longer.
993 	 */
994 	XFS_BUF_SET_REF(bp, XFS_INO_REF);
995 
996 	/*
997 	 * Use xfs_trans_brelse() to release the buffer containing the
998 	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
999 	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
1000 	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
1001 	 * will only release the buffer if it is not dirty within the
1002 	 * transaction.  It will be OK to release the buffer in this case,
1003 	 * because inodes on disk are never destroyed and we will be
1004 	 * locking the new in-core inode before putting it in the hash
1005 	 * table where other processes can find it.  Thus we don't have
1006 	 * to worry about the inode being changed just because we released
1007 	 * the buffer.
1008 	 */
1009 	xfs_trans_brelse(tp, bp);
1010 	*ipp = ip;
1011 	return 0;
1012 }
1013 
1014 /*
1015  * Read in extents from a btree-format inode.
1016  * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
1017  */
1018 int
1019 xfs_iread_extents(
1020 	xfs_trans_t	*tp,
1021 	xfs_inode_t	*ip,
1022 	int		whichfork)
1023 {
1024 	int		error;
1025 	xfs_ifork_t	*ifp;
1026 	xfs_extnum_t	nextents;
1027 	size_t		size;
1028 
1029 	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
1030 		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
1031 				 ip->i_mount);
1032 		return XFS_ERROR(EFSCORRUPTED);
1033 	}
1034 	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
1035 	size = nextents * sizeof(xfs_bmbt_rec_t);
1036 	ifp = XFS_IFORK_PTR(ip, whichfork);
1037 
1038 	/*
1039 	 * We know that the size is valid (it's checked in iformat_btree)
1040 	 */
1041 	ifp->if_lastex = NULLEXTNUM;
1042 	ifp->if_bytes = ifp->if_real_bytes = 0;
1043 	ifp->if_flags |= XFS_IFEXTENTS;
1044 	xfs_iext_add(ifp, 0, nextents);
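	/*
	 * Room for all the extents has been reserved above;
	 * xfs_bmap_read_extents() walks the btree and fills it in.
	 */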
1045 	error = xfs_bmap_read_extents(tp, ip, whichfork);
1046 	if (error) {
1047 		xfs_iext_destroy(ifp);
1048 		ifp->if_flags &= ~XFS_IFEXTENTS;
1049 		return error;
1050 	}
1051 	xfs_validate_extents(ifp, nextents, 0, XFS_EXTFMT_INODE(ip));
1052 	return 0;
1053 }
1054 
1055 /*
1056  * Allocate an inode on disk and return a copy of its in-core version.
1057  * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
1058  * appropriately within the inode.  The uid and gid for the inode are
1059  * set according to the contents of the given cred structure.
1060  *
1061  * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
1062  * has a free inode available, call xfs_iget()
1063  * to obtain the in-core version of the allocated inode.  Finally,
1064  * fill in the inode and log its initial contents.  In this case,
1065  * ialloc_context would be set to NULL and call_again set to false.
1066  *
1067  * If xfs_dialloc() does not have an available inode,
1068  * it will replenish its supply by doing an allocation. Since we can
1069  * only do one allocation within a transaction without deadlocks, we
1070  * must commit the current transaction before returning the inode itself.
1071  * In this case, therefore, we will set call_again to true and return.
1072  * The caller should then commit the current transaction, start a new
1073  * transaction, and call xfs_ialloc() again to actually get the inode.
1074  *
1075  * To ensure that some other process does not grab the inode that
1076  * was allocated during the first call to xfs_ialloc(), this routine
1077  * also returns the [locked] bp pointing to the head of the freelist
1078  * as ialloc_context.  The caller should hold this buffer across
1079  * the commit and pass it back into this routine on the second call.
1080  *
1081  * If we are allocating quota inodes, we do not have a parent inode
1082  * to attach to or associate with (i.e. pip == NULL) because they
1083  * are not linked into the directory structure - they are attached
1084  * directly to the superblock - and so have no parent.
1085  */
1086 int
1087 xfs_ialloc(
1088 	xfs_trans_t	*tp,
1089 	xfs_inode_t	*pip,
1090 	mode_t		mode,
1091 	xfs_nlink_t	nlink,
1092 	xfs_dev_t	rdev,
1093 	cred_t		*cr,
1094 	xfs_prid_t	prid,
1095 	int		okalloc,
1096 	xfs_buf_t	**ialloc_context,
1097 	boolean_t	*call_again,
1098 	xfs_inode_t	**ipp)
1099 {
1100 	xfs_ino_t	ino;
1101 	xfs_inode_t	*ip;
1102 	bhv_vnode_t	*vp;
1103 	uint		flags;
1104 	int		error;
1105 
1106 	/*
1107 	 * Call the space management code to pick
1108 	 * the on-disk inode to be allocated.
1109 	 */
1110 	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
1111 			    ialloc_context, call_again, &ino);
1112 	if (error != 0) {
1113 		return error;
1114 	}
1115 	if (*call_again || ino == NULLFSINO) {
1116 		*ipp = NULL;
1117 		return 0;
1118 	}
1119 	ASSERT(*ialloc_context == NULL);
1120 
1121 	/*
1122 	 * Get the in-core inode with the lock held exclusively.
1123 	 * This is because we're setting fields here that others must
1124 	 * not look at until we're done.
1125 	 */
1126 	error = xfs_trans_iget(tp->t_mountp, tp, ino,
1127 				XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
1128 	if (error != 0) {
1129 		return error;
1130 	}
1131 	ASSERT(ip != NULL);
1132 
1133 	vp = XFS_ITOV(ip);
1134 	ip->i_d.di_mode = (__uint16_t)mode;
1135 	ip->i_d.di_onlink = 0;
1136 	ip->i_d.di_nlink = nlink;
1137 	ASSERT(ip->i_d.di_nlink == nlink);
1138 	ip->i_d.di_uid = current_fsuid(cr);
1139 	ip->i_d.di_gid = current_fsgid(cr);
1140 	ip->i_d.di_projid = prid;
1141 	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1142 
1143 	/*
1144 	 * If the superblock version is up to where we support new format
1145 	 * inodes and this is currently an old format inode, then change
1146 	 * the inode version number now.  This way we only do the conversion
1147 	 * here rather than here and in the flush/logging code.
1148 	 */
1149 	if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) &&
1150 	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
1151 		ip->i_d.di_version = XFS_DINODE_VERSION_2;
1152 		/*
1153 		 * We've already zeroed the old link count, the projid field,
1154 		 * and the pad field.
1155 		 */
1156 	}
1157 
1158 	/*
1159 	 * Project ids won't be stored on disk if we are using a version 1 inode, so bump to version 2 when a nonzero project id is requested.
1160 	 */
1161 	if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
1162 		xfs_bump_ino_vers2(tp, ip);
1163 
1164 	if (pip && XFS_INHERIT_GID(pip, vp->v_vfsp)) {
1165 		ip->i_d.di_gid = pip->i_d.di_gid;
1166 		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
1167 			ip->i_d.di_mode |= S_ISGID;
1168 		}
1169 	}
1170 
1171 	/*
1172 	 * If the group ID of the new file does not match the effective group
1173 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
1174 	 * (and only if the irix_sgid_inherit compatibility variable is set).
1175 	 */
1176 	if ((irix_sgid_inherit) &&
1177 	    (ip->i_d.di_mode & S_ISGID) &&
1178 	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
1179 		ip->i_d.di_mode &= ~S_ISGID;
1180 	}
1181 
1182 	ip->i_d.di_size = 0;
1183 	ip->i_size = 0;
1184 	ip->i_d.di_nextents = 0;
1185 	ASSERT(ip->i_d.di_nblocks == 0);
1186 	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
1187 	/*
1188 	 * di_gen will have been taken care of in xfs_iread.
1189 	 */
1190 	ip->i_d.di_extsize = 0;
1191 	ip->i_d.di_dmevmask = 0;
1192 	ip->i_d.di_dmstate = 0;
1193 	ip->i_d.di_flags = 0;
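	/*
	 * Accumulate inode-logging flags here; every new inode logs at
	 * least its core, and device-special files add XFS_ILOG_DEV.
	 */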
1194 	flags = XFS_ILOG_CORE;
1195 	switch (mode & S_IFMT) {
1196 	case S_IFIFO:
1197 	case S_IFCHR:
1198 	case S_IFBLK:
1199 	case S_IFSOCK:
1200 		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
1201 		ip->i_df.if_u2.if_rdev = rdev;
1202 		ip->i_df.if_flags = 0;
1203 		flags |= XFS_ILOG_DEV;
1204 		break;
1205 	case S_IFREG:
1206 		if (pip && xfs_inode_is_filestream(pip)) {
1207 			error = xfs_filestream_associate(pip, ip);
1208 			if (error < 0)
1209 				return -error;
1210 			if (!error)
1211 				xfs_iflags_set(ip, XFS_IFILESTREAM);
1212 		}
1213 		/* fall through */
1214 	case S_IFDIR:
1215 		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1216 			uint	di_flags = 0;
1217 
1218 			if ((mode & S_IFMT) == S_IFDIR) {
1219 				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1220 					di_flags |= XFS_DIFLAG_RTINHERIT;
1221 				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1222 					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1223 					ip->i_d.di_extsize = pip->i_d.di_extsize;
1224 				}
1225 			} else if ((mode & S_IFMT) == S_IFREG) {
1226 				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
1227 					di_flags |= XFS_DIFLAG_REALTIME;
1228 					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
1229 				}
1230 				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1231 					di_flags |= XFS_DIFLAG_EXTSIZE;
1232 					ip->i_d.di_extsize = pip->i_d.di_extsize;
1233 				}
1234 			}
1235 			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1236 			    xfs_inherit_noatime)
1237 				di_flags |= XFS_DIFLAG_NOATIME;
1238 			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
1239 			    xfs_inherit_nodump)
1240 				di_flags |= XFS_DIFLAG_NODUMP;
1241 			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
1242 			    xfs_inherit_sync)
1243 				di_flags |= XFS_DIFLAG_SYNC;
1244 			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
1245 			    xfs_inherit_nosymlinks)
1246 				di_flags |= XFS_DIFLAG_NOSYMLINKS;
1247 			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1248 				di_flags |= XFS_DIFLAG_PROJINHERIT;
1249 			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1250 			    xfs_inherit_nodefrag)
1251 				di_flags |= XFS_DIFLAG_NODEFRAG;
1252 			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
1253 				di_flags |= XFS_DIFLAG_FILESTREAM;
1254 			ip->i_d.di_flags |= di_flags;
1255 		}
1256 		/* FALLTHROUGH */
1257 	case S_IFLNK:
1258 		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1259 		ip->i_df.if_flags = XFS_IFEXTENTS;
1260 		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
1261 		ip->i_df.if_u1.if_extents = NULL;
1262 		break;
1263 	default:
1264 		ASSERT(0);
1265 	}
1266 	/*
1267 	 * Attribute fork settings for new inode.
1268 	 */
1269 	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1270 	ip->i_d.di_anextents = 0;
1271 
1272 	/*
1273 	 * Log the new values stuffed into the inode.
1274 	 */
1275 	xfs_trans_log_inode(tp, ip, flags);
1276 
1277 	/* now that we have an i_mode we can setup inode ops and unlock */
1278 	bhv_vfs_init_vnode(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);
1279 
1280 	*ipp = ip;
1281 	return 0;
1282 }
1283 
1284 /*
1285  * Check to make sure that there are no blocks allocated to the
1286  * file beyond the size of the file.  We don't check this for
1287  * files with fixed size extents or real time extents, but we
1288  * do check it for all other regular files.
1289  */
1290 #ifdef DEBUG
1291 void
1292 xfs_isize_check(
1293 	xfs_mount_t	*mp,
1294 	xfs_inode_t	*ip,
1295 	xfs_fsize_t	isize)
1296 {
1297 	xfs_fileoff_t	map_first;
1298 	int		nimaps;
1299 	xfs_bmbt_irec_t	imaps[2];
1300 
1301 	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
1302 		return;
1303 
1304 	if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
1305 		return;
1306 
1307 	nimaps = 2;
1308 	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
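	/*
	 * Ask for up to two mappings: if the single returned mapping is
	 * a hole covering everything from isize to the maximum file
	 * offset, no blocks can be allocated beyond EOF (see the
	 * ASSERTs below).
	 */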
1309 	/*
1310 	 * The filesystem could be shutting down, so bmapi may return
1311 	 * an error.
1312 	 */
1313 	if (xfs_bmapi(NULL, ip, map_first,
1314 			 (XFS_B_TO_FSB(mp,
1315 				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
1316 			  map_first),
1317 			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
1318 			 NULL, NULL))
1319 	    return;
1320 	ASSERT(nimaps == 1);
1321 	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
1322 }
1323 #endif	/* DEBUG */
1324 
1325 /*
1326  * Calculate the last possible buffered byte in a file.  This must
1327  * include data that was buffered beyond the EOF by the write code.
1328  * This also needs to deal with overflowing the xfs_fsize_t type
1329  * which can happen for sizes near the limit.
1330  *
1331  * We also need to take into account any blocks beyond the EOF.  It
1332  * may be the case that they were buffered by a write which failed.
1333  * In that case the pages will still be in memory, but the inode size
1334  * will never have been updated.
1335  */
1336 xfs_fsize_t
1337 xfs_file_last_byte(
1338 	xfs_inode_t	*ip)
1339 {
1340 	xfs_mount_t	*mp;
1341 	xfs_fsize_t	last_byte;
1342 	xfs_fileoff_t	last_block;
1343 	xfs_fileoff_t	size_last_block;
1344 	int		error;
1345 
1346 	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));
1347 
1348 	mp = ip->i_mount;
1349 	/*
1350 	 * Only check for blocks beyond the EOF if the extents have
1351 	 * been read in.  This eliminates the need for the inode lock,
1352 	 * and it also saves us from looking when it really isn't
1353 	 * necessary.
1354 	 */
1355 	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
1356 		error = xfs_bmap_last_offset(NULL, ip, &last_block,
1357 			XFS_DATA_FORK);
1358 		if (error) {
1359 			last_block = 0;
1360 		}
1361 	} else {
1362 		last_block = 0;
1363 	}
1364 	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
1365 	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
1366 
1367 	last_byte = XFS_FSB_TO_B(mp, last_block);
1368 	if (last_byte < 0) {
1369 		return XFS_MAXIOFFSET(mp);
1370 	}
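	/*
	 * Allow for data buffered beyond EOF by the write code: extend
	 * the last byte by one write-I/O-sized chunk
	 * (1 << m_writeio_log bytes).
	 */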
1371 	last_byte += (1 << mp->m_writeio_log);
1372 	if (last_byte < 0) {
1373 		return XFS_MAXIOFFSET(mp);
1374 	}
1375 	return last_byte;
1376 }
1377 
1378 #if defined(XFS_RW_TRACE)
1379 STATIC void
1380 xfs_itrunc_trace(
1381 	int		tag,
1382 	xfs_inode_t	*ip,
1383 	int		flag,
1384 	xfs_fsize_t	new_size,
1385 	xfs_off_t	toss_start,
1386 	xfs_off_t	toss_finish)
1387 {
1388 	if (ip->i_rwtrace == NULL) {
1389 		return;
1390 	}
1391 
1392 	ktrace_enter(ip->i_rwtrace,
1393 		     (void*)((long)tag),
1394 		     (void*)ip,
1395 		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
1396 		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
1397 		     (void*)((long)flag),
1398 		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
1399 		     (void*)(unsigned long)(new_size & 0xffffffff),
1400 		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
1401 		     (void*)(unsigned long)(toss_start & 0xffffffff),
1402 		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
1403 		     (void*)(unsigned long)(toss_finish & 0xffffffff),
1404 		     (void*)(unsigned long)current_cpu(),
1405 		     (void*)(unsigned long)current_pid(),
1406 		     (void*)NULL,
1407 		     (void*)NULL,
1408 		     (void*)NULL);
1409 }
1410 #else
1411 #define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
1412 #endif
1413 
1414 /*
1415  * Start the truncation of the file to new_size.  The new size
1416  * must be smaller than the current size.  This routine will
1417  * clear the buffer and page caches of file data in the removed
1418  * range, and xfs_itruncate_finish() will remove the underlying
1419  * disk blocks.
1420  *
1421  * The inode must have its I/O lock locked EXCLUSIVELY, and it
1422  * must NOT have the inode lock held at all.  This is because we're
1423  * calling into the buffer/page cache code and we can't hold the
1424  * inode lock when we do so.
1425  *
1426  * We need to wait for any direct I/Os in flight to complete before we
1427  * proceed with the truncate. This is needed to prevent the extents
1428  * being read or written by the direct I/Os from being removed while the
1429  * I/O is in flight as there is no other method of synchronising
1430  * direct I/O with the truncate operation.  Also, because we hold
1431  * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
1432  * started until the truncate completes and drops the lock. Essentially,
1433  * the vn_iowait() call forms an I/O barrier that provides strict ordering
1434  * between direct I/Os and the truncate operation.
1435  *
1436  * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
1437  * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
1438  * in the case that the caller is locking things out of order and
1439  * may not be able to call xfs_itruncate_finish() with the inode lock
1440  * held without dropping the I/O lock.  If the caller must drop the
1441  * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
1442  * must be called again with all the same restrictions as the initial
1443  * call.
1444  */
1445 int
1446 xfs_itruncate_start(
1447 	xfs_inode_t	*ip,
1448 	uint		flags,
1449 	xfs_fsize_t	new_size)
1450 {
1451 	xfs_fsize_t	last_byte;
1452 	xfs_off_t	toss_start;
1453 	xfs_mount_t	*mp;
1454 	bhv_vnode_t	*vp;
1455 	int		error = 0;
1456 
1457 	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1458 	ASSERT((new_size == 0) || (new_size <= ip->i_size));
1459 	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
1460 	       (flags == XFS_ITRUNC_MAYBE));
1461 
1462 	mp = ip->i_mount;
1463 	vp = XFS_ITOV(ip);
1464 
1465 	vn_iowait(vp);  /* wait for the completion of any pending DIOs */
1466 
1467 	/*
1468 	 * Call toss_pages or flushinval_pages to get rid of pages
1469 	 * overlapping the region being removed.  We have to use
1470 	 * the less efficient flushinval_pages in the case that the
1471 	 * caller may not be able to finish the truncate without
1472 	 * dropping the inode's I/O lock.  Make sure
1473 	 * to catch any pages brought in by buffers overlapping
1474 	 * the EOF by searching out beyond the isize by our
1475 	 * block size. We round new_size up to a block boundary
1476 	 * so that we don't toss things on the same block as
1477 	 * new_size but before it.
1478 	 *
1479 	 * Before calling toss_pages or flushinval_pages, make sure to
1480 	 * call remapf() over the same region if the file is mapped.
1481 	 * This frees up mapped file references to the pages in the
1482 	 * given range and for the flushinval_pages case it ensures
1483 	 * that we get the latest mapped changes flushed out.
1484 	 */
1485 	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1486 	toss_start = XFS_FSB_TO_B(mp, toss_start);
1487 	if (toss_start < 0) {
1488 		/*
1489 		 * The place to start tossing is beyond our maximum
1490 		 * file size, so there is no way that the data extended
1491 		 * out there.
1492 		 */
1493 		return 0;
1494 	}
1495 	last_byte = xfs_file_last_byte(ip);
1496 	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
1497 			 last_byte);
1498 	if (last_byte > toss_start) {
1499 		if (flags & XFS_ITRUNC_DEFINITE) {
1500 			bhv_vop_toss_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
1501 		} else {
1502 			error = bhv_vop_flushinval_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
1503 		}
1504 	}
1505 
1506 #ifdef DEBUG
1507 	if (new_size == 0) {
1508 		ASSERT(VN_CACHED(vp) == 0);
1509 	}
1510 #endif
1511 	return error;
1512 }
1513 
1514 /*
1515  * Shrink the file to the given new_size.  The new
1516  * size must be smaller than the current size.
1517  * This will free up the underlying blocks
1518  * in the removed range after a call to xfs_itruncate_start()
1519  * or xfs_atruncate_start().
1520  *
1521  * The transaction passed to this routine must have made
1522  * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
1523  * This routine may commit the given transaction and
1524  * start new ones, so make sure everything involved in
1525  * the transaction is tidy before calling here.
1526  * Some transaction will be returned to the caller to be
1527  * committed.  The incoming transaction must already include
1528  * the inode, and both inode locks must be held exclusively.
1529  * The inode must also be "held" within the transaction.  On
1530  * return the inode will be "held" within the returned transaction.
1531  * This routine does NOT require any disk space to be reserved
1532  * for it within the transaction.
1533  *
1534  * The fork parameter must be either XFS_ATTR_FORK or XFS_DATA_FORK,
1535  * and it indicates the fork which is to be truncated.  For the
1536  * attribute fork we only support truncation to size 0.
1537  *
1538  * We use the sync parameter to indicate whether or not the first
1539  * transaction we perform might have to be synchronous.  For the attr fork,
1540  * it needs to be so if the unlink of the inode is not yet known to be
1541  * permanent in the log.  This keeps us from freeing and reusing the
1542  * blocks of the attribute fork before the unlink of the inode becomes
1543  * permanent.
1544  *
1545  * For the data fork, we normally have to run synchronously if we're
1546  * being called out of the inactive path or we're being called
1547  * out of the create path where we're truncating an existing file.
1548  * Either way, the truncate needs to be sync so blocks don't reappear
1549  * in the file with altered data in case of a crash.  wsync filesystems
1550  * can run the first case async because anything that shrinks the inode
1551  * has to run sync so by the time we're called here from inactive, the
1552  * inode size is permanently set to 0.
1553  *
1554  * Calls from the truncate path always need to be sync unless we're
1555  * in a wsync filesystem and the file has already been unlinked.
1556  *
1557  * The caller is responsible for correctly setting the sync parameter.
1558  * It gets too hard for us to guess here which path we're being called
1559  * out of just based on inode state.
1560  */
1561 int
1562 xfs_itruncate_finish(
1563 	xfs_trans_t	**tp,
1564 	xfs_inode_t	*ip,
1565 	xfs_fsize_t	new_size,
1566 	int		fork,
1567 	int		sync)
1568 {
1569 	xfs_fsblock_t	first_block;
1570 	xfs_fileoff_t	first_unmap_block;
1571 	xfs_fileoff_t	last_block;
1572 	xfs_filblks_t	unmap_len = 0;
1573 	xfs_mount_t	*mp;
1574 	xfs_trans_t	*ntp;
1575 	int		done;
1576 	int		committed;
1577 	xfs_bmap_free_t	free_list;
1578 	int		error;
1579 
1580 	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1581 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
1582 	ASSERT((new_size == 0) || (new_size <= ip->i_size));
1583 	ASSERT(*tp != NULL);
1584 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
1585 	ASSERT(ip->i_transp == *tp);
1586 	ASSERT(ip->i_itemp != NULL);
1587 	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
1588 
1590 	ntp = *tp;
1591 	mp = ntp->t_mountp;
1592 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1593 
1594 	/*
1595 	 * We only support truncating the entire attribute fork.
1596 	 */
1597 	if (fork == XFS_ATTR_FORK) {
1598 		new_size = 0LL;
1599 	}
1600 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1601 	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
1602 	/*
1603 	 * The first thing we do is set the size to new_size permanently
1604 	 * on disk.  This way we don't have to worry about anyone ever
1605 	 * being able to look at the data being freed even in the face
1606 	 * of a crash.  What we're getting around here is the case where
1607 	 * we free a block, it is allocated to another file, it is written
1608 	 * to, and then we crash.  If the new data gets written to the
1609 	 * file but the log buffers containing the free and reallocation
1610 	 * don't, then we'd end up with garbage in the blocks being freed.
1611 	 * As long as we make the new_size permanent before actually
1612 	 * freeing any blocks, it doesn't matter if they get written to.
1613 	 *
1614 	 * The callers must signal into us whether or not the size
1615 	 * setting here must be synchronous.  There are a few cases
1616 	 * where it doesn't have to be synchronous.  Those cases
1617 	 * occur if the file is unlinked and we know the unlink is
1618 	 * permanent or if the blocks being truncated are guaranteed
1619 	 * to be beyond the inode eof (regardless of the link count)
1620 	 * and the eof value is permanent.  Both of these cases occur
1621 	 * only on wsync-mounted filesystems.  In those cases, we're
1622 	 * guaranteed that no user will ever see the data in the blocks
1623 	 * that are being truncated so the truncate can run async.
1624 	 * In the free beyond eof case, the file may wind up with
1625 	 * more blocks allocated to it than it needs if we crash,
1626 	 * and that won't get fixed until the next time the file
1627 	 * is re-opened and closed, but that's OK as that shouldn't
1628 	 * be too many blocks.
1629 	 *
1630 	 * However, we can't just make all wsync xactions run async
1631 	 * because there's one call out of the create path that needs
1632 	 * to run sync: the one where an existing file of nonzero size
1633 	 * is being truncated to size 0.
1634 	 *
1635 	 * It's probably possible to come up with a test in this
1636 	 * routine that would correctly distinguish all the above
1637 	 * cases from the values of the function parameters and the
1638 	 * inode state but for sanity's sake, I've decided to let the
1639 	 * layers above just tell us.  It's simpler to correctly figure
1640 	 * out in the layer above exactly under what conditions we
1641 	 * can run async and I think it's easier for others to read and
1642 	 * follow the logic in case something has to be changed.
1643 	 * cscope is your friend -- rcc.
1644 	 *
1645 	 * The attribute fork is much simpler.
1646 	 *
1647 	 * For the attribute fork we allow the caller to tell us whether
1648 	 * the unlink of the inode that led to this call is yet permanent
1649 	 * in the on disk log.  If it is not and we will be freeing extents
1650 	 * in this inode then we make the first transaction synchronous
1651 	 * to make sure that the unlink is permanent by the time we free
1652 	 * the blocks.
1653 	 */
1654 	if (fork == XFS_DATA_FORK) {
1655 		if (ip->i_d.di_nextents > 0) {
1656 			/*
1657 			 * If we are not changing the file size then do
1658 			 * not update the on-disk file size - we may be
1659 			 * called from xfs_inactive_free_eofblocks().  If we
1660 			 * update the on-disk file size and then the system
1661 			 * crashes before the contents of the file are
1662 			 * flushed to disk then the files may be full of
1663 			 * holes (i.e. the NULL files bug).
1664 			 */
1665 			if (ip->i_size != new_size) {
1666 				ip->i_d.di_size = new_size;
1667 				ip->i_size = new_size;
1668 				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1669 			}
1670 		}
1671 	} else if (sync) {
1672 		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
1673 		if (ip->i_d.di_anextents > 0)
1674 			xfs_trans_set_sync(ntp);
1675 	}
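	/*
	 * Sanity check the caller's sync decision against the rules
	 * described above: for the attribute fork, sync must be set
	 * exactly when the filesystem is not wsync-mounted.
	 */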
1676 	ASSERT(fork == XFS_DATA_FORK ||
1677 		(fork == XFS_ATTR_FORK &&
1678 			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
1679 			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
1680 
1681 	/*
1682 	 * Since it is possible for space to become allocated beyond
1683 	 * the end of the file (in a crash where the space is allocated
1684 	 * but the inode size is not yet updated), simply remove any
1685 	 * blocks which show up between the new EOF and the maximum
1686 	 * possible file size.  If the first block to be removed is
1687 	 * beyond the maximum file size (ie it is the same as last_block),
1688 	 * then there is nothing to do.
1689 	 */
1690 	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1691 	ASSERT(first_unmap_block <= last_block);
1692 	done = 0;
1693 	if (last_block == first_unmap_block) {
1694 		done = 1;
1695 	} else {
1696 		unmap_len = last_block - first_unmap_block + 1;
1697 	}
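	/*
	 * Unmap in bounded steps: each pass through the loop below
	 * frees at most XFS_ITRUNC_MAX_EXTENTS extents, finishes the
	 * accumulated free list, and then rolls to a fresh transaction
	 * so the permanent log reservation is never overrun.
	 */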
1698 	while (!done) {
1699 		/*
1700 		 * Free up to XFS_ITRUNC_MAX_EXTENTS extents.  xfs_bunmapi()
1701 		 * will tell us whether it freed the entire range or
1702 		 * not.  If this is a synchronous mount (wsync),
1703 		 * then we can tell bunmapi to keep all the
1704 		 * transactions asynchronous since the unlink
1705 		 * transaction that made this inode inactive has
1706 		 * already hit the disk.  There's no danger of
1707 		 * the freed blocks being reused, there being a
1708 		 * crash, and the reused blocks suddenly reappearing
1709 		 * in this file with garbage in them once recovery
1710 		 * runs.
1711 		 */
1712 		XFS_BMAP_INIT(&free_list, &first_block);
1713 		error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore,
1714 				    first_unmap_block, unmap_len,
1715 				    XFS_BMAPI_AFLAG(fork) |
1716 				      (sync ? 0 : XFS_BMAPI_ASYNC),
1717 				    XFS_ITRUNC_MAX_EXTENTS,
1718 				    &first_block, &free_list,
1719 				    NULL, &done);
1720 		if (error) {
1721 			/*
1722 			 * If the bunmapi call encounters an error,
1723 			 * return to the caller where the transaction
1724 			 * can be properly aborted.  We just need to
1725 			 * make sure we're not holding any resources
1726 			 * that we were not when we came in.
1727 			 */
1728 			xfs_bmap_cancel(&free_list);
1729 			return error;
1730 		}
1731 
1732 		/*
1733 		 * Duplicate the transaction that has the permanent
1734 		 * reservation and commit the old transaction.
1735 		 */
1736 		error = xfs_bmap_finish(tp, &free_list, &committed);
1737 		ntp = *tp;
1738 		if (error) {
1739 			/*
1740 			 * If the bmap finish call encounters an error,
1741 			 * return to the caller where the transaction
1742 			 * can be properly aborted.  We just need to
1743 			 * make sure we're not holding any resources
1744 			 * that we were not when we came in.
1745 			 *
1746 			 * Aborting from this point might lose some
1747 			 * blocks in the file system, but oh well.
1748 			 */
1749 			xfs_bmap_cancel(&free_list);
1750 			if (committed) {
1751 				/*
1752 				 * If the passed in transaction committed
1753 				 * in xfs_bmap_finish(), then we want to
1754 				 * add the inode to this one before returning.
1755 				 * This keeps things simple for the higher
1756 				 * level code, because it always knows that
1757 				 * the inode is locked and held in the
1758 				 * transaction that returns to it whether
1759 				 * errors occur or not.  We don't mark the
1760 				 * inode dirty so that this transaction can
1761 				 * be easily aborted if possible.
1762 				 */
1763 				xfs_trans_ijoin(ntp, ip,
1764 					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1765 				xfs_trans_ihold(ntp, ip);
1766 			}
1767 			return error;
1768 		}
1769 
1770 		if (committed) {
1771 			/*
1772 			 * The first xact was committed,
1773 			 * so add the inode to the new one.
1774 			 * Mark it dirty so it will be logged
1775 			 * and moved forward in the log as
1776 			 * part of every commit.
1777 			 */
1778 			xfs_trans_ijoin(ntp, ip,
1779 					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1780 			xfs_trans_ihold(ntp, ip);
1781 			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1782 		}
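		/*
		 * Roll the transaction: xfs_trans_dup() carries the
		 * permanent log reservation into a fresh transaction,
		 * the old transaction is committed, and the new one is
		 * topped back up with xfs_trans_reserve() before the
		 * inode is rejoined to it below.
		 */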
1783 		ntp = xfs_trans_dup(ntp);
1784 		(void) xfs_trans_commit(*tp, 0);
1785 		*tp = ntp;
1786 		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
1787 					  XFS_TRANS_PERM_LOG_RES,
1788 					  XFS_ITRUNCATE_LOG_COUNT);
1789 		/*
1790 		 * Add the inode being truncated to the next chained
1791 		 * transaction.
1792 		 */
1793 		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1794 		xfs_trans_ihold(ntp, ip);
1795 		if (error)
1796 			return error;
1797 	}
1798 	/*
1799 	 * Only update the size in the case of the data fork, but
1800 	 * always re-log the inode so that our permanent transaction
1801 	 * can keep on rolling it forward in the log.
1802 	 */
1803 	if (fork == XFS_DATA_FORK) {
1804 		xfs_isize_check(mp, ip, new_size);
1805 		/*
1806 		 * If we are not changing the file size then do
1807 		 * not update the on-disk file size - we may be
1808 		 * called from xfs_inactive_free_eofblocks().  If we
1809 		 * update the on-disk file size and then the system
1810 		 * crashes before the contents of the file are
1811 		 * flushed to disk then the files may be full of
1812 		 * holes (ie NULL files bug).
1813 		 */
1814 		if (ip->i_size != new_size) {
1815 			ip->i_d.di_size = new_size;
1816 			ip->i_size = new_size;
1817 		}
1818 	}
1819 	xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1820 	ASSERT((new_size != 0) ||
1821 	       (fork == XFS_ATTR_FORK) ||
1822 	       (ip->i_delayed_blks == 0));
1823 	ASSERT((new_size != 0) ||
1824 	       (fork == XFS_ATTR_FORK) ||
1825 	       (ip->i_d.di_nextents == 0));
1826 	xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
1827 	return 0;
1828 }
1829 
1830 
1831 /*
1832  * xfs_igrow_start
1833  *
1834  * Do the first part of growing a file: zero any data in the last
1835  * block that is beyond the old EOF.  We need to do this before
1836  * the inode is joined to the transaction to modify the i_size.
1837  * That way we can drop the inode lock and call into the buffer
1838  * cache to get the buffer mapping the EOF.
1839  */
1840 int
1841 xfs_igrow_start(
1842 	xfs_inode_t	*ip,
1843 	xfs_fsize_t	new_size,
1844 	cred_t		*credp)
1845 {
1846 	int		error;
1847 
1848 	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1849 	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1850 	ASSERT(new_size > ip->i_size);
1851 
1852 	/*
1853 	 * Zero any pages that may have been created by
1854 	 * xfs_write_file() beyond the end of the file
1855 	 * and any blocks between the old and new file sizes.
1856 	 */
1857 	error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size,
1858 			     ip->i_size);
1859 	return error;
1860 }
1861 
1862 /*
1863  * xfs_igrow_finish
1864  *
1865  * This routine is called to extend the size of a file.
1866  * The inode must have both the iolock and the ilock locked
1867  * for update and it must be a part of the current transaction.
1868  * The xfs_igrow_start() function must have been called previously.
1869  * If the change_flag is not zero, the inode change timestamp will
1870  * be updated.
1871  */
1872 void
1873 xfs_igrow_finish(
1874 	xfs_trans_t	*tp,
1875 	xfs_inode_t	*ip,
1876 	xfs_fsize_t	new_size,
1877 	int		change_flag)
1878 {
1879 	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1880 	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1881 	ASSERT(ip->i_transp == tp);
1882 	ASSERT(new_size > ip->i_size);
1883 
1884 	/*
1885 	 * Update the file size.  Update the inode change timestamp
1886 	 * if change_flag set.
1887 	 */
1888 	ip->i_d.di_size = new_size;
1889 	ip->i_size = new_size;
1890 	if (change_flag)
1891 		xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
1892 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1894 }
1895 
1896 
1897 /*
1898  * This is called when the inode's link count goes to 0.
1899  * We place the on-disk inode on a list in the AGI.  It
1900  * will be pulled from this list when the inode is freed.
1901  */
1902 int
1903 xfs_iunlink(
1904 	xfs_trans_t	*tp,
1905 	xfs_inode_t	*ip)
1906 {
1907 	xfs_mount_t	*mp;
1908 	xfs_agi_t	*agi;
1909 	xfs_dinode_t	*dip;
1910 	xfs_buf_t	*agibp;
1911 	xfs_buf_t	*ibp;
1912 	xfs_agnumber_t	agno;
1913 	xfs_daddr_t	agdaddr;
1914 	xfs_agino_t	agino;
1915 	short		bucket_index;
1916 	int		offset;
1917 	int		error;
1918 	int		agi_ok;
1919 
1920 	ASSERT(ip->i_d.di_nlink == 0);
1921 	ASSERT(ip->i_d.di_mode != 0);
1922 	ASSERT(ip->i_transp == tp);
1923 
1924 	mp = tp->t_mountp;
1925 
1926 	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1927 	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1928 
1929 	/*
1930 	 * Get the agi buffer first.  It ensures lock ordering
1931 	 * on the list.
1932 	 */
1933 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1934 				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1935 	if (error) {
1936 		return error;
1937 	}
1938 	/*
1939 	 * Validate the magic number of the agi block.
1940 	 */
1941 	agi = XFS_BUF_TO_AGI(agibp);
1942 	agi_ok =
1943 		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1944 		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1945 	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
1946 			XFS_RANDOM_IUNLINK))) {
1947 		XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
1948 		xfs_trans_brelse(tp, agibp);
1949 		return XFS_ERROR(EFSCORRUPTED);
1950 	}
1951 	/*
1952 	 * Get the index into the agi hash table for the
1953 	 * list this inode will go on.
1954 	 */
1955 	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1956 	ASSERT(agino != 0);
1957 	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1958 	ASSERT(agi->agi_unlinked[bucket_index]);
1959 	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1960 
1961 	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
1962 		/*
1963 		 * There is already another inode in the bucket we need
1964 		 * to add ourselves to.  Add us at the front of the list.
1965 		 * Here we put the head pointer into our next pointer,
1966 		 * and then we fall through to point the head at us.
1967 		 */
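		/*
		 * For example (hypothetical inode numbers), pushing
		 * agino 37 onto a bucket currently headed by agino 12:
		 *
		 *	before:	bucket -> 12 -> ... -> NULLAGINO
		 *	after:	bucket -> 37 -> 12 -> ... -> NULLAGINO
		 */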
1968 		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
1969 		if (error) {
1970 			return error;
1971 		}
1972 		ASSERT(INT_GET(dip->di_next_unlinked, ARCH_CONVERT) == NULLAGINO);
1973 		ASSERT(dip->di_next_unlinked);
1974 		/* both on-disk, don't endian flip twice */
1975 		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1976 		offset = ip->i_boffset +
1977 			offsetof(xfs_dinode_t, di_next_unlinked);
1978 		xfs_trans_inode_buf(tp, ibp);
1979 		xfs_trans_log_buf(tp, ibp, offset,
1980 				  (offset + sizeof(xfs_agino_t) - 1));
1981 		xfs_inobp_check(mp, ibp);
1982 	}
1983 
1984 	/*
1985 	 * Point the bucket head pointer at the inode being inserted.
1986 	 */
1987 	ASSERT(agino != 0);
1988 	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1989 	offset = offsetof(xfs_agi_t, agi_unlinked) +
1990 		(sizeof(xfs_agino_t) * bucket_index);
1991 	xfs_trans_log_buf(tp, agibp, offset,
1992 			  (offset + sizeof(xfs_agino_t) - 1));
1993 	return 0;
1994 }
1995 
1996 /*
1997  * Pull the on-disk inode from the AGI unlinked list.
1998  */
1999 STATIC int
2000 xfs_iunlink_remove(
2001 	xfs_trans_t	*tp,
2002 	xfs_inode_t	*ip)
2003 {
2004 	xfs_ino_t	next_ino;
2005 	xfs_mount_t	*mp;
2006 	xfs_agi_t	*agi;
2007 	xfs_dinode_t	*dip;
2008 	xfs_buf_t	*agibp;
2009 	xfs_buf_t	*ibp;
2010 	xfs_agnumber_t	agno;
2011 	xfs_daddr_t	agdaddr;
2012 	xfs_agino_t	agino;
2013 	xfs_agino_t	next_agino;
2014 	xfs_buf_t	*last_ibp;
2015 	xfs_dinode_t	*last_dip = NULL;
2016 	short		bucket_index;
2017 	int		offset, last_offset = 0;
2018 	int		error;
2019 	int		agi_ok;
2020 
2021 	/*
2022 	 * First pull the on-disk inode from the AGI unlinked list.
2023 	 */
2024 	mp = tp->t_mountp;
2025 
2026 	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2027 	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
2028 
2029 	/*
2030 	 * Get the agi buffer first.  It ensures lock ordering
2031 	 * on the list.
2032 	 */
2033 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
2034 				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
2035 	if (error) {
2036 		cmn_err(CE_WARN,
2037 			"xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s.  Returning error.",
2038 			error, mp->m_fsname);
2039 		return error;
2040 	}
2041 	/*
2042 	 * Validate the magic number of the agi block.
2043 	 */
2044 	agi = XFS_BUF_TO_AGI(agibp);
2045 	agi_ok =
2046 		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
2047 		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
2048 	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
2049 			XFS_RANDOM_IUNLINK_REMOVE))) {
2050 		XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
2051 				     mp, agi);
2052 		xfs_trans_brelse(tp, agibp);
2053 		cmn_err(CE_WARN,
2054 			"xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s.  Returning EFSCORRUPTED.",
2055 			 mp->m_fsname);
2056 		return XFS_ERROR(EFSCORRUPTED);
2057 	}
2058 	/*
2059 	 * Get the index into the agi hash table for the
2060 	 * list this inode will go on.
2061 	 */
2062 	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2063 	ASSERT(agino != 0);
2064 	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2065 	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
2066 	ASSERT(agi->agi_unlinked[bucket_index]);
2067 
2068 	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2069 		/*
2070 		 * We're at the head of the list.  Get the inode's
2071 		 * on-disk buffer to see if there is anyone after us
2072 		 * on the list.  Only modify our next pointer if it
2073 		 * is not already NULLAGINO.  This saves us the overhead
2074 		 * of dealing with the buffer when there is no need to
2075 		 * change it.
2076 		 */
2077 		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2078 		if (error) {
2079 			cmn_err(CE_WARN,
2080 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s.  Returning error.",
2081 				error, mp->m_fsname);
2082 			return error;
2083 		}
2084 		next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
2085 		ASSERT(next_agino != 0);
2086 		if (next_agino != NULLAGINO) {
2087 			INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
2088 			offset = ip->i_boffset +
2089 				offsetof(xfs_dinode_t, di_next_unlinked);
2090 			xfs_trans_inode_buf(tp, ibp);
2091 			xfs_trans_log_buf(tp, ibp, offset,
2092 					  (offset + sizeof(xfs_agino_t) - 1));
2093 			xfs_inobp_check(mp, ibp);
2094 		} else {
2095 			xfs_trans_brelse(tp, ibp);
2096 		}
2097 		/*
2098 		 * Point the bucket head pointer at the next inode.
2099 		 */
2100 		ASSERT(next_agino != 0);
2101 		ASSERT(next_agino != agino);
2102 		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2103 		offset = offsetof(xfs_agi_t, agi_unlinked) +
2104 			(sizeof(xfs_agino_t) * bucket_index);
2105 		xfs_trans_log_buf(tp, agibp, offset,
2106 				  (offset + sizeof(xfs_agino_t) - 1));
2107 	} else {
2108 		/*
2109 		 * We need to search the list for the inode being freed.
2110 		 */
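		/*
		 * Walk the singly linked bucket list from the head,
		 * remembering the previous inode's buffer and dinode
		 * (last_ibp/last_dip) so that, once we find ourselves,
		 * we can repoint its di_next_unlinked past us.
		 */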
2111 		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2112 		last_ibp = NULL;
2113 		while (next_agino != agino) {
2114 			/*
2115 			 * If the last inode wasn't the one pointing to
2116 			 * us, then release its buffer since we're not
2117 			 * going to do anything with it.
2118 			 */
2119 			if (last_ibp != NULL) {
2120 				xfs_trans_brelse(tp, last_ibp);
2121 			}
2122 			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2123 			error = xfs_inotobp(mp, tp, next_ino, &last_dip,
2124 					    &last_ibp, &last_offset);
2125 			if (error) {
2126 				cmn_err(CE_WARN,
2127 			"xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s.  Returning error.",
2128 					error, mp->m_fsname);
2129 				return error;
2130 			}
2131 			next_agino = INT_GET(last_dip->di_next_unlinked, ARCH_CONVERT);
2132 			ASSERT(next_agino != NULLAGINO);
2133 			ASSERT(next_agino != 0);
2134 		}
2135 		/*
2136 		 * Now last_ibp points to the buffer previous to us on
2137 		 * the unlinked list.  Pull us from the list.
2138 		 */
2139 		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2140 		if (error) {
2141 			cmn_err(CE_WARN,
2142 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s.  Returning error.",
2143 				error, mp->m_fsname);
2144 			return error;
2145 		}
2146 		next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
2147 		ASSERT(next_agino != 0);
2148 		ASSERT(next_agino != agino);
2149 		if (next_agino != NULLAGINO) {
2150 			INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
2151 			offset = ip->i_boffset +
2152 				offsetof(xfs_dinode_t, di_next_unlinked);
2153 			xfs_trans_inode_buf(tp, ibp);
2154 			xfs_trans_log_buf(tp, ibp, offset,
2155 					  (offset + sizeof(xfs_agino_t) - 1));
2156 			xfs_inobp_check(mp, ibp);
2157 		} else {
2158 			xfs_trans_brelse(tp, ibp);
2159 		}
2160 		/*
2161 		 * Point the previous inode on the list to the next inode.
2162 		 */
2163 		INT_SET(last_dip->di_next_unlinked, ARCH_CONVERT, next_agino);
2164 		ASSERT(next_agino != 0);
2165 		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2166 		xfs_trans_inode_buf(tp, last_ibp);
2167 		xfs_trans_log_buf(tp, last_ibp, offset,
2168 				  (offset + sizeof(xfs_agino_t) - 1));
2169 		xfs_inobp_check(mp, last_ibp);
2170 	}
2171 	return 0;
2172 }
2173 
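/*
 * An inode is "clean" when it has no logged fields outstanding and no
 * pending in-core update; a clean inode carries no state that must
 * reach the buffer, so xfs_ifree_cluster() below can skip it when
 * staling a cluster.
 */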
2174 STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip)
2175 {
2176 	return (((ip->i_itemp == NULL) ||
2177 		!(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
2178 		(ip->i_update_core == 0));
2179 }
2180 
2181 STATIC void
2182 xfs_ifree_cluster(
2183 	xfs_inode_t	*free_ip,
2184 	xfs_trans_t	*tp,
2185 	xfs_ino_t	inum)
2186 {
2187 	xfs_mount_t		*mp = free_ip->i_mount;
2188 	int			blks_per_cluster;
2189 	int			nbufs;
2190 	int			ninodes;
2191 	int			i, j, found, pre_flushed;
2192 	xfs_daddr_t		blkno;
2193 	xfs_buf_t		*bp;
2194 	xfs_ihash_t		*ih;
2195 	xfs_inode_t		*ip, **ip_found;
2196 	xfs_inode_log_item_t	*iip;
2197 	xfs_log_item_t		*lip;
2198 	SPLDECL(s);
2199 
2200 	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2201 		blks_per_cluster = 1;
2202 		ninodes = mp->m_sb.sb_inopblock;
2203 		nbufs = XFS_IALLOC_BLOCKS(mp);
2204 	} else {
2205 		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2206 					mp->m_sb.sb_blocksize;
2207 		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2208 		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
2209 	}
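	/*
	 * For example (hypothetical geometry): with 4k filesystem
	 * blocks, 256-byte inodes and an 8k inode cluster, we get
	 * blks_per_cluster = 2 and ninodes = 2 * 16 = 32, so each
	 * buffer below covers one 32-inode cluster.
	 */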
2210 
2211 	ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2212 
2213 	for (j = 0; j < nbufs; j++, inum += ninodes) {
2214 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2215 					 XFS_INO_TO_AGBNO(mp, inum));
2216 
2218 		/*
2219 		 * Look for each inode in memory and attempt to lock it;
2220 		 * we can be racing with flush and tail pushing here.
2221 		 * Any inode we get the locks on is added to an array of
2222 		 * inode items to process later.
2223 		 *
2224 		 * By taking the buffer lock we could beat a flush
2225 		 * or tail pushing thread to the lock here, in which
2226 		 * case they will go looking for the inode buffer
2227 		 * and fail, so we need some other form of interlock
2228 		 * here.
2229 		 */
2230 		found = 0;
2231 		for (i = 0; i < ninodes; i++) {
2232 			ih = XFS_IHASH(mp, inum + i);
2233 			read_lock(&ih->ih_lock);
2234 			for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
2235 				if (ip->i_ino == inum + i)
2236 					break;
2237 			}
2238 
2239 			/* Inode not in memory or we found it already,
2240 			 * nothing to do
2241 			 */
2242 			if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
2243 				read_unlock(&ih->ih_lock);
2244 				continue;
2245 			}
2246 
2247 			if (xfs_inode_clean(ip)) {
2248 				read_unlock(&ih->ih_lock);
2249 				continue;
2250 			}
2251 
2252 			/* If we can get the locks then add it to the
2253 			 * list, otherwise by the time we get the bp lock
2254 			 * below it will already be attached to the
2255 			 * inode buffer.
2256 			 */
2257 
2258 			/* This inode will already be locked - by us; let's
2259 			 * keep it that way.
2260 			 */
2261 
2262 			if (ip == free_ip) {
2263 				if (xfs_iflock_nowait(ip)) {
2264 					xfs_iflags_set(ip, XFS_ISTALE);
2265 					if (xfs_inode_clean(ip)) {
2266 						xfs_ifunlock(ip);
2267 					} else {
2268 						ip_found[found++] = ip;
2269 					}
2270 				}
2271 				read_unlock(&ih->ih_lock);
2272 				continue;
2273 			}
2274 
2275 			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2276 				if (xfs_iflock_nowait(ip)) {
2277 					xfs_iflags_set(ip, XFS_ISTALE);
2278 
2279 					if (xfs_inode_clean(ip)) {
2280 						xfs_ifunlock(ip);
2281 						xfs_iunlock(ip, XFS_ILOCK_EXCL);
2282 					} else {
2283 						ip_found[found++] = ip;
2284 					}
2285 				} else {
2286 					xfs_iunlock(ip, XFS_ILOCK_EXCL);
2287 				}
2288 			}
2289 
2290 			read_unlock(&ih->ih_lock);
2291 		}
2292 
2293 		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2294 					mp->m_bsize * blks_per_cluster,
2295 					XFS_BUF_LOCK);
2296 
2297 		pre_flushed = 0;
2298 		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2299 		while (lip) {
2300 			if (lip->li_type == XFS_LI_INODE) {
2301 				iip = (xfs_inode_log_item_t *)lip;
2302 				ASSERT(iip->ili_logged == 1);
2303 				lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
2304 				AIL_LOCK(mp,s);
2305 				iip->ili_flush_lsn = iip->ili_item.li_lsn;
2306 				AIL_UNLOCK(mp, s);
2307 				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2308 				pre_flushed++;
2309 			}
2310 			lip = lip->li_bio_list;
2311 		}
2312 
2313 		for (i = 0; i < found; i++) {
2314 			ip = ip_found[i];
2315 			iip = ip->i_itemp;
2316 
2317 			if (!iip) {
2318 				ip->i_update_core = 0;
2319 				xfs_ifunlock(ip);
2320 				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2321 				continue;
2322 			}
2323 
2324 			iip->ili_last_fields = iip->ili_format.ilf_fields;
2325 			iip->ili_format.ilf_fields = 0;
2326 			iip->ili_logged = 1;
2327 			AIL_LOCK(mp,s);
2328 			iip->ili_flush_lsn = iip->ili_item.li_lsn;
2329 			AIL_UNLOCK(mp, s);
2330 
2331 			xfs_buf_attach_iodone(bp,
2332 				(void(*)(xfs_buf_t*,xfs_log_item_t*))
2333 				xfs_istale_done, (xfs_log_item_t *)iip);
2334 			if (ip != free_ip) {
2335 				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2336 			}
2337 		}
2338 
2339 		if (found || pre_flushed)
2340 			xfs_trans_stale_inode_buf(tp, bp);
2341 		xfs_trans_binval(tp, bp);
2342 	}
2343 
2344 	kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *));
2345 }
2346 
2347 /*
2348  * This is called to return an inode to the inode free list.
2349  * The inode should already be truncated to 0 length and have
2350  * no pages associated with it.  This routine also assumes that
2351  * the inode is already a part of the transaction.
2352  *
2353  * The on-disk copy of the inode will have been added to the list
2354  * of unlinked inodes in the AGI. We need to remove the inode from
2355  * that list atomically with respect to freeing it here.
2356  */
2357 int
2358 xfs_ifree(
2359 	xfs_trans_t	*tp,
2360 	xfs_inode_t	*ip,
2361 	xfs_bmap_free_t	*flist)
2362 {
2363 	int			error;
2364 	int			delete;
2365 	xfs_ino_t		first_ino;
2366 
2367 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2368 	ASSERT(ip->i_transp == tp);
2369 	ASSERT(ip->i_d.di_nlink == 0);
2370 	ASSERT(ip->i_d.di_nextents == 0);
2371 	ASSERT(ip->i_d.di_anextents == 0);
2372 	ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
2373 	       ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2374 	ASSERT(ip->i_d.di_nblocks == 0);
2375 
2376 	/*
2377 	 * Pull the on-disk inode from the AGI unlinked list.
2378 	 */
2379 	error = xfs_iunlink_remove(tp, ip);
2380 	if (error != 0) {
2381 		return error;
2382 	}
2383 
2384 	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2385 	if (error != 0) {
2386 		return error;
2387 	}
2388 	ip->i_d.di_mode = 0;		/* mark incore inode as free */
2389 	ip->i_d.di_flags = 0;
2390 	ip->i_d.di_dmevmask = 0;
2391 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2392 	ip->i_df.if_ext_max =
2393 		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2394 	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2395 	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2396 	/*
2397 	 * Bump the generation count so no one will be confused
2398 	 * by reincarnations of this inode.
2399 	 */
2400 	ip->i_d.di_gen++;
2401 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2402 
2403 	if (delete) {
2404 		xfs_ifree_cluster(ip, tp, first_ino);
2405 	}
2406 
2407 	return 0;
2408 }
2409 
2410 /*
2411  * Reallocate the space for if_broot based on the number of records
2412  * being added or deleted as indicated in rec_diff.  Move the records
2413  * and pointers in if_broot to fit the new size.  When shrinking this
2414  * will eliminate holes between the records and pointers created by
2415  * the caller.  When growing this will create holes to be filled in
2416  * by the caller.
2417  *
2418  * The caller must not request to add more records than would fit in
2419  * the on-disk inode root.  If the if_broot is currently NULL, then
2420  * if we are adding records one will be allocated.  The caller must also
2421  * not request that the number of records go below zero, although
2422  * it can go to zero.
2423  *
2424  * ip -- the inode whose if_broot area is changing
2425  * rec_diff -- the change in the number of records, positive or negative,
2426  *	 requested for the if_broot array.
2427  */
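/*
 * Layout of the incore root buffer (sketch):
 *
 *	+--------+--------------+---------------+
 *	| header | record array | pointer array |
 *	+--------+--------------+---------------+
 *
 * The records sit directly after the block header, while the pointer
 * array is addressed relative to the total buffer size, so a resize
 * only has to move the pointers.
 */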
2428 void
2429 xfs_iroot_realloc(
2430 	xfs_inode_t		*ip,
2431 	int			rec_diff,
2432 	int			whichfork)
2433 {
2434 	int			cur_max;
2435 	xfs_ifork_t		*ifp;
2436 	xfs_bmbt_block_t	*new_broot;
2437 	int			new_max;
2438 	size_t			new_size;
2439 	char			*np;
2440 	char			*op;
2441 
2442 	/*
2443 	 * Handle the degenerate case quietly.
2444 	 */
2445 	if (rec_diff == 0) {
2446 		return;
2447 	}
2448 
2449 	ifp = XFS_IFORK_PTR(ip, whichfork);
2450 	if (rec_diff > 0) {
2451 		/*
2452 		 * If there wasn't any memory allocated before, just
2453 		 * allocate it now and get out.
2454 		 */
2455 		if (ifp->if_broot_bytes == 0) {
2456 			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2457 			ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
2458 								     KM_SLEEP);
2459 			ifp->if_broot_bytes = (int)new_size;
2460 			return;
2461 		}
2462 
2463 		/*
2464 		 * If there is already an existing if_broot, then we need
2465 		 * to realloc() it and shift the pointers to their new
2466 		 * location.  The records don't change location because
2467 		 * they are kept butted up against the btree block header.
2468 		 */
2469 		cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2470 		new_max = cur_max + rec_diff;
2471 		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2472 		ifp->if_broot = (xfs_bmbt_block_t *)
2473 		  kmem_realloc(ifp->if_broot,
2474 				new_size,
2475 				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2476 				KM_SLEEP);
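		/*
		 * Note the ordering below: op addresses the pointer
		 * array at its old offset (computed from the old
		 * if_broot_bytes) and np at its new offset (from
		 * new_size); if_broot_bytes is then updated before the
		 * memmove shifts the cur_max pointers into place.
		 */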
2477 		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2478 						      ifp->if_broot_bytes);
2479 		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2480 						      (int)new_size);
2481 		ifp->if_broot_bytes = (int)new_size;
2482 		ASSERT(ifp->if_broot_bytes <=
2483 			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2484 		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2485 		return;
2486 	}
2487 
2488 	/*
2489 	 * rec_diff is less than 0.  In this case, we are shrinking the
2490 	 * if_broot buffer.  It must already exist.  If we go to zero
2491 	 * records, just get rid of the root and clear the status bit.
2492 	 */
2493 	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2494 	cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2495 	new_max = cur_max + rec_diff;
2496 	ASSERT(new_max >= 0);
2497 	if (new_max > 0)
2498 		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2499 	else
2500 		new_size = 0;
2501 	if (new_size > 0) {
2502 		new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
2503 		/*
2504 		 * First copy over the btree block header.
2505 		 */
2506 		memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
2507 	} else {
2508 		new_broot = NULL;
2509 		ifp->if_flags &= ~XFS_IFBROOT;
2510 	}
2511 
2512 	/*
2513 	 * Only copy the records and pointers if there are any.
2514 	 */
2515 	if (new_max > 0) {
2516 		/*
2517 		 * First copy the records.
2518 		 */
2519 		op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
2520 						     ifp->if_broot_bytes);
2521 		np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
2522 						     (int)new_size);
2523 		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2524 
2525 		/*
2526 		 * Then copy the pointers.
2527 		 */
2528 		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2529 						     ifp->if_broot_bytes);
2530 		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
2531 						     (int)new_size);
2532 		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2533 	}
2534 	kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2535 	ifp->if_broot = new_broot;
2536 	ifp->if_broot_bytes = (int)new_size;
2537 	ASSERT(ifp->if_broot_bytes <=
2538 		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2539 	return;
2540 }
2541 
2542 
2543 /*
2544  * This is called when the amount of space needed for if_data
2545  * is increased or decreased.  The change in size is indicated by
2546  * the number of bytes that need to be added or deleted in the
2547  * byte_diff parameter.
2548  *
2549  * If the amount of space needed has decreased below the size of the
2550  * inline buffer, then switch to using the inline buffer.  Otherwise,
2551  * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2552  * to what is needed.
2553  *
2554  * ip -- the inode whose if_data area is changing
2555  * byte_diff -- the change in the number of bytes, positive or negative,
2556  *	 requested for the if_data array.
2557  */
2558 void
2559 xfs_idata_realloc(
2560 	xfs_inode_t	*ip,
2561 	int		byte_diff,
2562 	int		whichfork)
2563 {
2564 	xfs_ifork_t	*ifp;
2565 	int		new_size;
2566 	int		real_size;
2567 
2568 	if (byte_diff == 0) {
2569 		return;
2570 	}
2571 
2572 	ifp = XFS_IFORK_PTR(ip, whichfork);
2573 	new_size = (int)ifp->if_bytes + byte_diff;
2574 	ASSERT(new_size >= 0);
2575 
2576 	if (new_size == 0) {
2577 		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2578 			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2579 		}
2580 		ifp->if_u1.if_data = NULL;
2581 		real_size = 0;
2582 	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2583 		/*
2584 		 * If the valid extents/data can fit in if_inline_ext/data,
2585 		 * copy them from the malloc'd vector and free it.
2586 		 */
2587 		if (ifp->if_u1.if_data == NULL) {
2588 			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2589 		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2590 			ASSERT(ifp->if_real_bytes != 0);
2591 			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2592 			      new_size);
2593 			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2594 			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2595 		}
2596 		real_size = 0;
2597 	} else {
2598 		/*
2599 		 * Stuck with malloc/realloc.
2600 		 * For inline data, the underlying buffer must be
2601 		 * a multiple of 4 bytes in size so that it can be
2602 		 * logged and stay on word boundaries.  We enforce
2603 		 * that here.
2604 		 */
2605 		real_size = roundup(new_size, 4);
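		/* e.g. 13 bytes of local data get a 16-byte buffer */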
2606 		if (ifp->if_u1.if_data == NULL) {
2607 			ASSERT(ifp->if_real_bytes == 0);
2608 			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2609 		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2610 			/*
2611 			 * Only do the realloc if the underlying size
2612 			 * is really changing.
2613 			 */
2614 			if (ifp->if_real_bytes != real_size) {
2615 				ifp->if_u1.if_data =
2616 					kmem_realloc(ifp->if_u1.if_data,
2617 							real_size,
2618 							ifp->if_real_bytes,
2619 							KM_SLEEP);
2620 			}
2621 		} else {
2622 			ASSERT(ifp->if_real_bytes == 0);
2623 			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2624 			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2625 				ifp->if_bytes);
2626 		}
2627 	}
2628 	ifp->if_real_bytes = real_size;
2629 	ifp->if_bytes = new_size;
2630 	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2631 }
2632 
2633 
2636 /*
2637  * Map inode to disk block and offset.
2638  *
2639  * mp -- the mount point structure for the current file system
2640  * tp -- the current transaction
2641  * ino -- the inode number of the inode to be located
2642  * imap -- this structure is filled in with the information necessary
2643  *	 to retrieve the given inode from disk
2644  * flags -- flags to pass to xfs_dilocate indicating whether or not
2645  *	 lookups in the inode btree are OK
2646  */
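/*
 * Note that a caller may pre-seed imap->im_blkno with a previously
 * known disk address; the code below converts it into an fsblock
 * hint for xfs_dilocate() rather than starting from NULLFSBLOCK.
 */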
2647 int
2648 xfs_imap(
2649 	xfs_mount_t	*mp,
2650 	xfs_trans_t	*tp,
2651 	xfs_ino_t	ino,
2652 	xfs_imap_t	*imap,
2653 	uint		flags)
2654 {
2655 	xfs_fsblock_t	fsbno;
2656 	int		len;
2657 	int		off;
2658 	int		error;
2659 
2660 	fsbno = imap->im_blkno ?
2661 		XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
2662 	error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
2663 	if (error != 0) {
2664 		return error;
2665 	}
2666 	imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
2667 	imap->im_len = XFS_FSB_TO_BB(mp, len);
2668 	imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
2669 	imap->im_ioffset = (ushort)off;
2670 	imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
2671 	return 0;
2672 }
2673 
2674 void
2675 xfs_idestroy_fork(
2676 	xfs_inode_t	*ip,
2677 	int		whichfork)
2678 {
2679 	xfs_ifork_t	*ifp;
2680 
2681 	ifp = XFS_IFORK_PTR(ip, whichfork);
2682 	if (ifp->if_broot != NULL) {
2683 		kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2684 		ifp->if_broot = NULL;
2685 	}
2686 
2687 	/*
2688 	 * If the format is local, then we can't have an extents
2689 	 * array so just look for an inline data array.  If we're
2690 	 * not local then we may or may not have an extents list,
2691 	 * so check and free it up if we do.
2692 	 */
2693 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2694 		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2695 		    (ifp->if_u1.if_data != NULL)) {
2696 			ASSERT(ifp->if_real_bytes != 0);
2697 			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2698 			ifp->if_u1.if_data = NULL;
2699 			ifp->if_real_bytes = 0;
2700 		}
2701 	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2702 		   ((ifp->if_flags & XFS_IFEXTIREC) ||
2703 		    ((ifp->if_u1.if_extents != NULL) &&
2704 		     (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2705 		ASSERT(ifp->if_real_bytes != 0);
2706 		xfs_iext_destroy(ifp);
2707 	}
2708 	ASSERT(ifp->if_u1.if_extents == NULL ||
2709 	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2710 	ASSERT(ifp->if_real_bytes == 0);
2711 	if (whichfork == XFS_ATTR_FORK) {
2712 		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2713 		ip->i_afp = NULL;
2714 	}
2715 }
2716 
2717 /*
2718  * This is called to free all the memory associated with an inode.
2719  * It must free the inode itself and any buffers allocated for
2720  * if_extents/if_data and if_broot.  It must also free the lock
2721  * associated with the inode.
2722  */
2723 void
2724 xfs_idestroy(
2725 	xfs_inode_t	*ip)
2726 {
2727 
2728 	switch (ip->i_d.di_mode & S_IFMT) {
2729 	case S_IFREG:
2730 	case S_IFDIR:
2731 	case S_IFLNK:
2732 		xfs_idestroy_fork(ip, XFS_DATA_FORK);
2733 		break;
2734 	}
2735 	if (ip->i_afp)
2736 		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2737 	mrfree(&ip->i_lock);
2738 	mrfree(&ip->i_iolock);
2739 	freesema(&ip->i_flock);
2740 #ifdef XFS_BMAP_TRACE
2741 	ktrace_free(ip->i_xtrace);
2742 #endif
2743 #ifdef XFS_BMBT_TRACE
2744 	ktrace_free(ip->i_btrace);
2745 #endif
2746 #ifdef XFS_RW_TRACE
2747 	ktrace_free(ip->i_rwtrace);
2748 #endif
2749 #ifdef XFS_ILOCK_TRACE
2750 	ktrace_free(ip->i_lock_trace);
2751 #endif
2752 #ifdef XFS_DIR2_TRACE
2753 	ktrace_free(ip->i_dir_trace);
2754 #endif
2755 	if (ip->i_itemp) {
2756 		/*
2757 		 * Only if we are shutting down the fs will we see an
2758 		 * inode still in the AIL. If it is there, we should remove
2759 		 * it to prevent a use-after-free from occurring.
2760 		 */
2761 		xfs_mount_t	*mp = ip->i_mount;
2762 		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
2763 		int		s;
2764 
2765 		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
2766 				       XFS_FORCED_SHUTDOWN(ip->i_mount));
2767 		if (lip->li_flags & XFS_LI_IN_AIL) {
2768 			AIL_LOCK(mp, s);
2769 			if (lip->li_flags & XFS_LI_IN_AIL)
2770 				xfs_trans_delete_ail(mp, lip, s);
2771 			else
2772 				AIL_UNLOCK(mp, s);
2773 		}
2774 		xfs_inode_item_destroy(ip);
2775 	}
2776 	kmem_zone_free(xfs_inode_zone, ip);
2777 }
2778 
2779 
2780 /*
2781  * Increment the pin count of the given inode.
2782  * This value is protected by ipinlock spinlock in the mount structure.
2783  */
2784 void
2785 xfs_ipin(
2786 	xfs_inode_t	*ip)
2787 {
2788 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2789 
2790 	atomic_inc(&ip->i_pincount);
2791 }
2792 
2793 /*
2794  * Decrement the pin count of the given inode, and wake up
2795  * anyone in xfs_iunpin_wait() if the count goes to 0.  The
2796  * inode must have been previously pinned with a call to xfs_ipin().
2797  */
2798 void
2799 xfs_iunpin(
2800 	xfs_inode_t	*ip)
2801 {
2802 	ASSERT(atomic_read(&ip->i_pincount) > 0);
2803 
2804 	if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) {
2805 
2806 		/*
2807 		 * If the inode is currently being reclaimed, the link between
2808 		 * the bhv_vnode and the xfs_inode will be broken after the
2809 		 * XFS_IRECLAIM* flag is set. Hence, if these flags are not
2810 		 * set, then we can move forward and mark the linux inode dirty
2811 	 * knowing that it is still valid as it won't be freed until after
2812 		 * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The
2813 		 * i_flags_lock is used to synchronise the setting of the
2814 		 * XFS_IRECLAIM* flags and the breaking of the link, and so we
2815 		 * can execute atomically w.r.t to reclaim by holding this lock
2816 		 * here.
2817 		 *
2818 		 * However, we still need to issue the unpin wakeup call as the
2819 		 * inode reclaim may be blocked waiting for the inode to become
2820 		 * unpinned.
2821 		 */
2822 
2823 		if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) {
2824 			bhv_vnode_t	*vp = XFS_ITOV_NULL(ip);
2825 			struct inode *inode = NULL;
2826 
2827 			BUG_ON(vp == NULL);
2828 			inode = vn_to_inode(vp);
2829 			BUG_ON(inode->i_state & I_CLEAR);
2830 
2831 			/* make sync come back and flush this inode */
2832 			if (!(inode->i_state & (I_NEW|I_FREEING)))
2833 				mark_inode_dirty_sync(inode);
2834 		}
2835 		spin_unlock(&ip->i_flags_lock);
2836 		wake_up(&ip->i_ipin_wait);
2837 	}
2838 }
2839 
2840 /*
2841  * This is called to wait for the given inode to be unpinned.
2842  * It will sleep until this happens.  The caller must have the
2843  * inode locked in at least shared mode so that the inode cannot
2844  * be subsequently pinned once someone is waiting for it to be
2845  * unpinned.
2846  */
2847 STATIC void
2848 xfs_iunpin_wait(
2849 	xfs_inode_t	*ip)
2850 {
2851 	xfs_inode_log_item_t	*iip;
2852 	xfs_lsn_t	lsn;
2853 
2854 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
2855 
2856 	if (atomic_read(&ip->i_pincount) == 0) {
2857 		return;
2858 	}
2859 
2860 	iip = ip->i_itemp;
2861 	if (iip && iip->ili_last_lsn) {
2862 		lsn = iip->ili_last_lsn;
2863 	} else {
2864 		lsn = (xfs_lsn_t)0;
2865 	}
2866 
2867 	/*
2868 	 * Give the log a push so we don't wait here too long.
2869 	 */
2870 	xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
2871 
2872 	wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2873 }
2874 
2875 
2876 /*
2877  * xfs_iextents_copy()
2878  *
2879  * This is called to copy the REAL extents (as opposed to the delayed
2880  * allocation extents) from the inode into the given buffer.  It
2881  * returns the number of bytes copied into the buffer.
2882  *
2883  * Delayed allocation extents have no on-disk form, so we examine
2884  * each extent in turn and copy out only the real ones, skipping
2885  * those which are delayed.
2886  */
2887 int
2888 xfs_iextents_copy(
2889 	xfs_inode_t		*ip,
2890 	xfs_bmbt_rec_t		*buffer,
2891 	int			whichfork)
2892 {
2893 	int			copied;
2894 	xfs_bmbt_rec_t		*dest_ep;
2895 	xfs_bmbt_rec_t		*ep;
2896 	int			i;
2897 	xfs_ifork_t		*ifp;
2898 	int			nrecs;
2899 	xfs_fsblock_t		start_block;
2900 
2901 	ifp = XFS_IFORK_PTR(ip, whichfork);
2902 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
2903 	ASSERT(ifp->if_bytes > 0);
2904 
2905 	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2906 	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2907 	ASSERT(nrecs > 0);
2908 
2909 	/*
2910 	 * Copy the extents one at a time, skipping any delayed
2911 	 * allocation extents (identified by a null start block).
2912 	 * There must be at least one non-delayed extent or we
2913 	 * would have nothing to write out.
2914 	 */
2915 	dest_ep = buffer;
2916 	copied = 0;
2917 	for (i = 0; i < nrecs; i++) {
2918 		ep = xfs_iext_get_ext(ifp, i);
2919 		start_block = xfs_bmbt_get_startblock(ep);
2920 		if (ISNULLSTARTBLOCK(start_block)) {
2921 			/*
2922 			 * It's a delayed allocation extent, so skip it.
2923 			 */
2924 			continue;
2925 		}
2926 
2927 		/* Translate to on disk format */
2928 		put_unaligned(INT_GET(ep->l0, ARCH_CONVERT),
2929 			      (__uint64_t*)&dest_ep->l0);
2930 		put_unaligned(INT_GET(ep->l1, ARCH_CONVERT),
2931 			      (__uint64_t*)&dest_ep->l1);
2932 		dest_ep++;
2933 		copied++;
2934 	}
2935 	ASSERT(copied != 0);
2936 	xfs_validate_extents(ifp, copied, 1, XFS_EXTFMT_INODE(ip));
2937 
2938 	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2939 }
2940 
2941 /*
2942  * Each of the following cases stores data into the same region
2943  * of the on-disk inode, so only one of them can be valid at
2944  * any given time. While it is possible to have conflicting formats
2945  * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2946  * in EXTENTS format, this can only happen when the fork has
2947  * changed formats after being modified but before being flushed.
2948  * In these cases, the format always takes precedence, because the
2949  * format indicates the current state of the fork.
2950  */
2951 /*ARGSUSED*/
2952 STATIC int
2953 xfs_iflush_fork(
2954 	xfs_inode_t		*ip,
2955 	xfs_dinode_t		*dip,
2956 	xfs_inode_log_item_t	*iip,
2957 	int			whichfork,
2958 	xfs_buf_t		*bp)
2959 {
2960 	char			*cp;
2961 	xfs_ifork_t		*ifp;
2962 	xfs_mount_t		*mp;
2963 #ifdef XFS_TRANS_DEBUG
2964 	int			first;
2965 #endif
2966 	static const short	brootflag[2] =
2967 		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2968 	static const short	dataflag[2] =
2969 		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2970 	static const short	extflag[2] =
2971 		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2972 
2973 	if (iip == NULL)
2974 		return 0;
2975 	ifp = XFS_IFORK_PTR(ip, whichfork);
2976 	/*
2977 	 * This can happen if we gave up in iformat in an error path,
2978 	 * for the attribute fork.
2979 	 */
2980 	if (ifp == NULL) {
2981 		ASSERT(whichfork == XFS_ATTR_FORK);
2982 		return 0;
2983 	}
2984 	cp = XFS_DFORK_PTR(dip, whichfork);
2985 	mp = ip->i_mount;
2986 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2987 	case XFS_DINODE_FMT_LOCAL:
2988 		if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2989 		    (ifp->if_bytes > 0)) {
2990 			ASSERT(ifp->if_u1.if_data != NULL);
2991 			ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2992 			memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2993 		}
2994 		break;
2995 
2996 	case XFS_DINODE_FMT_EXTENTS:
2997 		ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2998 		       !(iip->ili_format.ilf_fields & extflag[whichfork]));
2999 		ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
3000 			(ifp->if_bytes == 0));
3001 		ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
3002 			(ifp->if_bytes > 0));
3003 		if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
3004 		    (ifp->if_bytes > 0)) {
3005 			ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
3006 			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
3007 				whichfork);
3008 		}
3009 		break;
3010 
3011 	case XFS_DINODE_FMT_BTREE:
3012 		if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
3013 		    (ifp->if_broot_bytes > 0)) {
3014 			ASSERT(ifp->if_broot != NULL);
3015 			ASSERT(ifp->if_broot_bytes <=
3016 			       (XFS_IFORK_SIZE(ip, whichfork) +
3017 				XFS_BROOT_SIZE_ADJ));
3018 			xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
3019 				(xfs_bmdr_block_t *)cp,
3020 				XFS_DFORK_SIZE(dip, mp, whichfork));
3021 		}
3022 		break;
3023 
3024 	case XFS_DINODE_FMT_DEV:
3025 		if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
3026 			ASSERT(whichfork == XFS_DATA_FORK);
3027 			INT_SET(dip->di_u.di_dev, ARCH_CONVERT, ip->i_df.if_u2.if_rdev);
3028 		}
3029 		break;
3030 
3031 	case XFS_DINODE_FMT_UUID:
3032 		if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
3033 			ASSERT(whichfork == XFS_DATA_FORK);
3034 			memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
3035 				sizeof(uuid_t));
3036 		}
3037 		break;
3038 
3039 	default:
3040 		ASSERT(0);
3041 		break;
3042 	}
3043 
3044 	return 0;
3045 }
3046 
3047 /*
3048  * xfs_iflush() will write a modified inode's changes out to the
3049  * inode's on disk home.  The caller must have the inode lock held
3050  * in at least shared mode and the inode flush semaphore must be
3051  * held as well.  The inode lock will still be held upon return from
3052  * the call and the caller is free to unlock it.
3053  * The inode flush lock will be unlocked when the inode reaches the disk.
3054  * The flags indicate how the inode's buffer should be written out.
3055  */
3056 int
3057 xfs_iflush(
3058 	xfs_inode_t		*ip,
3059 	uint			flags)
3060 {
3061 	xfs_inode_log_item_t	*iip;
3062 	xfs_buf_t		*bp;
3063 	xfs_dinode_t		*dip;
3064 	xfs_mount_t		*mp;
3065 	int			error;
3066 	/* REFERENCED */
3067 	xfs_chash_t		*ch;
3068 	xfs_inode_t		*iq;
3069 	int			clcount;	/* count of inodes clustered */
3070 	int			bufwasdelwri;
3071 	enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
3072 	SPLDECL(s);
3073 
3074 	XFS_STATS_INC(xs_iflush_count);
3075 
3076 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3077 	ASSERT(issemalocked(&(ip->i_flock)));
3078 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3079 	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
3080 
3081 	iip = ip->i_itemp;
3082 	mp = ip->i_mount;
3083 
3084 	/*
3085 	 * If the inode isn't dirty, then just release the inode
3086 	 * flush lock and do nothing.
3087 	 */
3088 	if ((ip->i_update_core == 0) &&
3089 	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3090 		ASSERT((iip != NULL) ?
3091 			 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
3092 		xfs_ifunlock(ip);
3093 		return 0;
3094 	}
3095 
3096 	/*
3097 	 * We can't flush the inode until it is unpinned, so
3098 	 * wait for it.  We know no one new can pin it, because
3099 	 * we are holding the inode lock shared and you need
3100 	 * to hold it exclusively to pin the inode.
3101 	 */
3102 	xfs_iunpin_wait(ip);
3103 
3104 	/*
3105 	 * This may have been unpinned because the filesystem is shutting
3106 	 * down forcibly. If that's the case we must not write this inode
3107 	 * to disk, because the log record didn't make it to disk!
3108 	 */
3109 	if (XFS_FORCED_SHUTDOWN(mp)) {
3110 		ip->i_update_core = 0;
3111 		if (iip)
3112 			iip->ili_format.ilf_fields = 0;
3113 		xfs_ifunlock(ip);
3114 		return XFS_ERROR(EIO);
3115 	}
3116 
3117 	/*
3118 	 * Get the buffer containing the on-disk inode.
3119 	 */
3120 	error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
3121 	if (error) {
3122 		xfs_ifunlock(ip);
3123 		return error;
3124 	}
3125 
3126 	/*
3127 	 * Decide how buffer will be flushed out.  This is done before
3128 	 * the call to xfs_iflush_int because this field is zeroed by it.
3129 	 */
3130 	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3131 		/*
3132 		 * Flush out the inode buffer according to the directions
3133 		 * of the caller.  In the cases where the caller has given
3134 	 * us a choice, choose the non-delwri case.  This is because
3135 		 * the inode is in the AIL and we need to get it out soon.
3136 		 */
3137 		switch (flags) {
3138 		case XFS_IFLUSH_SYNC:
3139 		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3140 			flags = 0;
3141 			break;
3142 		case XFS_IFLUSH_ASYNC:
3143 		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3144 			flags = INT_ASYNC;
3145 			break;
3146 		case XFS_IFLUSH_DELWRI:
3147 			flags = INT_DELWRI;
3148 			break;
3149 		default:
3150 			ASSERT(0);
3151 			flags = 0;
3152 			break;
3153 		}
3154 	} else {
3155 		switch (flags) {
3156 		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3157 		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3158 		case XFS_IFLUSH_DELWRI:
3159 			flags = INT_DELWRI;
3160 			break;
3161 		case XFS_IFLUSH_ASYNC:
3162 			flags = INT_ASYNC;
3163 			break;
3164 		case XFS_IFLUSH_SYNC:
3165 			flags = 0;
3166 			break;
3167 		default:
3168 			ASSERT(0);
3169 			flags = 0;
3170 			break;
3171 		}
3172 	}
3173 
3174 	/*
3175 	 * First flush out the inode that xfs_iflush was called with.
3176 	 */
3177 	error = xfs_iflush_int(ip, bp);
3178 	if (error) {
3179 		goto corrupt_out;
3180 	}
3181 
3182 	/*
3183 	 * inode clustering:
3184 	 * see if other inodes can be gathered into this write
3185 	 */
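	/*
	 * All in-core inodes sharing this inode's disk cluster hang
	 * off one cluster hash chain; walk it under ch_lock and
	 * opportunistically flush any dirty, unpinned neighbours into
	 * the same buffer so a single write covers the whole cluster.
	 */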
3186 
3187 	ip->i_chash->chl_buf = bp;
3188 
3189 	ch = XFS_CHASH(mp, ip->i_blkno);
3190 	s = mutex_spinlock(&ch->ch_lock);
3191 
3192 	clcount = 0;
3193 	for (iq = ip->i_cnext; iq != ip; iq = iq->i_cnext) {
3194 		/*
3195 		 * Do an un-protected check to see if the inode is dirty and
3196 		 * is a candidate for flushing.  These checks will be repeated
3197 		 * later after the appropriate locks are acquired.
3198 		 */
3199 		iip = iq->i_itemp;
3200 		if ((iq->i_update_core == 0) &&
3201 		    ((iip == NULL) ||
3202 		     !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
3203 		      xfs_ipincount(iq) == 0) {
3204 			continue;
3205 		}
3206 
3207 		/*
3208 		 * Try to get locks.  If any are unavailable,
3209 		 * then this inode cannot be flushed and is skipped.
3210 		 */
3211 
3212 		/* get inode locks (just i_lock) */
3213 		if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
3214 			/* get inode flush lock */
3215 			if (xfs_iflock_nowait(iq)) {
3216 				/* check if pinned */
3217 				if (xfs_ipincount(iq) == 0) {
3218 					/* Arriving here means that
3219 					 * this inode can be flushed.
3220 					 * First re-check that it's
3221 					 * dirty.
3222 					 */
3223 					iip = iq->i_itemp;
3224 					if ((iq->i_update_core != 0) ||
3225 					    ((iip != NULL) &&
3226 					     (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3227 						clcount++;
3228 						error = xfs_iflush_int(iq, bp);
3229 						if (error) {
3230 							xfs_iunlock(iq,
3231 								    XFS_ILOCK_SHARED);
3232 							goto cluster_corrupt_out;
3233 						}
3234 					} else {
3235 						xfs_ifunlock(iq);
3236 					}
3237 				} else {
3238 					xfs_ifunlock(iq);
3239 				}
3240 			}
3241 			xfs_iunlock(iq, XFS_ILOCK_SHARED);
3242 		}
3243 	}
3244 	mutex_spinunlock(&ch->ch_lock, s);
3245 
3246 	if (clcount) {
3247 		XFS_STATS_INC(xs_icluster_flushcnt);
3248 		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
3249 	}
3250 
3251 	/*
3252 	 * If the buffer is pinned then push on the log so we won't
3253 	 * get stuck waiting in the write for too long.
3254 	 */
3255 	if (XFS_BUF_ISPINNED(bp)) {
3256 		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
3257 	}
3258 
3259 	if (flags & INT_DELWRI) {
3260 		xfs_bdwrite(mp, bp);
3261 	} else if (flags & INT_ASYNC) {
3262 		xfs_bawrite(mp, bp);
3263 	} else {
3264 		error = xfs_bwrite(mp, bp);
3265 	}
3266 	return error;
3267 
3268 corrupt_out:
3269 	xfs_buf_relse(bp);
3270 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3271 	xfs_iflush_abort(ip);
3272 	/*
3273 	 * Unlocks the flush lock
3274 	 */
3275 	return XFS_ERROR(EFSCORRUPTED);
3276 
3277 cluster_corrupt_out:
3278 	/* Corruption detected in the clustering loop.  Invalidate the
3279 	 * inode buffer and shut down the filesystem.
3280 	 */
3281 	mutex_spinunlock(&ch->ch_lock, s);
3282 
3283 	/*
3284 	 * Clean up the buffer.  If it was B_DELWRI, just release it --
3285 	 * brelse can handle it with no problems.  If not, shut down the
3286 	 * filesystem before releasing the buffer.
3287 	 */
3288 	if ((bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp))) {
3289 		xfs_buf_relse(bp);
3290 	}
3291 
3292 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3293 
3294 	if (!bufwasdelwri) {
3295 		/*
3296 		 * Just like incore_relse: if we have b_iodone functions,
3297 		 * mark the buffer as an error and call them.  Otherwise
3298 		 * mark it as stale and brelse.
3299 		 */
3300 		if (XFS_BUF_IODONE_FUNC(bp)) {
3301 			XFS_BUF_CLR_BDSTRAT_FUNC(bp);
3302 			XFS_BUF_UNDONE(bp);
3303 			XFS_BUF_STALE(bp);
3304 			XFS_BUF_SHUT(bp);
3305 			XFS_BUF_ERROR(bp,EIO);
3306 			xfs_biodone(bp);
3307 		} else {
3308 			XFS_BUF_STALE(bp);
3309 			xfs_buf_relse(bp);
3310 		}
3311 	}
3312 
3313 	xfs_iflush_abort(iq);
3314 	/*
3315 	 * Unlocks the flush lock
3316 	 */
3317 	return XFS_ERROR(EFSCORRUPTED);
3318 }
3319 
3320 
3321 STATIC int
3322 xfs_iflush_int(
3323 	xfs_inode_t		*ip,
3324 	xfs_buf_t		*bp)
3325 {
3326 	xfs_inode_log_item_t	*iip;
3327 	xfs_dinode_t		*dip;
3328 	xfs_mount_t		*mp;
3329 #ifdef XFS_TRANS_DEBUG
3330 	int			first;
3331 #endif
3332 	SPLDECL(s);
3333 
3334 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3335 	ASSERT(issemalocked(&(ip->i_flock)));
3336 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3337 	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
3338 
3339 	iip = ip->i_itemp;
3340 	mp = ip->i_mount;
3341 
3343 	/*
3344 	 * If the inode isn't dirty, then just release the inode
3345 	 * flush lock and do nothing.
3346 	 */
3347 	if ((ip->i_update_core == 0) &&
3348 	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3349 		xfs_ifunlock(ip);
3350 		return 0;
3351 	}
3352 
3353 	/* set *dip = inode's place in the buffer */
3354 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);
3355 
3356 	/*
3357 	 * Clear i_update_core before copying out the data.
3358 	 * This is for coordination with our timestamp updates
3359 	 * that don't hold the inode lock. They will always
3360 	 * update the timestamps BEFORE setting i_update_core,
3361 	 * so if we clear i_update_core after they set it we
3362 	 * are guaranteed to see their updates to the timestamps.
3363 	 * I believe that this depends on strongly ordered memory
3364 	 * semantics, but we have that.  We use the SYNCHRONIZE
3365 	 * macro to make sure that the compiler does not reorder
3366 	 * the i_update_core access below the data copy below.
3367 	 */
3368 	ip->i_update_core = 0;
3369 	SYNCHRONIZE();
3370 
3371 	/*
3372 	 * Make sure to get the latest atime from the Linux inode.
3373 	 */
3374 	xfs_synchronize_atime(ip);
3375 
3376 	if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC,
3377 			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3378 		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3379 		    "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3380 			ip->i_ino, (int) INT_GET(dip->di_core.di_magic, ARCH_CONVERT), dip);
3381 		goto corrupt_out;
3382 	}
3383 	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3384 				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3385 		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3386 			"xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3387 			ip->i_ino, ip, ip->i_d.di_magic);
3388 		goto corrupt_out;
3389 	}
3390 	if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3391 		if (XFS_TEST_ERROR(
3392 		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3393 		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3394 		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3395 			xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3396 				"xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3397 				ip->i_ino, ip);
3398 			goto corrupt_out;
3399 		}
3400 	} else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3401 		if (XFS_TEST_ERROR(
3402 		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3403 		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3404 		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3405 		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3406 			xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3407 				"xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3408 				ip->i_ino, ip);
3409 			goto corrupt_out;
3410 		}
3411 	}
3412 	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3413 				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3414 				XFS_RANDOM_IFLUSH_5)) {
3415 		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3416 			"xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3417 			ip->i_ino,
3418 			ip->i_d.di_nextents + ip->i_d.di_anextents,
3419 			ip->i_d.di_nblocks,
3420 			ip);
3421 		goto corrupt_out;
3422 	}
3423 	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3424 				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3425 		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3426 			"xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3427 			ip->i_ino, ip->i_d.di_forkoff, ip);
3428 		goto corrupt_out;
3429 	}
3430 	/*
3431 	 * bump the flush iteration count, used to detect flushes which
3432 	 * postdate a log record during recovery.
3433 	 */
3434 
3435 	ip->i_d.di_flushiter++;
3436 
3437 	/*
3438 	 * Copy the dirty parts of the inode into the on-disk
3439 	 * inode.  We always copy out the core of the inode,
3440 	 * because if the inode is dirty at all the core must
3441 	 * be.
3442 	 */
3443 	xfs_xlate_dinode_core((xfs_caddr_t)&(dip->di_core), &(ip->i_d), -1);
3444 
3445 	/* Wrap: we never let the log put out DI_MAX_FLUSH */
3446 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3447 		ip->i_d.di_flushiter = 0;
3448 
3449 	/*
3450 	 * If this is really an old format inode and the superblock version
3451 	 * has not been updated to support only new format inodes, then
3452 	 * convert back to the old inode format.  If the superblock version
3453 	 * has been updated, then make the conversion permanent.
3454 	 */
3455 	ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
3456 	       XFS_SB_VERSION_HASNLINK(&mp->m_sb));
3457 	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
3458 		if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
3459 			/*
3460 			 * Convert it back.
3461 			 */
3462 			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3463 			INT_SET(dip->di_core.di_onlink, ARCH_CONVERT, ip->i_d.di_nlink);
3464 		} else {
3465 			/*
3466 			 * The superblock version has already been bumped,
3467 			 * so just make the conversion to the new inode
3468 			 * format permanent.
3469 			 */
3470 			ip->i_d.di_version = XFS_DINODE_VERSION_2;
3471 			INT_SET(dip->di_core.di_version, ARCH_CONVERT, XFS_DINODE_VERSION_2);
3472 			ip->i_d.di_onlink = 0;
3473 			dip->di_core.di_onlink = 0;
3474 			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3475 			memset(&(dip->di_core.di_pad[0]), 0,
3476 			      sizeof(dip->di_core.di_pad));
3477 			ASSERT(ip->i_d.di_projid == 0);
3478 		}
3479 	}
3480 
3481 	if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) {
3482 		goto corrupt_out;
3483 	}
3484 
3485 	if (XFS_IFORK_Q(ip)) {
3486 		/*
3487 		 * The only error from xfs_iflush_fork is on the data fork.
3488 		 */
3489 		(void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3490 	}
3491 	xfs_inobp_check(mp, bp);
3492 
3493 	/*
3494 	 * We've recorded everything logged in the inode, so we'd
3495 	 * like to clear the ilf_fields bits so we don't log and
3496 	 * flush things unnecessarily.  However, we can't stop
3497 	 * logging all this information until the data we've copied
3498 	 * into the disk buffer is written to disk.  If we did we might
3499 	 * overwrite the copy of the inode in the log with all the
3500 	 * data after re-logging only part of it, and in the face of
3501 	 * a crash we wouldn't have all the data we need to recover.
3502 	 *
3503 	 * What we do is move the bits to the ili_last_fields field.
3504 	 * When logging the inode, these bits are moved back to the
3505 	 * ilf_fields field.  In the xfs_iflush_done() routine we
3506 	 * clear ili_last_fields, since we know that the information
3507 	 * those bits represent is permanently on disk.  As long as
3508 	 * the flush completes before the inode is logged again, then
3509 	 * both ilf_fields and ili_last_fields will be cleared.
3510 	 *
3511 	 * We can play with the ilf_fields bits here, because the inode
3512 	 * lock must be held exclusively in order to set bits there
3513 	 * and the flush lock protects the ili_last_fields bits.
3514 	 * Set ili_logged so the flush done
3515 	 * routine can tell whether or not to look in the AIL.
3516 	 * Also, store the current LSN of the inode so that we can tell
3517 	 * whether the item has moved in the AIL from xfs_iflush_done().
3518 	 * In order to read the lsn we need the AIL lock, because
3519 	 * it is a 64 bit value that cannot be read atomically.
3520 	 */
3521 	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3522 		iip->ili_last_fields = iip->ili_format.ilf_fields;
3523 		iip->ili_format.ilf_fields = 0;
3524 		iip->ili_logged = 1;
3525 
3526 		ASSERT(sizeof(xfs_lsn_t) == 8);	/* don't lock if it shrinks */
3527 		AIL_LOCK(mp,s);
3528 		iip->ili_flush_lsn = iip->ili_item.li_lsn;
3529 		AIL_UNLOCK(mp, s);
3530 
3531 		/*
3532 		 * Attach the function xfs_iflush_done to the inode's
3533 		 * buffer.  This will remove the inode from the AIL
3534 		 * and unlock the inode's flush lock when the inode is
3535 		 * completely written to disk.
3536 		 */
3537 		xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3538 				      xfs_iflush_done, (xfs_log_item_t *)iip);
3539 
3540 		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3541 		ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3542 	} else {
3543 		/*
3544 		 * We're flushing an inode which is not in the AIL and has
3545 		 * not been logged but has i_update_core set.  For this
3546 		 * case we can use a B_DELWRI flush and immediately drop
3547 		 * the inode flush lock because we can avoid the whole
3548 		 * AIL state thing.  It's OK to drop the flush lock now,
3549 		 * because we've already locked the buffer and to do anything
3550 		 * you really need both.
3551 		 */
3552 		if (iip != NULL) {
3553 			ASSERT(iip->ili_logged == 0);
3554 			ASSERT(iip->ili_last_fields == 0);
3555 			ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3556 		}
3557 		xfs_ifunlock(ip);
3558 	}
3559 
3560 	return 0;
3561 
3562 corrupt_out:
3563 	return XFS_ERROR(EFSCORRUPTED);
3564 }
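
/*
 * Editor's sketch, not part of the original file: the i_update_core
 * ordering pattern described in xfs_iflush_int above, reduced to its
 * essence with C11 atomics (an assumption; the kernel code uses its own
 * SYNCHRONIZE() primitive).  The updater publishes data before setting
 * the dirty flag; the flusher clears the flag before copying the data,
 * so a racing update is either captured by this flush or re-dirties the
 * flag for the next one.
 */
#include <stdatomic.h>

struct example_inode {
	long		timestamp;	/* stands in for the timestamps */
	atomic_int	update_core;	/* stands in for i_update_core */
};

static void example_update(struct example_inode *ip, long now)
{
	ip->timestamp = now;				/* data first ... */
	atomic_store_explicit(&ip->update_core, 1,
			      memory_order_release);	/* ... flag second */
}

static long example_flush(struct example_inode *ip)
{
	atomic_store_explicit(&ip->update_core, 0,
			      memory_order_seq_cst);	/* flag first ... */
	atomic_thread_fence(memory_order_seq_cst);	/* plays SYNCHRONIZE() */
	return ip->timestamp;				/* ... then the data */
}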
3565 
3566 
3567 /*
3568  * Flush all inactive inodes in mp.
3569  */
3570 void
3571 xfs_iflush_all(
3572 	xfs_mount_t	*mp)
3573 {
3574 	xfs_inode_t	*ip;
3575 	bhv_vnode_t	*vp;
3576 
3577  again:
3578 	XFS_MOUNT_ILOCK(mp);
3579 	ip = mp->m_inodes;
3580 	if (ip == NULL)
3581 		goto out;
3582 
3583 	do {
3584 		/* Make sure we skip markers inserted by sync */
3585 		if (ip->i_mount == NULL) {
3586 			ip = ip->i_mnext;
3587 			continue;
3588 		}
3589 
3590 		vp = XFS_ITOV_NULL(ip);
3591 		if (!vp) {
3592 			XFS_MOUNT_IUNLOCK(mp);
3593 			xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
3594 			goto again;
3595 		}
3596 
3597 		ASSERT(vn_count(vp) == 0);
3598 
3599 		ip = ip->i_mnext;
3600 	} while (ip != mp->m_inodes);
3601  out:
3602 	XFS_MOUNT_IUNLOCK(mp);
3603 }
3604 
3605 /*
3606  * xfs_iaccess: check accessibility of inode for mode.
3607  */
3608 int
3609 xfs_iaccess(
3610 	xfs_inode_t	*ip,
3611 	mode_t		mode,
3612 	cred_t		*cr)
3613 {
3614 	int		error;
3615 	mode_t		orgmode = mode;
3616 	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
3617 
3618 	if (mode & S_IWUSR) {
3619 		umode_t		imode = inode->i_mode;
3620 
3621 		if (IS_RDONLY(inode) &&
3622 		    (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode)))
3623 			return XFS_ERROR(EROFS);
3624 
3625 		if (IS_IMMUTABLE(inode))
3626 			return XFS_ERROR(EACCES);
3627 	}
3628 
3629 	/*
3630 	 * If there's an Access Control List it's used instead of
3631 	 * the mode bits.
3632 	 */
3633 	if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1)
3634 		return error ? XFS_ERROR(error) : 0;
3635 
3636 	if (current_fsuid(cr) != ip->i_d.di_uid) {
3637 		mode >>= 3;
3638 		if (!in_group_p((gid_t)ip->i_d.di_gid))
3639 			mode >>= 3;
3640 	}
3641 
3642 	/*
3643 	 * If the DACs are ok we don't need any capability check.
3644 	 */
3645 	if ((ip->i_d.di_mode & mode) == mode)
3646 		return 0;
3647 	/*
3648 	 * Read/write DACs are always overridable.
3649 	 * Executable DACs are overridable if at least one exec bit is set.
3650 	 */
3651 	if (!(orgmode & S_IXUSR) ||
3652 	    (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
3653 		if (capable_cred(cr, CAP_DAC_OVERRIDE))
3654 			return 0;
3655 
3656 	if ((orgmode == S_IRUSR) ||
3657 	    (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) {
3658 		if (capable_cred(cr, CAP_DAC_READ_SEARCH))
3659 			return 0;
3660 #ifdef	NOISE
3661 		cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode);
3662 #endif	/* NOISE */
3663 		return XFS_ERROR(EACCES);
3664 	}
3665 	return XFS_ERROR(EACCES);
3666 }
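
/*
 * Editor's sketch, not from the original source: how the "mode >>= 3"
 * logic in xfs_iaccess selects a permission class.  Owner bits live at
 * 0700, group at 0070, other at 0007; one right shift by 3 moves the
 * requested S_IxUSR mask from the owner class to the group class, and a
 * second shift moves it to the other class.
 */
static int example_mode_allows(unsigned int di_mode, unsigned int requested,
			       int is_owner, int is_group_member)
{
	if (!is_owner) {
		requested >>= 3;		/* owner -> group class */
		if (!is_group_member)
			requested >>= 3;	/* group -> other class */
	}
	return (di_mode & requested) == requested;
}

/*
 * e.g. di_mode = 0750, requested = 0400 (owner read), caller is a group
 * member but not the owner: the mask becomes 0040 and the check passes.
 */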
3667 
3668 /*
3669  * xfs_iroundup: round up argument to next power of two
3670  */
3671 uint
3672 xfs_iroundup(
3673 	uint	v)
3674 {
3675 	int i;
3676 	uint m;
3677 
3678 	if ((v & (v - 1)) == 0)
3679 		return v;
3680 	ASSERT((v & 0x80000000) == 0);
3681 	if ((v & (v + 1)) == 0)
3682 		return v + 1;
3683 	for (i = 0, m = 1; i < 31; i++, m <<= 1) {
3684 		if (v & m)
3685 			continue;
3686 		v |= m;
3687 		if ((v & (v + 1)) == 0)
3688 			return v + 1;
3689 	}
3690 	ASSERT(0);
3691 	return 0;
3692 }
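
/*
 * Editor's sketch, not part of the original file: the same
 * round-up-to-next-power-of-two walk as xfs_iroundup, self-contained.
 * It fills in zero bits from the bottom until v + 1 becomes a power of
 * two, then returns that power.
 */
static unsigned int example_roundup_pow2(unsigned int v)
{
	unsigned int m;
	int i;

	if ((v & (v - 1)) == 0)		/* already a power of two */
		return v;
	if ((v & (v + 1)) == 0)		/* all low bits set, e.g. 0x7 */
		return v + 1;
	for (i = 0, m = 1; i < 31; i++, m <<= 1) {
		if (v & m)
			continue;
		v |= m;			/* fill the lowest zero bit */
		if ((v & (v + 1)) == 0)
			return v + 1;
	}
	return 0;			/* unreachable for v < 2^31 */
}

/* e.g. example_roundup_pow2(5) == 8 and example_roundup_pow2(8) == 8 */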
3693 
3694 #ifdef XFS_ILOCK_TRACE
3695 ktrace_t	*xfs_ilock_trace_buf;
3696 
3697 void
3698 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3699 {
3700 	ktrace_enter(ip->i_lock_trace,
3701 		     (void *)ip,
3702 		     (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3703 		     (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3704 		     (void *)ra,		/* caller of ilock */
3705 		     (void *)(unsigned long)current_cpu(),
3706 		     (void *)(unsigned long)current_pid(),
3707 		     NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3708 }
3709 #endif
3710 
3711 /*
3712  * Return a pointer to the extent record at file index idx.
3713  */
3714 xfs_bmbt_rec_t *
3715 xfs_iext_get_ext(
3716 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3717 	xfs_extnum_t	idx)		/* index of target extent */
3718 {
3719 	ASSERT(idx >= 0);
3720 	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3721 		return ifp->if_u1.if_ext_irec->er_extbuf;
3722 	} else if (ifp->if_flags & XFS_IFEXTIREC) {
3723 		xfs_ext_irec_t	*erp;		/* irec pointer */
3724 		int		erp_idx = 0;	/* irec index */
3725 		xfs_extnum_t	page_idx = idx;	/* ext index in target list */
3726 
3727 		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3728 		return &erp->er_extbuf[page_idx];
3729 	} else if (ifp->if_bytes) {
3730 		return &ifp->if_u1.if_extents[idx];
3731 	} else {
3732 		return NULL;
3733 	}
3734 }
3735 
3736 /*
3737  * Insert new item(s) into the extent records for incore inode
3738  * fork 'ifp'.  'count' new items are inserted at index 'idx'.
3739  */
3740 void
3741 xfs_iext_insert(
3742 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3743 	xfs_extnum_t	idx,		/* starting index of new items */
3744 	xfs_extnum_t	count,		/* number of inserted items */
3745 	xfs_bmbt_irec_t	*new)		/* items to insert */
3746 {
3747 	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
3748 	xfs_extnum_t	i;		/* extent record index */
3749 
3750 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3751 	xfs_iext_add(ifp, idx, count);
3752 	for (i = idx; i < idx + count; i++, new++) {
3753 		ep = xfs_iext_get_ext(ifp, i);
3754 		xfs_bmbt_set_all(ep, new);
3755 	}
3756 }
3757 
3758 /*
3759  * This is called when the amount of space required for incore file
3760  * extents needs to be increased. The ext_diff parameter stores the
3761  * number of new extents being added and the idx parameter contains
3762  * the extent index where the new extents will be added. If the new
3763  * extents are being appended, then we just need to (re)allocate and
3764  * initialize the space. Otherwise, if the new extents are being
3765  * inserted into the middle of the existing entries, a bit more work
3766  * is required to make room for the new extents to be inserted. The
3767  * caller is responsible for filling in the new extent entries upon
3768  * return.
3769  */
3770 void
3771 xfs_iext_add(
3772 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3773 	xfs_extnum_t	idx,		/* index to begin adding exts */
3774 	int		ext_diff)	/* number of extents to add */
3775 {
3776 	int		byte_diff;	/* new bytes being added */
3777 	int		new_size;	/* size of extents after adding */
3778 	xfs_extnum_t	nextents;	/* number of extents in file */
3779 
3780 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3781 	ASSERT((idx >= 0) && (idx <= nextents));
3782 	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3783 	new_size = ifp->if_bytes + byte_diff;
3784 	/*
3785 	 * If the new number of extents (nextents + ext_diff)
3786 	 * fits inside the inode, then continue to use the inline
3787 	 * extent buffer.
3788 	 */
3789 	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3790 		if (idx < nextents) {
3791 			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3792 				&ifp->if_u2.if_inline_ext[idx],
3793 				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
3794 			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3795 		}
3796 		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3797 		ifp->if_real_bytes = 0;
3798 		ifp->if_lastex = nextents + ext_diff;
3799 	}
3800 	/*
3801 	 * Otherwise use a linear (direct) extent list.
3802 	 * If the extents are currently inside the inode,
3803 	 * xfs_iext_realloc_direct will switch us from
3804 	 * inline to direct extent allocation mode.
3805 	 */
3806 	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3807 		xfs_iext_realloc_direct(ifp, new_size);
3808 		if (idx < nextents) {
3809 			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3810 				&ifp->if_u1.if_extents[idx],
3811 				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
3812 			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3813 		}
3814 	}
3815 	/* Indirection array */
3816 	else {
3817 		xfs_ext_irec_t	*erp;
3818 		int		erp_idx = 0;
3819 		int		page_idx = idx;
3820 
3821 		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3822 		if (ifp->if_flags & XFS_IFEXTIREC) {
3823 			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3824 		} else {
3825 			xfs_iext_irec_init(ifp);
3826 			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3827 			erp = ifp->if_u1.if_ext_irec;
3828 		}
3829 		/* Extents fit in target extent page */
3830 		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3831 			if (page_idx < erp->er_extcount) {
3832 				memmove(&erp->er_extbuf[page_idx + ext_diff],
3833 					&erp->er_extbuf[page_idx],
3834 					(erp->er_extcount - page_idx) *
3835 					sizeof(xfs_bmbt_rec_t));
3836 				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3837 			}
3838 			erp->er_extcount += ext_diff;
3839 			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3840 		}
3841 		/* Insert a new extent page */
3842 		else if (erp) {
3843 			xfs_iext_add_indirect_multi(ifp,
3844 				erp_idx, page_idx, ext_diff);
3845 		}
3846 		/*
3847 		 * If extent(s) are being appended to the last page in
3848 		 * the indirection array and the new extent(s) don't fit
3849 		 * in the page, then erp is NULL and erp_idx is set to
3850 		 * the next index needed in the indirection array.
3851 		 */
3852 		else {
3853 			int	count = ext_diff;
3854 
3855 			while (count) {
3856 				erp = xfs_iext_irec_new(ifp, erp_idx);
3857 				erp->er_extcount = count;
3858 				count -= MIN(count, (int)XFS_LINEAR_EXTS);
3859 				if (count) {
3860 					erp_idx++;
3861 				}
3862 			}
3863 		}
3864 	}
3865 	ifp->if_bytes = new_size;
3866 }
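
/*
 * Editor's sketch with hypothetical names, not from the original source:
 * the three-tier storage decision xfs_iext_add makes, in isolation.
 * Small extent counts stay inline in the inode, medium counts use one
 * contiguous (direct) buffer, and larger counts go through the
 * indirection array.
 */
enum example_ext_tier { EX_INLINE, EX_DIRECT, EX_INDIRECT };

static enum example_ext_tier
example_pick_tier(unsigned int nextents, unsigned int inline_max,
		  unsigned int linear_max)
{
	if (nextents <= inline_max)	/* fits in the inode itself */
		return EX_INLINE;
	if (nextents <= linear_max)	/* fits in one direct buffer */
		return EX_DIRECT;
	return EX_INDIRECT;		/* needs the indirection array */
}

/*
 * With inline_max = XFS_INLINE_EXTS and linear_max = XFS_LINEAR_EXTS
 * this mirrors the if/else chain in xfs_iext_add above.
 */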
3867 
3868 /*
3869  * This is called when incore extents are being added to the indirection
3870  * array and the new extents do not fit in the target extent list. The
3871  * erp_idx parameter contains the irec index for the target extent list
3872  * in the indirection array, and the idx parameter contains the extent
3873  * index within the list. The number of extents being added is stored
3874  * in the count parameter.
3875  *
3876  *    |-------|   |-------|
3877  *    |       |   |       |    idx - number of extents before idx
3878  *    |  idx  |   | count |
3879  *    |       |   |       |    count - number of extents being inserted at idx
3880  *    |-------|   |-------|
3881  *    | count |   | nex2  |    nex2 - number of extents after idx + count
3882  *    |-------|   |-------|
3883  */
3884 void
3885 xfs_iext_add_indirect_multi(
3886 	xfs_ifork_t	*ifp,			/* inode fork pointer */
3887 	int		erp_idx,		/* target extent irec index */
3888 	xfs_extnum_t	idx,			/* index within target list */
3889 	int		count)			/* new extents being added */
3890 {
3891 	int		byte_diff;		/* new bytes being added */
3892 	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
3893 	xfs_extnum_t	ext_diff;		/* number of extents to add */
3894 	xfs_extnum_t	ext_cnt;		/* new extents still needed */
3895 	xfs_extnum_t	nex2;			/* extents after idx + count */
3896 	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
3897 	int		nlists;			/* number of irec's (lists) */
3898 
3899 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3900 	erp = &ifp->if_u1.if_ext_irec[erp_idx];
3901 	nex2 = erp->er_extcount - idx;
3902 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3903 
3904 	/*
3905 	 * Save second part of target extent list
3906 	 * (all extents past idx) */
3907 	if (nex2) {
3908 		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3909 		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
3910 		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3911 		erp->er_extcount -= nex2;
3912 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3913 		memset(&erp->er_extbuf[idx], 0, byte_diff);
3914 	}
3915 
3916 	/*
3917 	 * Add the new extents to the end of the target
3918 	 * list, then allocate new irec record(s) and
3919 	 * extent buffer(s) as needed to store the rest
3920 	 * of the new extents.
3921 	 */
3922 	ext_cnt = count;
3923 	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3924 	if (ext_diff) {
3925 		erp->er_extcount += ext_diff;
3926 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3927 		ext_cnt -= ext_diff;
3928 	}
3929 	while (ext_cnt) {
3930 		erp_idx++;
3931 		erp = xfs_iext_irec_new(ifp, erp_idx);
3932 		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3933 		erp->er_extcount = ext_diff;
3934 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3935 		ext_cnt -= ext_diff;
3936 	}
3937 
3938 	/* Add nex2 extents back to indirection array */
3939 	if (nex2) {
3940 		xfs_extnum_t	ext_avail;
3941 		int		i;
3942 
3943 		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3944 		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3945 		i = 0;
3946 		/*
3947 		 * If nex2 extents fit in the current page, append
3948 		 * nex2_ep after the new extents.
3949 		 */
3950 		if (nex2 <= ext_avail) {
3951 			i = erp->er_extcount;
3952 		}
3953 		/*
3954 		 * Otherwise, check if space is available in the
3955 		 * next page.
3956 		 */
3957 		else if ((erp_idx < nlists - 1) &&
3958 			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3959 			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3960 			erp_idx++;
3961 			erp++;
3962 			/* Create a hole for nex2 extents */
3963 			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3964 				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3965 		}
3966 		/*
3967 		 * Final choice, create a new extent page for
3968 		 * nex2 extents.
3969 		 */
3970 		else {
3971 			erp_idx++;
3972 			erp = xfs_iext_irec_new(ifp, erp_idx);
3973 		}
3974 		memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3975 		kmem_free(nex2_ep, byte_diff);
3976 		erp->er_extcount += nex2;
3977 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3978 	}
3979 }
3980 
3981 /*
3982  * This is called when the amount of space required for incore file
3983  * extents needs to be decreased. The ext_diff parameter stores the
3984  * number of extents to be removed and the idx parameter contains
3985  * the extent index where the extents will be removed from.
3986  *
3987  * If the amount of space needed has decreased below the linear
3988  * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3989  * extent array.  Otherwise, use kmem_realloc() to adjust the
3990  * size to what is needed.
3991  */
3992 void
3993 xfs_iext_remove(
3994 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3995 	xfs_extnum_t	idx,		/* index to begin removing exts */
3996 	int		ext_diff)	/* number of extents to remove */
3997 {
3998 	xfs_extnum_t	nextents;	/* number of extents in file */
3999 	int		new_size;	/* size of extents after removal */
4000 
4001 	ASSERT(ext_diff > 0);
4002 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4003 	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
4004 
4005 	if (new_size == 0) {
4006 		xfs_iext_destroy(ifp);
4007 	} else if (ifp->if_flags & XFS_IFEXTIREC) {
4008 		xfs_iext_remove_indirect(ifp, idx, ext_diff);
4009 	} else if (ifp->if_real_bytes) {
4010 		xfs_iext_remove_direct(ifp, idx, ext_diff);
4011 	} else {
4012 		xfs_iext_remove_inline(ifp, idx, ext_diff);
4013 	}
4014 	ifp->if_bytes = new_size;
4015 }
4016 
4017 /*
4018  * This removes ext_diff extents from the inline buffer, beginning
4019  * at extent index idx.
4020  */
4021 void
4022 xfs_iext_remove_inline(
4023 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4024 	xfs_extnum_t	idx,		/* index to begin removing exts */
4025 	int		ext_diff)	/* number of extents to remove */
4026 {
4027 	int		nextents;	/* number of extents in file */
4028 
4029 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4030 	ASSERT(idx < XFS_INLINE_EXTS);
4031 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4032 	ASSERT(((nextents - ext_diff) > 0) &&
4033 		(nextents - ext_diff) < XFS_INLINE_EXTS);
4034 
4035 	if (idx + ext_diff < nextents) {
4036 		memmove(&ifp->if_u2.if_inline_ext[idx],
4037 			&ifp->if_u2.if_inline_ext[idx + ext_diff],
4038 			(nextents - (idx + ext_diff)) *
4039 			 sizeof(xfs_bmbt_rec_t));
4040 		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
4041 			0, ext_diff * sizeof(xfs_bmbt_rec_t));
4042 	} else {
4043 		memset(&ifp->if_u2.if_inline_ext[idx], 0,
4044 			ext_diff * sizeof(xfs_bmbt_rec_t));
4045 	}
4046 }
4047 
4048 /*
4049  * This removes ext_diff extents from a linear (direct) extent list,
4050  * beginning at extent index idx. If the extents are being removed
4051  * from the end of the list (ie. truncate) then we just need to re-
4052  * allocate the list to remove the extra space. Otherwise, if the
4053  * extents are being removed from the middle of the existing extent
4054  * entries, then we first need to move the extent records beginning
4055  * at idx + ext_diff up in the list to overwrite the records being
4056  * removed, then remove the extra space via kmem_realloc.
4057  */
4058 void
4059 xfs_iext_remove_direct(
4060 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4061 	xfs_extnum_t	idx,		/* index to begin removing exts */
4062 	int		ext_diff)	/* number of extents to remove */
4063 {
4064 	xfs_extnum_t	nextents;	/* number of extents in file */
4065 	int		new_size;	/* size of extents after removal */
4066 
4067 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4068 	new_size = ifp->if_bytes -
4069 		(ext_diff * sizeof(xfs_bmbt_rec_t));
4070 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4071 
4072 	if (new_size == 0) {
4073 		xfs_iext_destroy(ifp);
4074 		return;
4075 	}
4076 	/* Move extents up in the list (if needed) */
4077 	if (idx + ext_diff < nextents) {
4078 		memmove(&ifp->if_u1.if_extents[idx],
4079 			&ifp->if_u1.if_extents[idx + ext_diff],
4080 			(nextents - (idx + ext_diff)) *
4081 			 sizeof(xfs_bmbt_rec_t));
4082 	}
4083 	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
4084 		0, ext_diff * sizeof(xfs_bmbt_rec_t));
4085 	/*
4086 	 * Reallocate the direct extent list. If the extents
4087 	 * will fit inside the inode then xfs_iext_realloc_direct
4088 	 * will switch from direct to inline extent allocation
4089 	 * mode for us.
4090 	 */
4091 	xfs_iext_realloc_direct(ifp, new_size);
4092 	ifp->if_bytes = new_size;
4093 }
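
/*
 * Editor's sketch, not part of the original file: the
 * remove-from-the-middle pattern used by xfs_iext_remove_direct and
 * xfs_iext_remove_inline.  Records past the removed range are moved down
 * over the hole, then the now-unused tail is zeroed so stale records
 * cannot leak.
 */
#include <string.h>

static void example_remove_range(unsigned long long *recs,
				 unsigned int nrecs,
				 unsigned int idx, unsigned int count)
{
	if (idx + count < nrecs)	/* close the hole, if any */
		memmove(&recs[idx], &recs[idx + count],
			(nrecs - (idx + count)) * sizeof(recs[0]));
	memset(&recs[nrecs - count], 0, count * sizeof(recs[0]));
}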
4094 
4095 /*
4096  * This is called when incore extents are being removed from the
4097  * indirection array and the extents being removed span multiple extent
4098  * buffers. The idx parameter contains the file extent index where we
4099  * want to begin removing extents, and the count parameter contains
4100  * how many extents need to be removed.
4101  *
4102  *    |-------|   |-------|
4103  *    | nex1  |   |       |    nex1 - number of extents before idx
4104  *    |-------|   | count |
4105  *    |       |   |       |    count - number of extents being removed at idx
4106  *    | count |   |-------|
4107  *    |       |   | nex2  |    nex2 - number of extents after idx + count
4108  *    |-------|   |-------|
4109  */
4110 void
4111 xfs_iext_remove_indirect(
4112 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4113 	xfs_extnum_t	idx,		/* index to begin removing extents */
4114 	int		count)		/* number of extents to remove */
4115 {
4116 	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4117 	int		erp_idx = 0;	/* indirection array index */
4118 	xfs_extnum_t	ext_cnt;	/* extents left to remove */
4119 	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
4120 	xfs_extnum_t	nex1;		/* number of extents before idx */
4121 	xfs_extnum_t	nex2;		/* extents after idx + count */
4122 	int		nlists;		/* entries in indirection array */
4123 	int		page_idx = idx;	/* index in target extent list */
4124 
4125 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4126 	erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
4127 	ASSERT(erp != NULL);
4128 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4129 	nex1 = page_idx;
4130 	ext_cnt = count;
4131 	while (ext_cnt) {
4132 		nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
4133 		ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
4134 		/*
4135 		 * Check for deletion of entire list;
4136 		 * xfs_iext_irec_remove() updates extent offsets.
4137 		 */
4138 		if (ext_diff == erp->er_extcount) {
4139 			xfs_iext_irec_remove(ifp, erp_idx);
4140 			ext_cnt -= ext_diff;
4141 			nex1 = 0;
4142 			if (ext_cnt) {
4143 				ASSERT(erp_idx < ifp->if_real_bytes /
4144 					XFS_IEXT_BUFSZ);
4145 				erp = &ifp->if_u1.if_ext_irec[erp_idx];
4146 				nex1 = 0;
4147 				continue;
4148 			} else {
4149 				break;
4150 			}
4151 		}
4152 		/* Move extents up (if needed) */
4153 		if (nex2) {
4154 			memmove(&erp->er_extbuf[nex1],
4155 				&erp->er_extbuf[nex1 + ext_diff],
4156 				nex2 * sizeof(xfs_bmbt_rec_t));
4157 		}
4158 		/* Zero out rest of page */
4159 		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
4160 			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
4161 		/* Update remaining counters */
4162 		erp->er_extcount -= ext_diff;
4163 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
4164 		ext_cnt -= ext_diff;
4165 		nex1 = 0;
4166 		erp_idx++;
4167 		erp++;
4168 	}
4169 	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
4170 	xfs_iext_irec_compact(ifp);
4171 }
4172 
4173 /*
4174  * Create, destroy, or resize a linear (direct) block of extents.
4175  */
4176 void
4177 xfs_iext_realloc_direct(
4178 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4179 	int		new_size)	/* new size of extents */
4180 {
4181 	int		rnew_size;	/* real new size of extents */
4182 
4183 	rnew_size = new_size;
4184 
4185 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
4186 		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
4187 		 (new_size != ifp->if_real_bytes)));
4188 
4189 	/* Free extent records */
4190 	if (new_size == 0) {
4191 		xfs_iext_destroy(ifp);
4192 	}
4193 	/* Resize direct extent list and zero any new bytes */
4194 	else if (ifp->if_real_bytes) {
4195 		/* Check if extents will fit inside the inode */
4196 		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
4197 			xfs_iext_direct_to_inline(ifp, new_size /
4198 				(uint)sizeof(xfs_bmbt_rec_t));
4199 			ifp->if_bytes = new_size;
4200 			return;
4201 		}
4202 		if (!is_power_of_2(new_size)) {
4203 			rnew_size = xfs_iroundup(new_size);
4204 		}
4205 		if (rnew_size != ifp->if_real_bytes) {
4206 			ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4207 				kmem_realloc(ifp->if_u1.if_extents,
4208 						rnew_size,
4209 						ifp->if_real_bytes,
4210 						KM_SLEEP);
4211 		}
4212 		if (rnew_size > ifp->if_real_bytes) {
4213 			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
4214 				(uint)sizeof(xfs_bmbt_rec_t)], 0,
4215 				rnew_size - ifp->if_real_bytes);
4216 		}
4217 	}
4218 	/*
4219 	 * Switch from the inline extent buffer to a direct
4220 	 * extent list. Be sure to include the inline extent
4221 	 * bytes in new_size.
4222 	 */
4223 	else {
4224 		new_size += ifp->if_bytes;
4225 		if (!is_power_of_2(new_size)) {
4226 			rnew_size = xfs_iroundup(new_size);
4227 		}
4228 		xfs_iext_inline_to_direct(ifp, rnew_size);
4229 	}
4230 	ifp->if_real_bytes = rnew_size;
4231 	ifp->if_bytes = new_size;
4232 }
4233 
4234 /*
4235  * Switch from linear (direct) extent records to inline buffer.
4236  */
4237 void
4238 xfs_iext_direct_to_inline(
4239 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4240 	xfs_extnum_t	nextents)	/* number of extents in file */
4241 {
4242 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4243 	ASSERT(nextents <= XFS_INLINE_EXTS);
4244 	/*
4245 	 * The inline buffer was zeroed when we switched
4246 	 * from inline to direct extent allocation mode,
4247 	 * so we don't need to clear it here.
4248 	 */
4249 	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
4250 		nextents * sizeof(xfs_bmbt_rec_t));
4251 	kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4252 	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
4253 	ifp->if_real_bytes = 0;
4254 }
4255 
4256 /*
4257  * Switch from inline buffer to linear (direct) extent records.
4258  * new_size should already be rounded up to the next power of 2
4259  * by the caller (when appropriate), so use new_size as it is.
4260  * However, since new_size may be rounded up, we can't update
4261  * if_bytes here. It is the caller's responsibility to update
4262  * if_bytes upon return.
4263  */
4264 void
4265 xfs_iext_inline_to_direct(
4266 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4267 	int		new_size)	/* number of extents in file */
4268 {
4269 	ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4270 		kmem_alloc(new_size, KM_SLEEP);
4271 	memset(ifp->if_u1.if_extents, 0, new_size);
4272 	if (ifp->if_bytes) {
4273 		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
4274 			ifp->if_bytes);
4275 		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4276 			sizeof(xfs_bmbt_rec_t));
4277 	}
4278 	ifp->if_real_bytes = new_size;
4279 }
4280 
4281 /*
4282  * Resize an extent indirection array to new_size bytes.
4283  */
4284 void
4285 xfs_iext_realloc_indirect(
4286 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4287 	int		new_size)	/* new indirection array size */
4288 {
4289 	int		nlists;		/* number of irec's (ex lists) */
4290 	int		size;		/* current indirection array size */
4291 
4292 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4293 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4294 	size = nlists * sizeof(xfs_ext_irec_t);
4295 	ASSERT(ifp->if_real_bytes);
4296 	ASSERT((new_size >= 0) && (new_size != size));
4297 	if (new_size == 0) {
4298 		xfs_iext_destroy(ifp);
4299 	} else {
4300 		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
4301 			kmem_realloc(ifp->if_u1.if_ext_irec,
4302 				new_size, size, KM_SLEEP);
4303 	}
4304 }
4305 
4306 /*
4307  * Switch from indirection array to linear (direct) extent allocations.
4308  */
4309 void
4310 xfs_iext_indirect_to_direct(
4311 	 xfs_ifork_t	*ifp)		/* inode fork pointer */
4312 {
4313 	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
4314 	xfs_extnum_t	nextents;	/* number of extents in file */
4315 	int		size;		/* size of file extents */
4316 
4317 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4318 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4319 	ASSERT(nextents <= XFS_LINEAR_EXTS);
4320 	size = nextents * sizeof(xfs_bmbt_rec_t);
4321 
4322 	xfs_iext_irec_compact_full(ifp);
4323 	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
4324 
4325 	ep = ifp->if_u1.if_ext_irec->er_extbuf;
4326 	kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
4327 	ifp->if_flags &= ~XFS_IFEXTIREC;
4328 	ifp->if_u1.if_extents = ep;
4329 	ifp->if_bytes = size;
4330 	if (nextents < XFS_LINEAR_EXTS) {
4331 		xfs_iext_realloc_direct(ifp, size);
4332 	}
4333 }
4334 
4335 /*
4336  * Free incore file extents.
4337  */
4338 void
4339 xfs_iext_destroy(
4340 	xfs_ifork_t	*ifp)		/* inode fork pointer */
4341 {
4342 	if (ifp->if_flags & XFS_IFEXTIREC) {
4343 		int	erp_idx;
4344 		int	nlists;
4345 
4346 		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4347 		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
4348 			xfs_iext_irec_remove(ifp, erp_idx);
4349 		}
4350 		ifp->if_flags &= ~XFS_IFEXTIREC;
4351 	} else if (ifp->if_real_bytes) {
4352 		kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4353 	} else if (ifp->if_bytes) {
4354 		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4355 			sizeof(xfs_bmbt_rec_t));
4356 	}
4357 	ifp->if_u1.if_extents = NULL;
4358 	ifp->if_real_bytes = 0;
4359 	ifp->if_bytes = 0;
4360 }
4361 
4362 /*
4363  * Return a pointer to the extent record for file system block bno.
4364  */
4365 xfs_bmbt_rec_t *			/* pointer to found extent record */
4366 xfs_iext_bno_to_ext(
4367 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4368 	xfs_fileoff_t	bno,		/* block number to search for */
4369 	xfs_extnum_t	*idxp)		/* index of target extent */
4370 {
4371 	xfs_bmbt_rec_t	*base;		/* pointer to first extent */
4372 	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
4373 	xfs_bmbt_rec_t	*ep = NULL;	/* pointer to target extent */
4374 	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
4375 	int		high;		/* upper boundary in search */
4376 	xfs_extnum_t	idx = 0;	/* index of target extent */
4377 	int		low;		/* lower boundary in search */
4378 	xfs_extnum_t	nextents;	/* number of file extents */
4379 	xfs_fileoff_t	startoff = 0;	/* start offset of extent */
4380 
4381 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4382 	if (nextents == 0) {
4383 		*idxp = 0;
4384 		return NULL;
4385 	}
4386 	low = 0;
4387 	if (ifp->if_flags & XFS_IFEXTIREC) {
4388 		/* Find target extent list */
4389 		int	erp_idx = 0;
4390 		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
4391 		base = erp->er_extbuf;
4392 		high = erp->er_extcount - 1;
4393 	} else {
4394 		base = ifp->if_u1.if_extents;
4395 		high = nextents - 1;
4396 	}
4397 	/* Binary search extent records */
4398 	while (low <= high) {
4399 		idx = (low + high) >> 1;
4400 		ep = base + idx;
4401 		startoff = xfs_bmbt_get_startoff(ep);
4402 		blockcount = xfs_bmbt_get_blockcount(ep);
4403 		if (bno < startoff) {
4404 			high = idx - 1;
4405 		} else if (bno >= startoff + blockcount) {
4406 			low = idx + 1;
4407 		} else {
4408 			/* Convert back to file-based extent index */
4409 			if (ifp->if_flags & XFS_IFEXTIREC) {
4410 				idx += erp->er_extoff;
4411 			}
4412 			*idxp = idx;
4413 			return ep;
4414 		}
4415 	}
4416 	/* Convert back to file-based extent index */
4417 	if (ifp->if_flags & XFS_IFEXTIREC) {
4418 		idx += erp->er_extoff;
4419 	}
4420 	if (bno >= startoff + blockcount) {
4421 		if (++idx == nextents) {
4422 			ep = NULL;
4423 		} else {
4424 			ep = xfs_iext_get_ext(ifp, idx);
4425 		}
4426 	}
4427 	*idxp = idx;
4428 	return ep;
4429 }
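
/*
 * Editor's sketch with simplified types, not from the original source:
 * the binary search xfs_iext_bno_to_ext performs, over records that each
 * cover the half-open block range [startoff, startoff + blockcount).
 * Returns the index of the record containing bno, or -1 when bno falls
 * in a hole or past the last extent.
 */
struct example_ext {
	unsigned long long	startoff;
	unsigned long long	blockcount;
};

static int example_bno_search(const struct example_ext *base, int n,
			      unsigned long long bno)
{
	int low = 0, high = n - 1;

	while (low <= high) {
		int idx = (low + high) >> 1;

		if (bno < base[idx].startoff)
			high = idx - 1;
		else if (bno >= base[idx].startoff + base[idx].blockcount)
			low = idx + 1;
		else
			return idx;	/* bno lies inside this extent */
	}
	return -1;			/* hole, or beyond the last extent */
}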
4430 
4431 /*
4432  * Return a pointer to the indirection array entry containing the
4433  * extent record for filesystem block bno. Store the index of the
4434  * target irec in *erp_idxp.
4435  */
4436 xfs_ext_irec_t *			/* pointer to found extent record */
4437 xfs_iext_bno_to_irec(
4438 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4439 	xfs_fileoff_t	bno,		/* block number to search for */
4440 	int		*erp_idxp)	/* irec index of target ext list */
4441 {
4442 	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
4443 	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
4444 	int		erp_idx;	/* indirection array index */
4445 	int		nlists;		/* number of extent irec's (lists) */
4446 	int		high;		/* binary search upper limit */
4447 	int		low;		/* binary search lower limit */
4448 
4449 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4450 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4451 	erp_idx = 0;
4452 	low = 0;
4453 	high = nlists - 1;
4454 	while (low <= high) {
4455 		erp_idx = (low + high) >> 1;
4456 		erp = &ifp->if_u1.if_ext_irec[erp_idx];
4457 		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
4458 		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
4459 			high = erp_idx - 1;
4460 		} else if (erp_next && bno >=
4461 			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
4462 			low = erp_idx + 1;
4463 		} else {
4464 			break;
4465 		}
4466 	}
4467 	*erp_idxp = erp_idx;
4468 	return erp;
4469 }
4470 
4471 /*
4472  * Return a pointer to the indirection array entry containing the
4473  * extent record at file extent index *idxp. Store the index of the
4474  * target irec in *erp_idxp and store the page index of the target
4475  * extent record in *idxp.
4476  */
4477 xfs_ext_irec_t *
4478 xfs_iext_idx_to_irec(
4479 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4480 	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
4481 	int		*erp_idxp,	/* pointer to target irec */
4482 	int		realloc)	/* new bytes were just added */
4483 {
4484 	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
4485 	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
4486 	int		erp_idx;	/* indirection array index */
4487 	int		nlists;		/* number of irec's (ex lists) */
4488 	int		high;		/* binary search upper limit */
4489 	int		low;		/* binary search lower limit */
4490 	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */
4491 
4492 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4493 	ASSERT(page_idx >= 0 && page_idx <=
4494 		ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
4495 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4496 	erp_idx = 0;
4497 	low = 0;
4498 	high = nlists - 1;
4499 
4500 	/* Binary search extent irec's */
4501 	while (low <= high) {
4502 		erp_idx = (low + high) >> 1;
4503 		erp = &ifp->if_u1.if_ext_irec[erp_idx];
4504 		prev = erp_idx > 0 ? erp - 1 : NULL;
4505 		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
4506 		     realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
4507 			high = erp_idx - 1;
4508 		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
4509 			   (page_idx == erp->er_extoff + erp->er_extcount &&
4510 			    !realloc)) {
4511 			low = erp_idx + 1;
4512 		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
4513 			   erp->er_extcount == XFS_LINEAR_EXTS) {
4514 			ASSERT(realloc);
4515 			page_idx = 0;
4516 			erp_idx++;
4517 			erp = erp_idx < nlists ? erp + 1 : NULL;
4518 			break;
4519 		} else {
4520 			page_idx -= erp->er_extoff;
4521 			break;
4522 		}
4523 	}
4524 	*idxp = page_idx;
4525 	*erp_idxp = erp_idx;
4526 	return erp;
4527 }
4528 
4529 /*
4530  * Allocate and initialize an indirection array once the space needed
4531  * for incore extents increases above XFS_IEXT_BUFSZ.
4532  */
4533 void
4534 xfs_iext_irec_init(
4535 	xfs_ifork_t	*ifp)		/* inode fork pointer */
4536 {
4537 	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4538 	xfs_extnum_t	nextents;	/* number of extents in file */
4539 
4540 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4541 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4542 	ASSERT(nextents <= XFS_LINEAR_EXTS);
4543 
4544 	erp = (xfs_ext_irec_t *)
4545 		kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);
4546 
4547 	if (nextents == 0) {
4548 		ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4549 			kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4550 	} else if (!ifp->if_real_bytes) {
4551 		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
4552 	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
4553 		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
4554 	}
4555 	erp->er_extbuf = ifp->if_u1.if_extents;
4556 	erp->er_extcount = nextents;
4557 	erp->er_extoff = 0;
4558 
4559 	ifp->if_flags |= XFS_IFEXTIREC;
4560 	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
4561 	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
4562 	ifp->if_u1.if_ext_irec = erp;
4563 
4564 	return;
4565 }
4566 
4567 /*
4568  * Allocate and initialize a new entry in the indirection array.
4569  */
4570 xfs_ext_irec_t *
4571 xfs_iext_irec_new(
4572 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4573 	int		erp_idx)	/* index for new irec */
4574 {
4575 	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4576 	int		i;		/* loop counter */
4577 	int		nlists;		/* number of irec's (ex lists) */
4578 
4579 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4580 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4581 
4582 	/* Resize indirection array */
4583 	xfs_iext_realloc_indirect(ifp, ++nlists *
4584 				  sizeof(xfs_ext_irec_t));
4585 	/*
4586 	 * Move records down in the array so the
4587 	 * new page can use erp_idx.
4588 	 */
4589 	erp = ifp->if_u1.if_ext_irec;
4590 	for (i = nlists - 1; i > erp_idx; i--) {
4591 		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
4592 	}
4593 	ASSERT(i == erp_idx);
4594 
4595 	/* Initialize new extent record */
4596 	erp = ifp->if_u1.if_ext_irec;
4597 	erp[erp_idx].er_extbuf = (xfs_bmbt_rec_t *)
4598 		kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4599 	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4600 	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
4601 	erp[erp_idx].er_extcount = 0;
4602 	erp[erp_idx].er_extoff = erp_idx > 0 ?
4603 		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
4604 	return &erp[erp_idx];
4605 }
4606 
4607 /*
4608  * Remove a record from the indirection array.
4609  */
4610 void
4611 xfs_iext_irec_remove(
4612 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4613 	int		erp_idx)	/* irec index to remove */
4614 {
4615 	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4616 	int		i;		/* loop counter */
4617 	int		nlists;		/* number of irec's (ex lists) */
4618 
4619 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4620 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4621 	erp = &ifp->if_u1.if_ext_irec[erp_idx];
4622 	if (erp->er_extbuf) {
4623 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
4624 			-erp->er_extcount);
4625 		kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
4626 	}
4627 	/* Compact extent records */
4628 	erp = ifp->if_u1.if_ext_irec;
4629 	for (i = erp_idx; i < nlists - 1; i++) {
4630 		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
4631 	}
4632 	/*
4633 	 * Manually free the last extent record from the indirection
4634 	 * array.  A call to xfs_iext_realloc_indirect() with a size
4635 	 * of zero would result in a call to xfs_iext_destroy() which
4636 	 * would in turn call this function again, creating a nasty
4637 	 * infinite loop.
4638 	 */
4639 	if (--nlists) {
4640 		xfs_iext_realloc_indirect(ifp,
4641 			nlists * sizeof(xfs_ext_irec_t));
4642 	} else {
4643 		kmem_free(ifp->if_u1.if_ext_irec,
4644 			sizeof(xfs_ext_irec_t));
4645 	}
4646 	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4647 }
4648 
4649 /*
4650  * This is called to clean up large amounts of unused memory allocated
4651  * by the indirection array.  Before compacting anything though, verify
4652  * that the indirection array is still needed and switch back to the
4653  * linear extent list (or even the inline buffer) if possible.  The
4654  * compaction policy is as follows:
4655  *
4656  *    Full Compaction: Extents fit into a single page (or inline buffer)
4657  *    Full Compaction: Extents occupy less than 1/8 (12.5%) of allocated space
4658  * Partial Compaction: Extents occupy >= 12.5% and < 50% of allocated space
4659  *      No Compaction: Extents occupy at least 50% of allocated space
4660  */
4661 void
4662 xfs_iext_irec_compact(
4663 	xfs_ifork_t	*ifp)		/* inode fork pointer */
4664 {
4665 	xfs_extnum_t	nextents;	/* number of extents in file */
4666 	int		nlists;		/* number of irec's (ex lists) */
4667 
4668 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4669 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4670 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4671 
4672 	if (nextents == 0) {
4673 		xfs_iext_destroy(ifp);
4674 	} else if (nextents <= XFS_INLINE_EXTS) {
4675 		xfs_iext_indirect_to_direct(ifp);
4676 		xfs_iext_direct_to_inline(ifp, nextents);
4677 	} else if (nextents <= XFS_LINEAR_EXTS) {
4678 		xfs_iext_indirect_to_direct(ifp);
4679 	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
4680 		xfs_iext_irec_compact_full(ifp);
4681 	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
4682 		xfs_iext_irec_compact_pages(ifp);
4683 	}
4684 }
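
/*
 * Editor's sketch, not part of the original file: the occupancy
 * thresholds xfs_iext_irec_compact applies, computed with the same
 * shifts.  Capacity is nlists * XFS_LINEAR_EXTS; ">> 3" is one eighth
 * of it (12.5%) and ">> 1" is one half (50%).
 */
static int example_compaction_choice(unsigned int nextents,
				     unsigned int nlists,
				     unsigned int linear_exts)
{
	unsigned int capacity = nlists * linear_exts;

	if (nextents < (capacity >> 3))
		return 2;	/* full compaction */
	if (nextents < (capacity >> 1))
		return 1;	/* partial (page-merging) compaction */
	return 0;		/* leave the indirection array alone */
}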
4685 
4686 /*
4687  * Combine extents from neighboring extent pages.
4688  */
4689 void
4690 xfs_iext_irec_compact_pages(
4691 	xfs_ifork_t	*ifp)		/* inode fork pointer */
4692 {
4693 	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
4694 	int		erp_idx = 0;	/* indirection array index */
4695 	int		nlists;		/* number of irec's (ex lists) */
4696 
4697 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4698 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4699 	while (erp_idx < nlists - 1) {
4700 		erp = &ifp->if_u1.if_ext_irec[erp_idx];
4701 		erp_next = erp + 1;
4702 		if (erp_next->er_extcount <=
4703 		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
4704 			memmove(&erp->er_extbuf[erp->er_extcount],
4705 				erp_next->er_extbuf, erp_next->er_extcount *
4706 				sizeof(xfs_bmbt_rec_t));
4707 			erp->er_extcount += erp_next->er_extcount;
4708 			/*
4709 			 * Free page before removing extent record
4710 			 * so er_extoffs don't get modified in
4711 			 * xfs_iext_irec_remove.
4712 			 */
4713 			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
4714 			erp_next->er_extbuf = NULL;
4715 			xfs_iext_irec_remove(ifp, erp_idx + 1);
4716 			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4717 		} else {
4718 			erp_idx++;
4719 		}
4720 	}
4721 }
4722 
4723 /*
4724  * Fully compact the extent records managed by the indirection array.
4725  */
4726 void
4727 xfs_iext_irec_compact_full(
4728 	xfs_ifork_t	*ifp)			/* inode fork pointer */
4729 {
4730 	xfs_bmbt_rec_t	*ep, *ep_next;		/* extent record pointers */
4731 	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
4732 	int		erp_idx = 0;		/* extent irec index */
4733 	int		ext_avail;		/* empty entries in ex list */
4734 	int		ext_diff;		/* number of exts to add */
4735 	int		nlists;			/* number of irec's (ex lists) */
4736 
4737 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4738 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4739 	erp = ifp->if_u1.if_ext_irec;
4740 	ep = &erp->er_extbuf[erp->er_extcount];
4741 	erp_next = erp + 1;
4742 	ep_next = erp_next->er_extbuf;
4743 	while (erp_idx < nlists - 1) {
4744 		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
4745 		ext_diff = MIN(ext_avail, erp_next->er_extcount);
4746 		memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
4747 		erp->er_extcount += ext_diff;
4748 		erp_next->er_extcount -= ext_diff;
4749 		/* Remove next page */
4750 		if (erp_next->er_extcount == 0) {
4751 			/*
4752 			 * Free page before removing extent record
4753 			 * so er_extoffs don't get modified in
4754 			 * xfs_iext_irec_remove.
4755 			 */
4756 			kmem_free(erp_next->er_extbuf,
4757 				XFS_IEXT_BUFSZ);
4758 			erp_next->er_extbuf = NULL;
4759 			xfs_iext_irec_remove(ifp, erp_idx + 1);
4760 			erp = &ifp->if_u1.if_ext_irec[erp_idx];
4761 			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4762 		/* Update next page */
4763 		} else {
4764 			/* Move rest of page up to become next new page */
4765 			memmove(erp_next->er_extbuf, ep_next,
4766 				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
4767 			ep_next = erp_next->er_extbuf;
4768 			memset(&ep_next[erp_next->er_extcount], 0,
4769 				(XFS_LINEAR_EXTS - erp_next->er_extcount) *
4770 				sizeof(xfs_bmbt_rec_t));
4771 		}
4772 		if (erp->er_extcount == XFS_LINEAR_EXTS) {
4773 			erp_idx++;
4774 			if (erp_idx < nlists)
4775 				erp = &ifp->if_u1.if_ext_irec[erp_idx];
4776 			else
4777 				break;
4778 		}
4779 		ep = &erp->er_extbuf[erp->er_extcount];
4780 		erp_next = erp + 1;
4781 		ep_next = erp_next->er_extbuf;
4782 	}
4783 }
4784 
4785 /*
4786  * This is called to update the er_extoff field in the indirection
4787  * array when extents have been added or removed from one of the
4788  * extent lists. erp_idx contains the irec index to begin updating
4789  * at and ext_diff contains the number of extents that were added
4790  * or removed.
4791  */
4792 void
4793 xfs_iext_irec_update_extoffs(
4794 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4795 	int		erp_idx,	/* irec index to update */
4796 	int		ext_diff)	/* number of new extents */
4797 {
4798 	int		i;		/* loop counter */
4799 	int		nlists;		/* number of irec's (ex lists) */
4800 
4801 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4802 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4803 	for (i = erp_idx; i < nlists; i++) {
4804 		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
4805 	}
4806 }
4807