xref: /openbmc/linux/fs/xfs/libxfs/xfs_ialloc.c (revision 6aa7de05)
1 /*
2  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_sb.h"
26 #include "xfs_mount.h"
27 #include "xfs_defer.h"
28 #include "xfs_inode.h"
29 #include "xfs_btree.h"
30 #include "xfs_ialloc.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_alloc.h"
33 #include "xfs_rtalloc.h"
34 #include "xfs_error.h"
35 #include "xfs_bmap.h"
36 #include "xfs_cksum.h"
37 #include "xfs_trans.h"
38 #include "xfs_buf_item.h"
39 #include "xfs_icreate_item.h"
40 #include "xfs_icache.h"
41 #include "xfs_trace.h"
42 #include "xfs_log.h"
43 #include "xfs_rmap.h"
44 
45 
46 /*
47  * Allocation group level functions.
48  */
49 int
50 xfs_ialloc_cluster_alignment(
51 	struct xfs_mount	*mp)
52 {
53 	if (xfs_sb_version_hasalign(&mp->m_sb) &&
54 	    mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
55 		return mp->m_sb.sb_inoalignmt;
56 	return 1;
57 }
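
/*
 * Worked example (illustrative numbers, not from the original source):
 * suppose xfs_icluster_size_fsb() returns 2 fsblocks and the filesystem
 * was made with sb_inoalignmt == 4. The alignment feature bit is set
 * and 4 >= 2, so this returns 4; a filesystem without the align feature
 * (or with an alignment smaller than a cluster) falls back to 1.
 */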
58 
59 /*
60  * Lookup a record by ino in the btree given by cur.
61  */
62 int					/* error */
63 xfs_inobt_lookup(
64 	struct xfs_btree_cur	*cur,	/* btree cursor */
65 	xfs_agino_t		ino,	/* starting inode of chunk */
66 	xfs_lookup_t		dir,	/* <=, >=, == */
67 	int			*stat)	/* success/failure */
68 {
69 	cur->bc_rec.i.ir_startino = ino;
70 	cur->bc_rec.i.ir_holemask = 0;
71 	cur->bc_rec.i.ir_count = 0;
72 	cur->bc_rec.i.ir_freecount = 0;
73 	cur->bc_rec.i.ir_free = 0;
74 	return xfs_btree_lookup(cur, dir, stat);
75 }
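
/*
 * Typical usage sketch (illustrative; this is the pattern used by
 * xfs_check_agi_freecount() below): position the cursor, then fetch the
 * record it points at, checking the stat flag at each step:
 *
 *	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
 *	if (error || i == 0)
 *		return error;
 *	error = xfs_inobt_get_rec(cur, &rec, &i);
 *
 * Only ir_startino matters for the lookup; the other incore fields are
 * zeroed above purely to initialise bc_rec.
 */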
76 
77 /*
78  * Update the record referred to by cur to the value given.
79  * This either works (return 0) or gets an EFSCORRUPTED error.
80  */
81 STATIC int				/* error */
82 xfs_inobt_update(
83 	struct xfs_btree_cur	*cur,	/* btree cursor */
84 	xfs_inobt_rec_incore_t	*irec)	/* btree record */
85 {
86 	union xfs_btree_rec	rec;
87 
88 	rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
89 	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
90 		rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
91 		rec.inobt.ir_u.sp.ir_count = irec->ir_count;
92 		rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
93 	} else {
94 		/* ir_holemask/ir_count not supported on-disk */
95 		rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
96 	}
97 	rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
98 	return xfs_btree_update(cur, &rec);
99 }
100 
101 /* Convert on-disk btree record to incore inobt record. */
102 void
103 xfs_inobt_btrec_to_irec(
104 	struct xfs_mount		*mp,
105 	union xfs_btree_rec		*rec,
106 	struct xfs_inobt_rec_incore	*irec)
107 {
108 	irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
109 	if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
110 		irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
111 		irec->ir_count = rec->inobt.ir_u.sp.ir_count;
112 		irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
113 	} else {
114 		/*
115 		 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
116 		 * values for full inode chunks.
117 		 */
118 		irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
119 		irec->ir_count = XFS_INODES_PER_CHUNK;
120 		irec->ir_freecount =
121 				be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
122 	}
123 	irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
124 }
125 
126 /*
127  * Get the data from the pointed-to record.
128  */
129 int
130 xfs_inobt_get_rec(
131 	struct xfs_btree_cur		*cur,
132 	struct xfs_inobt_rec_incore	*irec,
133 	int				*stat)
134 {
135 	union xfs_btree_rec		*rec;
136 	int				error;
137 
138 	error = xfs_btree_get_rec(cur, &rec, stat);
139 	if (error || *stat == 0)
140 		return error;
141 
142 	xfs_inobt_btrec_to_irec(cur->bc_mp, rec, irec);
143 
144 	return 0;
145 }
146 
147 /*
148  * Insert a single inobt record. Cursor must already point to desired location.
149  */
150 STATIC int
151 xfs_inobt_insert_rec(
152 	struct xfs_btree_cur	*cur,
153 	uint16_t		holemask,
154 	uint8_t			count,
155 	int32_t			freecount,
156 	xfs_inofree_t		free,
157 	int			*stat)
158 {
159 	cur->bc_rec.i.ir_holemask = holemask;
160 	cur->bc_rec.i.ir_count = count;
161 	cur->bc_rec.i.ir_freecount = freecount;
162 	cur->bc_rec.i.ir_free = free;
163 	return xfs_btree_insert(cur, stat);
164 }
165 
166 /*
167  * Insert records describing a newly allocated inode chunk into the inobt.
168  */
169 STATIC int
170 xfs_inobt_insert(
171 	struct xfs_mount	*mp,
172 	struct xfs_trans	*tp,
173 	struct xfs_buf		*agbp,
174 	xfs_agino_t		newino,
175 	xfs_agino_t		newlen,
176 	xfs_btnum_t		btnum)
177 {
178 	struct xfs_btree_cur	*cur;
179 	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
180 	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
181 	xfs_agino_t		thisino;
182 	int			i;
183 	int			error;
184 
185 	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
186 
187 	for (thisino = newino;
188 	     thisino < newino + newlen;
189 	     thisino += XFS_INODES_PER_CHUNK) {
190 		error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
191 		if (error) {
192 			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
193 			return error;
194 		}
195 		ASSERT(i == 0);
196 
197 		error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
198 					     XFS_INODES_PER_CHUNK,
199 					     XFS_INODES_PER_CHUNK,
200 					     XFS_INOBT_ALL_FREE, &i);
201 		if (error) {
202 			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
203 			return error;
204 		}
205 		ASSERT(i == 1);
206 	}
207 
208 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
209 
210 	return 0;
211 }
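
/*
 * Example (illustrative): for newlen == 128 with XFS_INODES_PER_CHUNK
 * == 64, the loop above inserts two records, at newino and newino + 64,
 * each marked fully free (XFS_INOBT_ALL_FREE).
 */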
212 
213 /*
214  * Verify that the number of free inodes in the AGI is correct.
215  */
216 #ifdef DEBUG
217 STATIC int
218 xfs_check_agi_freecount(
219 	struct xfs_btree_cur	*cur,
220 	struct xfs_agi		*agi)
221 {
222 	if (cur->bc_nlevels == 1) {
223 		xfs_inobt_rec_incore_t rec;
224 		int		freecount = 0;
225 		int		error;
226 		int		i;
227 
228 		error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
229 		if (error)
230 			return error;
231 
232 		do {
233 			error = xfs_inobt_get_rec(cur, &rec, &i);
234 			if (error)
235 				return error;
236 
237 			if (i) {
238 				freecount += rec.ir_freecount;
239 				error = xfs_btree_increment(cur, 0, &i);
240 				if (error)
241 					return error;
242 			}
243 		} while (i == 1);
244 
245 		if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
246 			ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
247 	}
248 	return 0;
249 }
250 #else
251 #define xfs_check_agi_freecount(cur, agi)	0
252 #endif
253 
254 /*
255  * Initialise a new set of inodes. When called without a transaction context
256  * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
257  * than logging them (which in a transaction context puts them into the AIL
258  * for writeback rather than the xfsbufd queue).
259  */
260 int
261 xfs_ialloc_inode_init(
262 	struct xfs_mount	*mp,
263 	struct xfs_trans	*tp,
264 	struct list_head	*buffer_list,
265 	int			icount,
266 	xfs_agnumber_t		agno,
267 	xfs_agblock_t		agbno,
268 	xfs_agblock_t		length,
269 	unsigned int		gen)
270 {
271 	struct xfs_buf		*fbuf;
272 	struct xfs_dinode	*free;
273 	int			nbufs, blks_per_cluster, inodes_per_cluster;
274 	int			version;
275 	int			i, j;
276 	xfs_daddr_t		d;
277 	xfs_ino_t		ino = 0;
278 
279 	/*
280 	 * Loop over the new block(s), filling in the inodes.  For small block
281 	 * sizes, manipulate the inodes in buffers which are multiples of the
282 	 * block size.
283 	 */
284 	blks_per_cluster = xfs_icluster_size_fsb(mp);
285 	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
286 	nbufs = length / blks_per_cluster;
287 
288 	/*
289 	 * Figure out what version number to use in the inodes we create.  If
290 	 * the superblock version has caught up to the one that supports the new
291 	 * inode format, then use the new inode version.  Otherwise use the old
292 	 * version so that old kernels will continue to be able to use the file
293 	 * system.
294 	 *
295 	 * For v3 inodes, we also need to write the inode number into the inode,
296 	 * so calculate the first inode number of the chunk here as
297 	 * XFS_OFFBNO_TO_AGINO() only works within a filesystem block, not
298 	 * across multiple filesystem blocks (such as a cluster) and so cannot
299 	 * be used in the cluster buffer loop below.
300 	 *
301 	 * Further, because we are writing the inode directly into the buffer
302 	 * and calculating a CRC on the entire inode, we have to log the entire
303 	 * inode so that the entire range the CRC covers is present in the log.
304 	 * That means for v3 inodes we log the entire buffer rather than just the
305 	 * inode cores.
306 	 */
307 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
308 		version = 3;
309 		ino = XFS_AGINO_TO_INO(mp, agno,
310 				       XFS_OFFBNO_TO_AGINO(mp, agbno, 0));
311 
312 		/*
313 		 * Log the initialisation that is about to take place as a
314 		 * logical operation. This means the transaction does not
315 		 * need to log the physical changes to the inode buffers as log
316 		 * recovery will know what initialisation is actually needed.
317 		 * Hence we only need to log the buffers as "ordered" buffers so
318 		 * they track in the AIL as if they were physically logged.
319 		 */
320 		if (tp)
321 			xfs_icreate_log(tp, agno, agbno, icount,
322 					mp->m_sb.sb_inodesize, length, gen);
323 	} else
324 		version = 2;
325 
326 	for (j = 0; j < nbufs; j++) {
327 		/*
328 		 * Get the block.
329 		 */
330 		d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
331 		fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
332 					 mp->m_bsize * blks_per_cluster,
333 					 XBF_UNMAPPED);
334 		if (!fbuf)
335 			return -ENOMEM;
336 
337 		/* Initialize the inode buffers and log them appropriately. */
338 		fbuf->b_ops = &xfs_inode_buf_ops;
339 		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
340 		for (i = 0; i < inodes_per_cluster; i++) {
341 			int	ioffset = i << mp->m_sb.sb_inodelog;
342 			uint	isize = xfs_dinode_size(version);
343 
344 			free = xfs_make_iptr(mp, fbuf, i);
345 			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
346 			free->di_version = version;
347 			free->di_gen = cpu_to_be32(gen);
348 			free->di_next_unlinked = cpu_to_be32(NULLAGINO);
349 
350 			if (version == 3) {
351 				free->di_ino = cpu_to_be64(ino);
352 				ino++;
353 				uuid_copy(&free->di_uuid,
354 					  &mp->m_sb.sb_meta_uuid);
355 				xfs_dinode_calc_crc(mp, free);
356 			} else if (tp) {
357 				/* just log the inode core */
358 				xfs_trans_log_buf(tp, fbuf, ioffset,
359 						  ioffset + isize - 1);
360 			}
361 		}
362 
363 		if (tp) {
364 			/*
365 			 * Mark the buffer as an inode allocation buffer so it
366 			 * sticks in the AIL at the point of this allocation
367 			 * transaction. This ensures they are on disk before
368 			 * the tail of the log can be moved past this
369 			 * transaction (i.e. by preventing relogging from moving
370 			 * it forward in the log).
371 			 */
372 			xfs_trans_inode_alloc_buf(tp, fbuf);
373 			if (version == 3) {
374 				/*
375 				 * Mark the buffer as ordered so that it is
376 				 * not physically logged in the transaction but
377 				 * is still tracked in the AIL as part of the
378 				 * transaction and pins the log appropriately.
379 				 */
380 				xfs_trans_ordered_buf(tp, fbuf);
381 			}
382 		} else {
383 			fbuf->b_flags |= XBF_DONE;
384 			xfs_buf_delwri_queue(fbuf, buffer_list);
385 			xfs_buf_relse(fbuf);
386 		}
387 	}
388 	return 0;
389 }
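
/*
 * Cluster geometry example (illustrative numbers): with 4k blocks and
 * 512 byte inodes, sb_inopblog == 3 (8 inodes per block). If
 * xfs_icluster_size_fsb() returns 2, inodes_per_cluster is 2 << 3 == 16
 * and a 64 inode chunk spanning 8 blocks (length == 8) is initialised
 * as nbufs == 8 / 2 == 4 cluster buffers.
 */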
390 
391 /*
392  * Align startino and allocmask for a recently allocated sparse chunk such that
393  * they are fit for insertion (or merge) into the on-disk inode btrees.
394  *
395  * Background:
396  *
397  * When enabled, sparse inode support increases the inode alignment from cluster
398  * size to inode chunk size. This means that the minimum range between two
399  * non-adjacent inode records in the inobt is large enough for a full inode
400  * record. This allows for cluster-sized, cluster-aligned block allocation
401  * without needing to worry about whether the resulting inode record overlaps
402  * with another record in the tree. Without this basic rule, we would have to
403  * deal with the consequences of overlap by potentially undoing recent
404  * allocations in the inode allocation codepath.
405  *
406  * Because of this alignment rule (which is enforced on mount), there are two
407  * inobt possibilities for newly allocated sparse chunks. One is that the
408  * aligned inode record for the chunk covers a range of inodes not already
409  * covered in the inobt (i.e., it is safe to insert a new sparse record). The
410  * other is that a record already exists at the aligned startino that considers
411  * the newly allocated range as sparse. In the latter case, record content is
412  * merged in the hope that sparse inode chunks fill to full chunks over time.
413  */
414 STATIC void
415 xfs_align_sparse_ino(
416 	struct xfs_mount		*mp,
417 	xfs_agino_t			*startino,
418 	uint16_t			*allocmask)
419 {
420 	xfs_agblock_t			agbno;
421 	xfs_agblock_t			mod;
422 	int				offset;
423 
424 	agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
425 	mod = agbno % mp->m_sb.sb_inoalignmt;
426 	if (!mod)
427 		return;
428 
429 	/* calculate the inode offset and align startino */
430 	offset = mod << mp->m_sb.sb_inopblog;
431 	*startino -= offset;
432 
433 	/*
434 	 * Since startino has been aligned down, left shift allocmask such that
435 	 * it continues to represent the same physical inodes relative to the
436 	 * new startino.
437 	 */
438 	*allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
439 }
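
/*
 * Worked example (illustrative numbers): with 8 inodes per block
 * (sb_inopblog == 3) and sb_inoalignmt == 8, a sparse chunk starting at
 * agbno 12 has mod == 4, so startino is pulled back by 4 << 3 == 32
 * inodes. With XFS_INODES_PER_HOLEMASK_BIT == 4, allocmask is shifted
 * left by 32 / 4 == 8 bits so it still describes the same physical
 * inodes relative to the new, aligned startino.
 */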
440 
441 /*
442  * Determine whether the source inode record can merge into the target. Both
443  * records must be sparse, the inode ranges must match and there must be no
444  * allocation overlap between the records.
445  */
446 STATIC bool
447 __xfs_inobt_can_merge(
448 	struct xfs_inobt_rec_incore	*trec,	/* tgt record */
449 	struct xfs_inobt_rec_incore	*srec)	/* src record */
450 {
451 	uint64_t			talloc;
452 	uint64_t			salloc;
453 
454 	/* records must cover the same inode range */
455 	if (trec->ir_startino != srec->ir_startino)
456 		return false;
457 
458 	/* both records must be sparse */
459 	if (!xfs_inobt_issparse(trec->ir_holemask) ||
460 	    !xfs_inobt_issparse(srec->ir_holemask))
461 		return false;
462 
463 	/* both records must track some inodes */
464 	if (!trec->ir_count || !srec->ir_count)
465 		return false;
466 
467 	/* can't exceed capacity of a full record */
468 	if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
469 		return false;
470 
471 	/* verify there is no allocation overlap */
472 	talloc = xfs_inobt_irec_to_allocmask(trec);
473 	salloc = xfs_inobt_irec_to_allocmask(srec);
474 	if (talloc & salloc)
475 		return false;
476 
477 	return true;
478 }
479 
480 /*
481  * Merge the source inode record into the target. The caller must call
482  * __xfs_inobt_can_merge() to ensure the merge is valid.
483  */
484 STATIC void
485 __xfs_inobt_rec_merge(
486 	struct xfs_inobt_rec_incore	*trec,	/* target */
487 	struct xfs_inobt_rec_incore	*srec)	/* src */
488 {
489 	ASSERT(trec->ir_startino == srec->ir_startino);
490 
491 	/* combine the counts */
492 	trec->ir_count += srec->ir_count;
493 	trec->ir_freecount += srec->ir_freecount;
494 
495 	/*
496 	 * Merge the holemask and free mask. For both fields, 0 bits refer to
497 	 * allocated inodes. We combine the allocated ranges with bitwise AND.
498 	 */
499 	trec->ir_holemask &= srec->ir_holemask;
500 	trec->ir_free &= srec->ir_free;
501 }
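
/*
 * Mask example (illustrative): if the target record holds the first
 * half of a chunk (ir_holemask == 0xff00, a set bit meaning "hole") and
 * the source holds the second half (ir_holemask == 0x00ff), the merged
 * holemask is 0xff00 & 0x00ff == 0, i.e. a fully allocated chunk.
 * ir_free combines the same way; set bits in hole regions only become
 * meaningful once masked against the allocation mask (see
 * xfs_inobt_first_free_inode()).
 */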
502 
503 /*
504  * Insert a new sparse inode chunk into the associated inode btree. The inode
505  * record for the sparse chunk is pre-aligned to a startino that should match
506  * any pre-existing sparse inode record in the tree. This allows sparse chunks
507  * to fill over time.
508  *
509  * This function supports two modes of handling preexisting records depending on
510  * the merge flag. If merge is true, the provided record is merged with the
511  * existing record and updated in place. The merged record is returned in nrec.
512  * If merge is false, an existing record is replaced with the provided record.
513  * If no preexisting record exists, the provided record is always inserted.
514  *
515  * It is considered corruption if a merge is requested and not possible. Given
516  * the sparse inode alignment constraints, this should never happen.
517  */
518 STATIC int
519 xfs_inobt_insert_sprec(
520 	struct xfs_mount		*mp,
521 	struct xfs_trans		*tp,
522 	struct xfs_buf			*agbp,
523 	int				btnum,
524 	struct xfs_inobt_rec_incore	*nrec,	/* in/out: new/merged rec. */
525 	bool				merge)	/* merge or replace */
526 {
527 	struct xfs_btree_cur		*cur;
528 	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
529 	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
530 	int				error;
531 	int				i;
532 	struct xfs_inobt_rec_incore	rec;
533 
534 	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
535 
536 	/* the new record is pre-aligned so we know where to look */
537 	error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
538 	if (error)
539 		goto error;
540 	/* if nothing there, insert a new record and return */
541 	if (i == 0) {
542 		error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
543 					     nrec->ir_count, nrec->ir_freecount,
544 					     nrec->ir_free, &i);
545 		if (error)
546 			goto error;
547 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
548 
549 		goto out;
550 	}
551 
552 	/*
553 	 * A record exists at this startino. Merge or replace the record
554 	 * depending on what we've been asked to do.
555 	 */
556 	if (merge) {
557 		error = xfs_inobt_get_rec(cur, &rec, &i);
558 		if (error)
559 			goto error;
560 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
561 		XFS_WANT_CORRUPTED_GOTO(mp,
562 					rec.ir_startino == nrec->ir_startino,
563 					error);
564 
565 		/*
566 		 * This should never fail. If we have coexisting records that
567 		 * cannot merge, something is seriously wrong.
568 		 */
569 		XFS_WANT_CORRUPTED_GOTO(mp, __xfs_inobt_can_merge(nrec, &rec),
570 					error);
571 
572 		trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
573 					 rec.ir_holemask, nrec->ir_startino,
574 					 nrec->ir_holemask);
575 
576 		/* merge into nrec to output the updated record */
577 		__xfs_inobt_rec_merge(nrec, &rec);
578 
579 		trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino,
580 					  nrec->ir_holemask);
581 
582 		error = xfs_inobt_rec_check_count(mp, nrec);
583 		if (error)
584 			goto error;
585 	}
586 
587 	error = xfs_inobt_update(cur, nrec);
588 	if (error)
589 		goto error;
590 
591 out:
592 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
593 	return 0;
594 error:
595 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
596 	return error;
597 }
598 
599 /*
600  * Allocate new inodes in the allocation group specified by agbp.
601  * Return 0 for success, else error code.
602  */
603 STATIC int				/* error code or 0 */
604 xfs_ialloc_ag_alloc(
605 	xfs_trans_t	*tp,		/* transaction pointer */
606 	xfs_buf_t	*agbp,		/* alloc group buffer */
607 	int		*alloc)
608 {
609 	xfs_agi_t	*agi;		/* allocation group header */
610 	xfs_alloc_arg_t	args;		/* allocation argument structure */
611 	xfs_agnumber_t	agno;
612 	int		error;
613 	xfs_agino_t	newino;		/* new first inode's number */
614 	xfs_agino_t	newlen;		/* new number of inodes */
615 	int		isaligned = 0;	/* inode allocation at stripe unit */
616 					/* boundary */
617 	uint16_t	allocmask = (uint16_t) -1; /* init. to full chunk */
618 	struct xfs_inobt_rec_incore rec;
619 	struct xfs_perag *pag;
620 	int		do_sparse = 0;
621 
622 	memset(&args, 0, sizeof(args));
623 	args.tp = tp;
624 	args.mp = tp->t_mountp;
625 	args.fsbno = NULLFSBLOCK;
626 	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INODES);
627 
628 #ifdef DEBUG
629 	/* randomly do sparse inode allocations */
630 	if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
631 	    args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks)
632 		do_sparse = prandom_u32() & 1;
633 #endif
634 
635 	/*
636 	 * Locking will ensure that we don't have two callers in here
637 	 * at one time.
638 	 */
639 	newlen = args.mp->m_ialloc_inos;
640 	if (args.mp->m_maxicount &&
641 	    percpu_counter_read_positive(&args.mp->m_icount) + newlen >
642 							args.mp->m_maxicount)
643 		return -ENOSPC;
644 	args.minlen = args.maxlen = args.mp->m_ialloc_blks;
645 	/*
646 	 * First try to allocate inodes contiguous with the last-allocated
647 	 * chunk of inodes.  If the filesystem is striped, this will fill
648 	 * an entire stripe unit with inodes.
649 	 */
650 	agi = XFS_BUF_TO_AGI(agbp);
651 	newino = be32_to_cpu(agi->agi_newino);
652 	agno = be32_to_cpu(agi->agi_seqno);
653 	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
654 		     args.mp->m_ialloc_blks;
655 	if (do_sparse)
656 		goto sparse_alloc;
657 	if (likely(newino != NULLAGINO &&
658 		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
659 		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
660 		args.type = XFS_ALLOCTYPE_THIS_BNO;
661 		args.prod = 1;
662 
663 		/*
664 		 * We need to take into account alignment here to ensure that
665 		 * we don't modify the free list if we fail to have an exact
666 		 * block. If we don't have an exact match, and every other
667 		 * allocation attempt fails, we'll end up cancelling
668 		 * a dirty transaction and shutting down.
669 		 *
670 		 * For an exact allocation, alignment must be 1; however,
671 		 * we need to take cluster alignment into account when
672 		 * fixing up the freelist. Use the minalignslop field to
673 		 * indicate that extra blocks might be required for alignment,
674 		 * but not to use them in the actual exact allocation.
675 		 */
676 		args.alignment = 1;
677 		args.minalignslop = xfs_ialloc_cluster_alignment(args.mp) - 1;
678 
679 		/* Allow space for the inode btree to split. */
680 		args.minleft = args.mp->m_in_maxlevels - 1;
681 		if ((error = xfs_alloc_vextent(&args)))
682 			return error;
683 
684 		/*
685 		 * This request might have dirtied the transaction if the AG can
686 		 * satisfy the request, but the exact block was not available.
687 		 * If the allocation did fail, subsequent requests will relax
688 		 * the exact agbno requirement and increase the alignment
689 		 * instead. It is critical that the total size of the request
690 		 * (len + alignment + slop) does not increase from this point
691 		 * on, so reset minalignslop to ensure it is not included in
692 		 * subsequent requests.
693 		 */
694 		args.minalignslop = 0;
695 	}
696 
697 	if (unlikely(args.fsbno == NULLFSBLOCK)) {
698 		/*
699 		 * Set the alignment for the allocation.
700 		 * If stripe alignment is turned on then align at stripe unit
701 		 * boundary.
702 		 * If the cluster size is smaller than a filesystem block
703 		 * then we're doing I/O for inodes in filesystem block size
704 		 * pieces, so we don't need alignment anyway.
705 		 */
706 		isaligned = 0;
707 		if (args.mp->m_sinoalign) {
708 			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
709 			args.alignment = args.mp->m_dalign;
710 			isaligned = 1;
711 		} else
712 			args.alignment = xfs_ialloc_cluster_alignment(args.mp);
713 		/*
714 		 * Need to figure out where to allocate the inode blocks.
715 		 * Ideally they should be spaced out through the a.g.
716 		 * For now, just allocate blocks up front.
717 		 */
718 		args.agbno = be32_to_cpu(agi->agi_root);
719 		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
720 		/*
721 		 * Allocate a fixed-size extent of inodes.
722 		 */
723 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
724 		args.prod = 1;
725 		/*
726 		 * Allow space for the inode btree to split.
727 		 */
728 		args.minleft = args.mp->m_in_maxlevels - 1;
729 		if ((error = xfs_alloc_vextent(&args)))
730 			return error;
731 	}
732 
733 	/*
734 	 * If stripe alignment is turned on, then try again with cluster
735 	 * alignment.
736 	 */
737 	if (isaligned && args.fsbno == NULLFSBLOCK) {
738 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
739 		args.agbno = be32_to_cpu(agi->agi_root);
740 		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
741 		args.alignment = xfs_ialloc_cluster_alignment(args.mp);
742 		if ((error = xfs_alloc_vextent(&args)))
743 			return error;
744 	}
745 
746 	/*
747 	 * Finally, try a sparse allocation if the filesystem supports it and
748 	 * the sparse allocation length is smaller than a full chunk.
749 	 */
750 	if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
751 	    args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks &&
752 	    args.fsbno == NULLFSBLOCK) {
753 sparse_alloc:
754 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
755 		args.agbno = be32_to_cpu(agi->agi_root);
756 		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
757 		args.alignment = args.mp->m_sb.sb_spino_align;
758 		args.prod = 1;
759 
760 		args.minlen = args.mp->m_ialloc_min_blks;
761 		args.maxlen = args.minlen;
762 
763 		/*
764 		 * The inode record will be aligned to full chunk size. We must
765 		 * prevent sparse allocation from AG boundaries that result in
766 		 * invalid inode records, such as records that start at agbno 0
767 		 * or extend beyond the AG.
768 		 *
769 		 * Set min agbno to the first aligned, non-zero agbno and max to
770 		 * the last aligned agbno that is at least one full chunk from
771 		 * the end of the AG.
772 		 */
773 		args.min_agbno = args.mp->m_sb.sb_inoalignmt;
774 		args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
775 					    args.mp->m_sb.sb_inoalignmt) -
776 				 args.mp->m_ialloc_blks;
777 
778 		error = xfs_alloc_vextent(&args);
779 		if (error)
780 			return error;
781 
782 		newlen = args.len << args.mp->m_sb.sb_inopblog;
783 		ASSERT(newlen <= XFS_INODES_PER_CHUNK);
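		/*
		 * Example (illustrative): a minimum sparse allocation of
		 * 4 blocks with 8 inodes per block gives newlen == 32, so
		 * allocmask == (1 << (32 / 4)) - 1 == 0xff, i.e. the low
		 * half of the chunk at holemask granularity.
		 */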
784 		allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
785 	}
786 
787 	if (args.fsbno == NULLFSBLOCK) {
788 		*alloc = 0;
789 		return 0;
790 	}
791 	ASSERT(args.len == args.minlen);
792 
793 	/*
794 	 * Stamp and write the inode buffers.
795 	 *
796 	 * Seed the new inode cluster with a random generation number. This
797 	 * prevents short-term reuse of generation numbers if a chunk is
798 	 * freed and then immediately reallocated. We use random numbers
799 	 * rather than a linear progression to prevent the next generation
800 	 * number from being easily guessable.
801 	 */
802 	error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno,
803 			args.agbno, args.len, prandom_u32());
804 
805 	if (error)
806 		return error;
807 	/*
808 	 * Convert the results.
809 	 */
810 	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
811 
812 	if (xfs_inobt_issparse(~allocmask)) {
813 		/*
814 		 * We've allocated a sparse chunk. Align the startino and mask.
815 		 */
816 		xfs_align_sparse_ino(args.mp, &newino, &allocmask);
817 
818 		rec.ir_startino = newino;
819 		rec.ir_holemask = ~allocmask;
820 		rec.ir_count = newlen;
821 		rec.ir_freecount = newlen;
822 		rec.ir_free = XFS_INOBT_ALL_FREE;
823 
824 		/*
825 		 * Insert the sparse record into the inobt and allow for a merge
826 		 * if necessary. If a merge does occur, rec is updated to the
827 		 * merged record.
828 		 */
829 		error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO,
830 					       &rec, true);
831 		if (error == -EFSCORRUPTED) {
832 			xfs_alert(args.mp,
833 	"invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
834 				  XFS_AGINO_TO_INO(args.mp, agno,
835 						   rec.ir_startino),
836 				  rec.ir_holemask, rec.ir_count);
837 			xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
838 		}
839 		if (error)
840 			return error;
841 
842 		/*
843 		 * Unlike the inobt case just above, we can't merge the newly
844 		 * allocated part into the finobt, due to finobt semantics: the
845 		 * original record may or may not exist independent of whether
846 		 * physical inodes exist in this sparse chunk.
847 		 *
848 		 * We must update the finobt record based on the inobt record.
849 		 * rec contains the fully merged and up to date inobt record
850 		 * from the previous call. Set merge false to replace any
851 		 * existing record with this one.
852 		 */
853 		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
854 			error = xfs_inobt_insert_sprec(args.mp, tp, agbp,
855 						       XFS_BTNUM_FINO, &rec,
856 						       false);
857 			if (error)
858 				return error;
859 		}
860 	} else {
861 		/* full chunk - insert new records to both btrees */
862 		error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen,
863 					 XFS_BTNUM_INO);
864 		if (error)
865 			return error;
866 
867 		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
868 			error = xfs_inobt_insert(args.mp, tp, agbp, newino,
869 						 newlen, XFS_BTNUM_FINO);
870 			if (error)
871 				return error;
872 		}
873 	}
874 
875 	/*
876 	 * Update AGI counts and newino.
877 	 */
878 	be32_add_cpu(&agi->agi_count, newlen);
879 	be32_add_cpu(&agi->agi_freecount, newlen);
880 	pag = xfs_perag_get(args.mp, agno);
881 	pag->pagi_freecount += newlen;
882 	xfs_perag_put(pag);
883 	agi->agi_newino = cpu_to_be32(newino);
884 
885 	/*
886 	 * Log allocation group header fields
887 	 */
888 	xfs_ialloc_log_agi(tp, agbp,
889 		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
890 	/*
891 	 * Modify/log superblock values for inode count and inode free count.
892 	 */
893 	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
894 	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
895 	*alloc = 1;
896 	return 0;
897 }
898 
899 STATIC xfs_agnumber_t
900 xfs_ialloc_next_ag(
901 	xfs_mount_t	*mp)
902 {
903 	xfs_agnumber_t	agno;
904 
905 	spin_lock(&mp->m_agirotor_lock);
906 	agno = mp->m_agirotor;
907 	if (++mp->m_agirotor >= mp->m_maxagi)
908 		mp->m_agirotor = 0;
909 	spin_unlock(&mp->m_agirotor_lock);
910 
911 	return agno;
912 }
913 
914 /*
915  * Select an allocation group to look for a free inode in, based on the parent
916  * inode and the mode.  Return the allocation group buffer.
917  */
918 STATIC xfs_agnumber_t
919 xfs_ialloc_ag_select(
920 	xfs_trans_t	*tp,		/* transaction pointer */
921 	xfs_ino_t	parent,		/* parent directory inode number */
922 	umode_t		mode,		/* bits set to indicate file type */
923 	int		okalloc)	/* ok to allocate more space */
924 {
925 	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
926 	xfs_agnumber_t	agno;		/* current ag number */
927 	int		flags;		/* alloc buffer locking flags */
928 	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
929 	xfs_extlen_t	longest = 0;	/* longest extent available */
930 	xfs_mount_t	*mp;		/* mount point structure */
931 	int		needspace;	/* file mode implies space allocated */
932 	xfs_perag_t	*pag;		/* per allocation group data */
933 	xfs_agnumber_t	pagno;		/* parent (starting) ag number */
934 	int		error;
935 
936 	/*
937 	 * Files of these types need at least one block if length > 0
938 	 * (and they won't fit in the inode, but that's hard to figure out).
939 	 */
940 	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
941 	mp = tp->t_mountp;
942 	agcount = mp->m_maxagi;
943 	if (S_ISDIR(mode))
944 		pagno = xfs_ialloc_next_ag(mp);
945 	else {
946 		pagno = XFS_INO_TO_AGNO(mp, parent);
947 		if (pagno >= agcount)
948 			pagno = 0;
949 	}
950 
951 	ASSERT(pagno < agcount);
952 
953 	/*
954 	 * Loop through allocation groups, looking for one with a little
955 	 * free space in it.  Note we don't look for free inodes, exactly.
956 	 * Instead, we account for the possibility that inodes will need
957 	 * to be allocated, which means blocks must be allocated for them
958 	 * if none are currently free.
959 	 */
960 	agno = pagno;
961 	flags = XFS_ALLOC_FLAG_TRYLOCK;
962 	for (;;) {
963 		pag = xfs_perag_get(mp, agno);
964 		if (!pag->pagi_inodeok) {
965 			xfs_ialloc_next_ag(mp);
966 			goto nextag;
967 		}
968 
969 		if (!pag->pagi_init) {
970 			error = xfs_ialloc_pagi_init(mp, tp, agno);
971 			if (error)
972 				goto nextag;
973 		}
974 
975 		if (pag->pagi_freecount) {
976 			xfs_perag_put(pag);
977 			return agno;
978 		}
979 
980 		if (!okalloc)
981 			goto nextag;
982 
983 		if (!pag->pagf_init) {
984 			error = xfs_alloc_pagf_init(mp, tp, agno, flags);
985 			if (error)
986 				goto nextag;
987 		}
988 
989 		/*
990 		 * Check that there is enough free space for the file plus a
991 		 * chunk of inodes if we need to allocate some. If this is the
992 		 * first pass across the AGs, take into account the potential
993 		 * space needed for alignment of inode chunks when checking the
994 		 * longest contiguous free space in the AG - this prevents us
995 		 * from getting ENOSPC because we have free space larger than
996 		 * m_ialloc_blks but alignment constraints prevent us from using
997 		 * it.
998 		 *
999 		 * If we can't find an AG with space for full alignment slack to
1000 		 * be taken into account, we must be near ENOSPC in all AGs.
1001 		 * Hence we don't include alignment for the second pass and so
1002 		 * if we fail allocation due to alignment issues then it is most
1003 		 * likely a real ENOSPC condition.
1004 		 */
1005 		ineed = mp->m_ialloc_min_blks;
1006 		if (flags && ineed > 1)
1007 			ineed += xfs_ialloc_cluster_alignment(mp);
1008 		longest = pag->pagf_longest;
1009 		if (!longest)
1010 			longest = pag->pagf_flcount > 0;
1011 
1012 		if (pag->pagf_freeblks >= needspace + ineed &&
1013 		    longest >= ineed) {
1014 			xfs_perag_put(pag);
1015 			return agno;
1016 		}
1017 nextag:
1018 		xfs_perag_put(pag);
1019 		/*
1020 		 * No point in iterating over the rest, if we're shutting
1021 		 * down.
1022 		 */
1023 		if (XFS_FORCED_SHUTDOWN(mp))
1024 			return NULLAGNUMBER;
1025 		agno++;
1026 		if (agno >= agcount)
1027 			agno = 0;
1028 		if (agno == pagno) {
1029 			if (flags == 0)
1030 				return NULLAGNUMBER;
1031 			flags = 0;
1032 		}
1033 	}
1034 }
1035 
1036 /*
1037  * Try to retrieve the next record to the left/right from the current one.
1038  */
1039 STATIC int
1040 xfs_ialloc_next_rec(
1041 	struct xfs_btree_cur	*cur,
1042 	xfs_inobt_rec_incore_t	*rec,
1043 	int			*done,
1044 	int			left)
1045 {
1046 	int                     error;
1047 	int			i;
1048 
1049 	if (left)
1050 		error = xfs_btree_decrement(cur, 0, &i);
1051 	else
1052 		error = xfs_btree_increment(cur, 0, &i);
1053 
1054 	if (error)
1055 		return error;
1056 	*done = !i;
1057 	if (i) {
1058 		error = xfs_inobt_get_rec(cur, rec, &i);
1059 		if (error)
1060 			return error;
1061 		XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1062 	}
1063 
1064 	return 0;
1065 }
1066 
1067 STATIC int
1068 xfs_ialloc_get_rec(
1069 	struct xfs_btree_cur	*cur,
1070 	xfs_agino_t		agino,
1071 	xfs_inobt_rec_incore_t	*rec,
1072 	int			*done)
1073 {
1074 	int                     error;
1075 	int			i;
1076 
1077 	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
1078 	if (error)
1079 		return error;
1080 	*done = !i;
1081 	if (i) {
1082 		error = xfs_inobt_get_rec(cur, rec, &i);
1083 		if (error)
1084 			return error;
1085 		XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1086 	}
1087 
1088 	return 0;
1089 }
1090 
1091 /*
1092  * Return the offset of the first free inode in the record. If the inode chunk
1093  * is sparsely allocated, we convert the record holemask to inode granularity
1094  * and mask off the unallocated regions from the inode free mask.
1095  */
1096 STATIC int
1097 xfs_inobt_first_free_inode(
1098 	struct xfs_inobt_rec_incore	*rec)
1099 {
1100 	xfs_inofree_t			realfree;
1101 
1102 	/* if there are no holes, return the first available offset */
1103 	if (!xfs_inobt_issparse(rec->ir_holemask))
1104 		return xfs_lowbit64(rec->ir_free);
1105 
1106 	realfree = xfs_inobt_irec_to_allocmask(rec);
1107 	realfree &= rec->ir_free;
1108 
1109 	return xfs_lowbit64(realfree);
1110 }
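
/*
 * Example (illustrative): for a sparse record where only the second
 * half of the chunk exists (ir_holemask == 0x00ff, i.e. holes over
 * inodes 0-31), the low 32 bits of the allocation mask are clear, so
 * any set bits in the low half of ir_free are masked off and the first
 * usable offset returned is >= 32.
 */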
1111 
1112 /*
1113  * Allocate an inode using the inobt-only algorithm.
1114  */
1115 STATIC int
1116 xfs_dialloc_ag_inobt(
1117 	struct xfs_trans	*tp,
1118 	struct xfs_buf		*agbp,
1119 	xfs_ino_t		parent,
1120 	xfs_ino_t		*inop)
1121 {
1122 	struct xfs_mount	*mp = tp->t_mountp;
1123 	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
1124 	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
1125 	xfs_agnumber_t		pagno = XFS_INO_TO_AGNO(mp, parent);
1126 	xfs_agino_t		pagino = XFS_INO_TO_AGINO(mp, parent);
1127 	struct xfs_perag	*pag;
1128 	struct xfs_btree_cur	*cur, *tcur;
1129 	struct xfs_inobt_rec_incore rec, trec;
1130 	xfs_ino_t		ino;
1131 	int			error;
1132 	int			offset;
1133 	int			i, j;
1134 	int			searchdistance = 10;
1135 
1136 	pag = xfs_perag_get(mp, agno);
1137 
1138 	ASSERT(pag->pagi_init);
1139 	ASSERT(pag->pagi_inodeok);
1140 	ASSERT(pag->pagi_freecount > 0);
1141 
1142  restart_pagno:
1143 	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1144 	/*
1145 	 * If pagino is 0 (this is the root inode allocation) use newino.
1146 	 * This must work because we've just allocated some.
1147 	 */
1148 	if (!pagino)
1149 		pagino = be32_to_cpu(agi->agi_newino);
1150 
1151 	error = xfs_check_agi_freecount(cur, agi);
1152 	if (error)
1153 		goto error0;
1154 
1155 	/*
1156 	 * If in the same AG as the parent, try to get near the parent.
1157 	 */
1158 	if (pagno == agno) {
1159 		int		doneleft;	/* done, to the left */
1160 		int		doneright;	/* done, to the right */
1161 
1162 		error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
1163 		if (error)
1164 			goto error0;
1165 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1166 
1167 		error = xfs_inobt_get_rec(cur, &rec, &j);
1168 		if (error)
1169 			goto error0;
1170 		XFS_WANT_CORRUPTED_GOTO(mp, j == 1, error0);
1171 
1172 		if (rec.ir_freecount > 0) {
1173 			/*
1174 			 * Found a free inode in the same chunk
1175 			 * as the parent, done.
1176 			 */
1177 			goto alloc_inode;
1178 		}
1179 
1180 
1181 		/*
1182 		 * In the same AG as parent, but parent's chunk is full.
1183 		 */
1184 
1185 		/* duplicate the cursor, search left & right simultaneously */
1186 		error = xfs_btree_dup_cursor(cur, &tcur);
1187 		if (error)
1188 			goto error0;
1189 
1190 		/*
1191 		 * Skip to last blocks looked up if same parent inode.
1192 		 */
1193 		if (pagino != NULLAGINO &&
1194 		    pag->pagl_pagino == pagino &&
1195 		    pag->pagl_leftrec != NULLAGINO &&
1196 		    pag->pagl_rightrec != NULLAGINO) {
1197 			error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
1198 						   &trec, &doneleft);
1199 			if (error)
1200 				goto error1;
1201 
1202 			error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
1203 						   &rec, &doneright);
1204 			if (error)
1205 				goto error1;
1206 		} else {
1207 			/* search left with tcur, back up 1 record */
1208 			error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
1209 			if (error)
1210 				goto error1;
1211 
1212 			/* search right with cur, go forward 1 record. */
1213 			error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
1214 			if (error)
1215 				goto error1;
1216 		}
1217 
1218 		/*
1219 		 * Loop until we find an inode chunk with a free inode.
1220 		 */
1221 		while (--searchdistance > 0 && (!doneleft || !doneright)) {
1222 			int	useleft;  /* using left inode chunk this time */
1223 
1224 			/* figure out the closer block if both are valid. */
1225 			if (!doneleft && !doneright) {
1226 				useleft = pagino -
1227 				 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
1228 				  rec.ir_startino - pagino;
1229 			} else {
1230 				useleft = !doneleft;
1231 			}
1232 
1233 			/* free inodes to the left? */
1234 			if (useleft && trec.ir_freecount) {
1235 				xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1236 				cur = tcur;
1237 
1238 				pag->pagl_leftrec = trec.ir_startino;
1239 				pag->pagl_rightrec = rec.ir_startino;
1240 				pag->pagl_pagino = pagino;
1241 				rec = trec;
1242 				goto alloc_inode;
1243 			}
1244 
1245 			/* free inodes to the right? */
1246 			if (!useleft && rec.ir_freecount) {
1247 				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1248 
1249 				pag->pagl_leftrec = trec.ir_startino;
1250 				pag->pagl_rightrec = rec.ir_startino;
1251 				pag->pagl_pagino = pagino;
1252 				goto alloc_inode;
1253 			}
1254 
1255 			/* get next record to check */
1256 			if (useleft) {
1257 				error = xfs_ialloc_next_rec(tcur, &trec,
1258 								 &doneleft, 1);
1259 			} else {
1260 				error = xfs_ialloc_next_rec(cur, &rec,
1261 								 &doneright, 0);
1262 			}
1263 			if (error)
1264 				goto error1;
1265 		}
1266 
1267 		if (searchdistance <= 0) {
1268 			/*
1269 			 * Not in range - save last search
1270 			 * location and allocate a new inode
1271 			 */
1272 			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1273 			pag->pagl_leftrec = trec.ir_startino;
1274 			pag->pagl_rightrec = rec.ir_startino;
1275 			pag->pagl_pagino = pagino;
1276 
1277 		} else {
1278 			/*
1279 			 * We've reached the end of the btree. Because
1280 			 * we only search a small chunk of the btree on
1281 			 * each search, there are obviously free inodes
1282 			 * closer to the parent inode than we are now.
1283 			 * Restart the search.
1284 			 */
1285 			pag->pagl_pagino = NULLAGINO;
1286 			pag->pagl_leftrec = NULLAGINO;
1287 			pag->pagl_rightrec = NULLAGINO;
1288 			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1289 			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1290 			goto restart_pagno;
1291 		}
1292 	}
1293 
1294 	/*
1295 	 * In a different AG from the parent.
1296 	 * See if the most recently allocated block has any free inodes.
1297 	 */
1298 	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1299 		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1300 					 XFS_LOOKUP_EQ, &i);
1301 		if (error)
1302 			goto error0;
1303 
1304 		if (i == 1) {
1305 			error = xfs_inobt_get_rec(cur, &rec, &j);
1306 			if (error)
1307 				goto error0;
1308 
1309 			if (j == 1 && rec.ir_freecount > 0) {
1310 				/*
1311 				 * The last chunk allocated in the group
1312 				 * still has a free inode.
1313 				 */
1314 				goto alloc_inode;
1315 			}
1316 		}
1317 	}
1318 
1319 	/*
1320 	 * None left in the last group, search the whole AG
1321 	 */
1322 	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1323 	if (error)
1324 		goto error0;
1325 	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1326 
1327 	for (;;) {
1328 		error = xfs_inobt_get_rec(cur, &rec, &i);
1329 		if (error)
1330 			goto error0;
1331 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1332 		if (rec.ir_freecount > 0)
1333 			break;
1334 		error = xfs_btree_increment(cur, 0, &i);
1335 		if (error)
1336 			goto error0;
1337 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1338 	}
1339 
1340 alloc_inode:
1341 	offset = xfs_inobt_first_free_inode(&rec);
1342 	ASSERT(offset >= 0);
1343 	ASSERT(offset < XFS_INODES_PER_CHUNK);
1344 	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1345 				   XFS_INODES_PER_CHUNK) == 0);
1346 	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
1347 	rec.ir_free &= ~XFS_INOBT_MASK(offset);
1348 	rec.ir_freecount--;
1349 	error = xfs_inobt_update(cur, &rec);
1350 	if (error)
1351 		goto error0;
1352 	be32_add_cpu(&agi->agi_freecount, -1);
1353 	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1354 	pag->pagi_freecount--;
1355 
1356 	error = xfs_check_agi_freecount(cur, agi);
1357 	if (error)
1358 		goto error0;
1359 
1360 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1361 	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1362 	xfs_perag_put(pag);
1363 	*inop = ino;
1364 	return 0;
1365 error1:
1366 	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1367 error0:
1368 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1369 	xfs_perag_put(pag);
1370 	return error;
1371 }
1372 
1373 /*
1374  * Use the free inode btree to allocate an inode based on distance from the
1375  * parent. Note that the provided cursor may be deleted and replaced.
1376  */
1377 STATIC int
1378 xfs_dialloc_ag_finobt_near(
1379 	xfs_agino_t			pagino,
1380 	struct xfs_btree_cur		**ocur,
1381 	struct xfs_inobt_rec_incore	*rec)
1382 {
1383 	struct xfs_btree_cur		*lcur = *ocur;	/* left search cursor */
1384 	struct xfs_btree_cur		*rcur;	/* right search cursor */
1385 	struct xfs_inobt_rec_incore	rrec;
1386 	int				error;
1387 	int				i, j;
1388 
1389 	error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
1390 	if (error)
1391 		return error;
1392 
1393 	if (i == 1) {
1394 		error = xfs_inobt_get_rec(lcur, rec, &i);
1395 		if (error)
1396 			return error;
1397 		XFS_WANT_CORRUPTED_RETURN(lcur->bc_mp, i == 1);
1398 
1399 		/*
1400 		 * See if we've landed in the parent inode record. The finobt
1401 		 * only tracks chunks with at least one free inode, so record
1402 		 * existence is enough.
1403 		 */
1404 		if (pagino >= rec->ir_startino &&
1405 		    pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
1406 			return 0;
1407 	}
1408 
1409 	error = xfs_btree_dup_cursor(lcur, &rcur);
1410 	if (error)
1411 		return error;
1412 
1413 	error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
1414 	if (error)
1415 		goto error_rcur;
1416 	if (j == 1) {
1417 		error = xfs_inobt_get_rec(rcur, &rrec, &j);
1418 		if (error)
1419 			goto error_rcur;
1420 		XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, j == 1, error_rcur);
1421 	}
1422 
1423 	XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, i == 1 || j == 1, error_rcur);
1424 	if (i == 1 && j == 1) {
1425 		/*
1426 		 * Both the left and right records are valid. Choose the closer
1427 		 * inode chunk to the target.
1428 		 */
1429 		if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
1430 		    (rrec.ir_startino - pagino)) {
1431 			*rec = rrec;
1432 			xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1433 			*ocur = rcur;
1434 		} else {
1435 			xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1436 		}
1437 	} else if (j == 1) {
1438 		/* only the right record is valid */
1439 		*rec = rrec;
1440 		xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1441 		*ocur = rcur;
1442 	} else if (i == 1) {
1443 		/* only the left record is valid */
1444 		xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1445 	}
1446 
1447 	return 0;
1448 
1449 error_rcur:
1450 	xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
1451 	return error;
1452 }
1453 
1454 /*
1455  * Use the free inode btree to find a free inode based on a newino hint. If
1456  * the hint is NULLAGINO, find the first free inode in the AG.
1457  */
1458 STATIC int
1459 xfs_dialloc_ag_finobt_newino(
1460 	struct xfs_agi			*agi,
1461 	struct xfs_btree_cur		*cur,
1462 	struct xfs_inobt_rec_incore	*rec)
1463 {
1464 	int error;
1465 	int i;
1466 
1467 	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1468 		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1469 					 XFS_LOOKUP_EQ, &i);
1470 		if (error)
1471 			return error;
1472 		if (i == 1) {
1473 			error = xfs_inobt_get_rec(cur, rec, &i);
1474 			if (error)
1475 				return error;
1476 			XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1477 			return 0;
1478 		}
1479 	}
1480 
1481 	/*
1482 	 * Find the first inode available in the AG.
1483 	 */
1484 	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1485 	if (error)
1486 		return error;
1487 	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1488 
1489 	error = xfs_inobt_get_rec(cur, rec, &i);
1490 	if (error)
1491 		return error;
1492 	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1493 
1494 	return 0;
1495 }
1496 
1497 /*
1498  * Update the inobt based on a modification made to the finobt. Also ensure that
1499  * the records from both trees are equivalent post-modification.
1500  */
1501 STATIC int
1502 xfs_dialloc_ag_update_inobt(
1503 	struct xfs_btree_cur		*cur,	/* inobt cursor */
1504 	struct xfs_inobt_rec_incore	*frec,	/* finobt record */
1505 	int				offset) /* inode offset */
1506 {
1507 	struct xfs_inobt_rec_incore	rec;
1508 	int				error;
1509 	int				i;
1510 
1511 	error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
1512 	if (error)
1513 		return error;
1514 	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1515 
1516 	error = xfs_inobt_get_rec(cur, &rec, &i);
1517 	if (error)
1518 		return error;
1519 	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1520 	ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
1521 				   XFS_INODES_PER_CHUNK) == 0);
1522 
1523 	rec.ir_free &= ~XFS_INOBT_MASK(offset);
1524 	rec.ir_freecount--;
1525 
1526 	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, (rec.ir_free == frec->ir_free) &&
1527 				  (rec.ir_freecount == frec->ir_freecount));
1528 
1529 	return xfs_inobt_update(cur, &rec);
1530 }
1531 
1532 /*
1533  * Allocate an inode using the free inode btree, if available. Otherwise, fall
1534  * back to the inobt search algorithm.
1535  *
1536  * The caller selected an AG for us, and made sure that free inodes are
1537  * available.
1538  */
1539 STATIC int
1540 xfs_dialloc_ag(
1541 	struct xfs_trans	*tp,
1542 	struct xfs_buf		*agbp,
1543 	xfs_ino_t		parent,
1544 	xfs_ino_t		*inop)
1545 {
1546 	struct xfs_mount		*mp = tp->t_mountp;
1547 	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
1548 	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
1549 	xfs_agnumber_t			pagno = XFS_INO_TO_AGNO(mp, parent);
1550 	xfs_agino_t			pagino = XFS_INO_TO_AGINO(mp, parent);
1551 	struct xfs_perag		*pag;
1552 	struct xfs_btree_cur		*cur;	/* finobt cursor */
1553 	struct xfs_btree_cur		*icur;	/* inobt cursor */
1554 	struct xfs_inobt_rec_incore	rec;
1555 	xfs_ino_t			ino;
1556 	int				error;
1557 	int				offset;
1558 	int				i;
1559 
1560 	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
1561 		return xfs_dialloc_ag_inobt(tp, agbp, parent, inop);
1562 
1563 	pag = xfs_perag_get(mp, agno);
1564 
1565 	/*
1566 	 * If pagino is 0 (this is the root inode allocation) use newino.
1567 	 * This must work because we've just allocated some.
1568 	 */
1569 	if (!pagino)
1570 		pagino = be32_to_cpu(agi->agi_newino);
1571 
1572 	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
1573 
1574 	error = xfs_check_agi_freecount(cur, agi);
1575 	if (error)
1576 		goto error_cur;
1577 
1578 	/*
1579 	 * The search algorithm depends on whether we're in the same AG as the
1580 	 * parent. If so, find the closest available inode to the parent. If
1581 	 * not, consider the agi hint or find the first free inode in the AG.
1582 	 */
1583 	if (agno == pagno)
1584 		error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
1585 	else
1586 		error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
1587 	if (error)
1588 		goto error_cur;
1589 
1590 	offset = xfs_inobt_first_free_inode(&rec);
1591 	ASSERT(offset >= 0);
1592 	ASSERT(offset < XFS_INODES_PER_CHUNK);
1593 	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1594 				   XFS_INODES_PER_CHUNK) == 0);
1595 	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
1596 
1597 	/*
1598 	 * Modify or remove the finobt record.
1599 	 */
1600 	rec.ir_free &= ~XFS_INOBT_MASK(offset);
1601 	rec.ir_freecount--;
1602 	if (rec.ir_freecount)
1603 		error = xfs_inobt_update(cur, &rec);
1604 	else
1605 		error = xfs_btree_delete(cur, &i);
1606 	if (error)
1607 		goto error_cur;
1608 
1609 	/*
1610 	 * The finobt has now been updated appropriately. We haven't updated the
1611 	 * agi and superblock yet, so we can create an inobt cursor and validate
1612 	 * the original freecount. If all is well, make the equivalent update to
1613 	 * the inobt using the finobt record and offset information.
1614 	 */
1615 	icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1616 
1617 	error = xfs_check_agi_freecount(icur, agi);
1618 	if (error)
1619 		goto error_icur;
1620 
1621 	error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
1622 	if (error)
1623 		goto error_icur;
1624 
1625 	/*
1626 	 * Both trees have now been updated. We must update the perag and
1627 	 * superblock before we can check the freecount for each btree.
1628 	 */
1629 	be32_add_cpu(&agi->agi_freecount, -1);
1630 	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1631 	pag->pagi_freecount--;
1632 
1633 	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1634 
1635 	error = xfs_check_agi_freecount(icur, agi);
1636 	if (error)
1637 		goto error_icur;
1638 	error = xfs_check_agi_freecount(cur, agi);
1639 	if (error)
1640 		goto error_icur;
1641 
1642 	xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
1643 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1644 	xfs_perag_put(pag);
1645 	*inop = ino;
1646 	return 0;
1647 
1648 error_icur:
1649 	xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
1650 error_cur:
1651 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1652 	xfs_perag_put(pag);
1653 	return error;
1654 }
1655 
1656 /*
1657  * Allocate an inode on disk.
1658  *
1659  * Mode is used to tell whether the new inode will need space, and whether it
1660  * is a directory.
1661  *
1662  * This function is designed to be called twice if it has to do an allocation
1663  * to make more free inodes.  On the first call, *IO_agbp should be set to NULL.
1664  * If an inode is available without having to performn an allocation, an inode
1665  * If an inode is available without having to perform an allocation, an inode
1666  * needs to be done, xfs_dialloc returns the current AGI buffer in *IO_agbp.
1667  * The caller should then commit the current transaction, allocate a
1668  * new transaction, and call xfs_dialloc() again, passing in the previous value
1669  * of *IO_agbp.  IO_agbp should be held across the transactions. Since the AGI
1670  * buffer is locked across the two calls, the second call is guaranteed to have
1671  * a free inode available.
1672  *
1673  * Once we successfully pick an inode its number is returned and the on-disk
1674  * Once we successfully pick an inode, its number is returned and the on-disk
1675  * would break ordering constraints with xfs_reclaim.
1676  */
1677 int
1678 xfs_dialloc(
1679 	struct xfs_trans	*tp,
1680 	xfs_ino_t		parent,
1681 	umode_t			mode,
1682 	int			okalloc,
1683 	struct xfs_buf		**IO_agbp,
1684 	xfs_ino_t		*inop)
1685 {
1686 	struct xfs_mount	*mp = tp->t_mountp;
1687 	struct xfs_buf		*agbp;
1688 	xfs_agnumber_t		agno;
1689 	int			error;
1690 	int			ialloced;
1691 	int			noroom = 0;
1692 	xfs_agnumber_t		start_agno;
1693 	struct xfs_perag	*pag;
1694 
1695 	if (*IO_agbp) {
1696 		/*
1697 		 * If the caller passes in a pointer to the AGI buffer,
1698 		 * continue where we left off before.  In this case, we
1699 		 * know that the allocation group has free inodes.
1700 		 */
1701 		agbp = *IO_agbp;
1702 		goto out_alloc;
1703 	}
1704 
1705 	/*
1706 	 * We do not have an agbp, so select an initial allocation
1707 	 * group for inode allocation.
1708 	 */
1709 	start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
1710 	if (start_agno == NULLAGNUMBER) {
1711 		*inop = NULLFSINO;
1712 		return 0;
1713 	}
1714 
1715 	/*
1716 	 * If we have already hit the ceiling of inode blocks then clear
1717 	 * okalloc so we scan all available agi structures for a free
1718 	 * inode.
1719 	 *
1720 	 * Read a rough value of mp->m_icount via percpu_counter_read_positive,
1721 	 * which sacrifices precision but improves performance.
1722 	 */
1723 	if (mp->m_maxicount &&
1724 	    percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos
1725 							> mp->m_maxicount) {
1726 		noroom = 1;
1727 		okalloc = 0;
1728 	}
1729 
1730 	/*
1731 	 * Loop until we find an allocation group that either has free inodes
1732 	 * or in which we can allocate some inodes.  Iterate through the
1733 	 * allocation groups upward, wrapping at the end.
1734 	 */
1735 	agno = start_agno;
1736 	for (;;) {
1737 		pag = xfs_perag_get(mp, agno);
1738 		if (!pag->pagi_inodeok) {
1739 			xfs_ialloc_next_ag(mp);
1740 			goto nextag;
1741 		}
1742 
1743 		if (!pag->pagi_init) {
1744 			error = xfs_ialloc_pagi_init(mp, tp, agno);
1745 			if (error)
1746 				goto out_error;
1747 		}
1748 
1749 		/*
1750 		 * Do a first racy fast path check if this AG is usable.
1751 		 * Do a first racy fast path check to see if this AG is usable.
1752 		if (!pag->pagi_freecount && !okalloc)
1753 			goto nextag;
1754 
1755 		/*
1756 		 * Then read in the AGI buffer and recheck with the AGI buffer
1757 		 * lock held.
1758 		 */
1759 		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
1760 		if (error)
1761 			goto out_error;
1762 
1763 		if (pag->pagi_freecount) {
1764 			xfs_perag_put(pag);
1765 			goto out_alloc;
1766 		}
1767 
1768 		if (!okalloc)
1769 			goto nextag_relse_buffer;
1770 
1772 		error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
1773 		if (error) {
1774 			xfs_trans_brelse(tp, agbp);
1775 
1776 			if (error != -ENOSPC)
1777 				goto out_error;
1778 
1779 			xfs_perag_put(pag);
1780 			*inop = NULLFSINO;
1781 			return 0;
1782 		}
1783 
1784 		if (ialloced) {
1785 			/*
1786 			 * We successfully allocated some inodes, return
1787 			 * the current context to the caller so that it
1788 			 * can commit the current transaction and call
1789 			 * us again where we left off.
1790 			 */
1791 			ASSERT(pag->pagi_freecount > 0);
1792 			xfs_perag_put(pag);
1793 
1794 			*IO_agbp = agbp;
1795 			*inop = NULLFSINO;
1796 			return 0;
1797 		}
1798 
1799 nextag_relse_buffer:
1800 		xfs_trans_brelse(tp, agbp);
1801 nextag:
1802 		xfs_perag_put(pag);
1803 		if (++agno == mp->m_sb.sb_agcount)
1804 			agno = 0;
1805 		if (agno == start_agno) {
1806 			*inop = NULLFSINO;
1807 			return noroom ? -ENOSPC : 0;
1808 		}
1809 	}
1810 
1811 out_alloc:
1812 	*IO_agbp = NULL;
1813 	return xfs_dialloc_ag(tp, agbp, parent, inop);
1814 out_error:
1815 	xfs_perag_put(pag);
1816 	return error;
1817 }
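
/*
 * A minimal sketch of the two-call protocol described above, loosely
 * modelled on the in-kernel caller (xfs_dir_ialloc). Transaction
 * reservation, dquot and shutdown handling are omitted and the helper
 * name is hypothetical; treat this as an illustration of the calling
 * pattern, not the canonical caller.
 */
static int
example_dialloc_retry(
	struct xfs_trans	**tpp,
	xfs_ino_t		parent,
	umode_t			mode,
	xfs_ino_t		*inop)
{
	struct xfs_buf		*agibp = NULL;
	int			error;

	/* First call: may hand back a locked AGI buffer instead of an inode. */
	error = xfs_dialloc(*tpp, parent, mode, 1, &agibp, inop);
	if (error || !agibp)
		return error;

	/*
	 * A chunk was allocated. Hold the AGI buffer locked across the
	 * transaction roll, rejoin it, and retry; the second call is
	 * then guaranteed to find a free inode.
	 */
	xfs_trans_bhold(*tpp, agibp);
	error = xfs_trans_roll(tpp);
	if (error)
		return error;
	xfs_trans_bjoin(*tpp, agibp);
	return xfs_dialloc(*tpp, parent, mode, 1, &agibp, inop);
}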
1818 
1819 /*
1820  * Free the blocks of an inode chunk. The chunk may be sparse, in which
1821  * case we must only free the regions that are actually allocated as part
1822  * of the chunk.
1823  */
1824 STATIC void
1825 xfs_difree_inode_chunk(
1826 	struct xfs_mount		*mp,
1827 	xfs_agnumber_t			agno,
1828 	struct xfs_inobt_rec_incore	*rec,
1829 	struct xfs_defer_ops		*dfops)
1830 {
1831 	xfs_agblock_t	sagbno = XFS_AGINO_TO_AGBNO(mp, rec->ir_startino);
1832 	int		startidx, endidx;
1833 	int		nextbit;
1834 	xfs_agblock_t	agbno;
1835 	int		contigblk;
1836 	struct xfs_owner_info	oinfo;
1837 	DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
1838 	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
1839 
1840 	if (!xfs_inobt_issparse(rec->ir_holemask)) {
1841 		/* not sparse, calculate extent info directly */
1842 		xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, sagbno),
1843 				  mp->m_ialloc_blks, &oinfo);
1844 		return;
1845 	}
1846 
1847 	/* the holemask is only 16 bits wide (it fits in an unsigned long) */
1848 	ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
1849 	holemask[0] = rec->ir_holemask;
1850 
1851 	/*
1852 	 * Find contiguous ranges of zeroes (i.e., allocated regions) in the
1853 	 * holemask and convert the start/end index of each range to an extent.
1854 	 * We start with the start and end index both pointing at the first 0 in
1855 	 * the mask.
1856 	 */
1857 	startidx = endidx = find_first_zero_bit(holemask,
1858 						XFS_INOBT_HOLEMASK_BITS);
1859 	nextbit = startidx + 1;
1860 	while (startidx < XFS_INOBT_HOLEMASK_BITS) {
1861 		nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
1862 					     nextbit);
1863 		/*
1864 		 * If the next zero bit is contiguous, update the end index of
1865 		 * the current range and continue.
1866 		 */
1867 		if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
1868 		    nextbit == endidx + 1) {
1869 			endidx = nextbit;
1870 			goto next;
1871 		}
1872 
1873 		/*
1874 		 * nextbit is not contiguous with the current end index. Convert
1875 		 * the current start/end to an extent and add it to the free
1876 		 * list.
1877 		 */
1878 		agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
1879 				  mp->m_sb.sb_inopblock;
1880 		contigblk = ((endidx - startidx + 1) *
1881 			     XFS_INODES_PER_HOLEMASK_BIT) /
1882 			    mp->m_sb.sb_inopblock;
1883 
1884 		ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
1885 		ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
1886 		xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, agbno),
1887 				  contigblk, &oinfo);
1888 
1889 		/* reset range to current bit and carry on... */
1890 		startidx = endidx = nextbit;
1891 
1892 next:
1893 		nextbit++;
1894 	}
1895 }
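
/*
 * Illustrative userspace re-creation of the zero-bit range walk above,
 * assuming the common geometry of a 64-inode chunk covered by a 16-bit
 * holemask (so each bit stands for 4 inodes). Set bits are holes; runs
 * of clear bits are the allocated regions that get freed. All names
 * here are hypothetical and exist only for this sketch.
 */
#include <stdio.h>

#define HOLEMASK_BITS		16
#define INODES_PER_BIT		4

static void
walk_allocated_ranges(unsigned int holemask)
{
	int	startidx = -1;
	int	idx;

	for (idx = 0; idx <= HOLEMASK_BITS; idx++) {
		int	is_hole = idx == HOLEMASK_BITS ||
				  (holemask & (1U << idx));

		if (!is_hole && startidx < 0) {
			startidx = idx;		/* open an allocated range */
		} else if (is_hole && startidx >= 0) {
			/* close the range covering bits [startidx, idx) */
			printf("free inodes %d-%d\n",
			       startidx * INODES_PER_BIT,
			       idx * INODES_PER_BIT - 1);
			startidx = -1;
		}
	}
}

int
main(void)
{
	/* holes at bits 4-7 and 12-15: frees inodes 0-15 and 32-47 */
	walk_allocated_ranges(0xF0F0);
	return 0;
}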
1896 
1897 STATIC int
1898 xfs_difree_inobt(
1899 	struct xfs_mount		*mp,
1900 	struct xfs_trans		*tp,
1901 	struct xfs_buf			*agbp,
1902 	xfs_agino_t			agino,
1903 	struct xfs_defer_ops		*dfops,
1904 	struct xfs_icluster		*xic,
1905 	struct xfs_inobt_rec_incore	*orec)
1906 {
1907 	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
1908 	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
1909 	struct xfs_perag		*pag;
1910 	struct xfs_btree_cur		*cur;
1911 	struct xfs_inobt_rec_incore	rec;
1912 	int				ilen;
1913 	int				error;
1914 	int				i;
1915 	int				off;
1916 
1917 	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
1918 	ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
1919 
1920 	/*
1921 	 * Initialize the cursor.
1922 	 */
1923 	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1924 
1925 	error = xfs_check_agi_freecount(cur, agi);
1926 	if (error)
1927 		goto error0;
1928 
1929 	/*
1930 	 * Look for the entry describing this inode.
1931 	 */
1932 	if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
1933 		xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
1934 			__func__, error);
1935 		goto error0;
1936 	}
1937 	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1938 	error = xfs_inobt_get_rec(cur, &rec, &i);
1939 	if (error) {
1940 		xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
1941 			__func__, error);
1942 		goto error0;
1943 	}
1944 	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1945 	/*
1946 	 * Get the offset in the inode chunk.
1947 	 */
1948 	off = agino - rec.ir_startino;
1949 	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
1950 	ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
1951 	/*
1952 	 * Mark the inode free & increment the count.
1953 	 */
1954 	rec.ir_free |= XFS_INOBT_MASK(off);
1955 	rec.ir_freecount++;
1956 
1957 	/*
1958 	 * When an inode chunk is free, it becomes eligible for removal. Don't
1959 	 * remove the chunk if the block size is large enough for multiple inode
1960 	 * chunks (which might not all be free).
1961 	 */
1962 	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
1963 	    rec.ir_free == XFS_INOBT_ALL_FREE &&
1964 	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
1965 		xic->deleted = true;
1966 		xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
1967 		xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
1968 
1969 		/*
1970 		 * Remove the inode cluster from the AGI B+Tree, adjust the
1971 		 * AGI and Superblock inode counts, and mark the disk space
1972 		 * to be freed when the transaction is committed.
1973 		 */
1974 		ilen = rec.ir_freecount;
1975 		be32_add_cpu(&agi->agi_count, -ilen);
1976 		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
1977 		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
1978 		pag = xfs_perag_get(mp, agno);
1979 		pag->pagi_freecount -= ilen - 1;
1980 		xfs_perag_put(pag);
1981 		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
1982 		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
1983 
1984 		if ((error = xfs_btree_delete(cur, &i))) {
1985 			xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
1986 				__func__, error);
1987 			goto error0;
1988 		}
1989 
1990 		xfs_difree_inode_chunk(mp, agno, &rec, dfops);
1991 	} else {
1992 		xic->deleted = false;
1993 
1994 		error = xfs_inobt_update(cur, &rec);
1995 		if (error) {
1996 			xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
1997 				__func__, error);
1998 			goto error0;
1999 		}
2000 
2001 		/*
2002 		 * Change the inode free counts and log the ag/sb changes.
2003 		 */
2004 		be32_add_cpu(&agi->agi_freecount, 1);
2005 		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
2006 		pag = xfs_perag_get(mp, agno);
2007 		pag->pagi_freecount++;
2008 		xfs_perag_put(pag);
2009 		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
2010 	}
2011 
2012 	error = xfs_check_agi_freecount(cur, agi);
2013 	if (error)
2014 		goto error0;
2015 
2016 	*orec = rec;
2017 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2018 	return 0;
2019 
2020 error0:
2021 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2022 	return error;
2023 }
2024 
2025 /*
2026  * Free an inode in the free inode btree.
2027  */
2028 STATIC int
2029 xfs_difree_finobt(
2030 	struct xfs_mount		*mp,
2031 	struct xfs_trans		*tp,
2032 	struct xfs_buf			*agbp,
2033 	xfs_agino_t			agino,
2034 	struct xfs_inobt_rec_incore	*ibtrec) /* inobt record */
2035 {
2036 	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
2037 	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
2038 	struct xfs_btree_cur		*cur;
2039 	struct xfs_inobt_rec_incore	rec;
2040 	int				offset = agino - ibtrec->ir_startino;
2041 	int				error;
2042 	int				i;
2043 
2044 	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
2045 
2046 	error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
2047 	if (error)
2048 		goto error;
2049 	if (i == 0) {
2050 		/*
2051 		 * If the record does not exist in the finobt, we must have just
2052 		 * freed an inode in a previously fully allocated chunk. If not,
2053 		 * something is out of sync.
2054 		 */
2055 		XFS_WANT_CORRUPTED_GOTO(mp, ibtrec->ir_freecount == 1, error);
2056 
2057 		error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
2058 					     ibtrec->ir_count,
2059 					     ibtrec->ir_freecount,
2060 					     ibtrec->ir_free, &i);
2061 		if (error)
2062 			goto error;
2063 		ASSERT(i == 1);
2064 
2065 		goto out;
2066 	}
2067 
2068 	/*
2069 	 * Read and update the existing record. We could just copy the ibtrec
2070 	 * across here, but that would defeat the purpose of having redundant
2071 	 * metadata. By making the modifications independently, we can catch
2072 	 * corruptions that we wouldn't see if we just copied from one record
2073 	 * to another.
2074 	 */
2075 	error = xfs_inobt_get_rec(cur, &rec, &i);
2076 	if (error)
2077 		goto error;
2078 	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
2079 
2080 	rec.ir_free |= XFS_INOBT_MASK(offset);
2081 	rec.ir_freecount++;
2082 
2083 	XFS_WANT_CORRUPTED_GOTO(mp, (rec.ir_free == ibtrec->ir_free) &&
2084 				(rec.ir_freecount == ibtrec->ir_freecount),
2085 				error);
2086 
2087 	/*
2088 	 * The content of inobt records should always match between the inobt
2089 	 * and finobt. The lifecycle of records in the finobt is different from
2090 	 * the inobt in that the finobt only tracks records with at least one
2091 	 * free inode. Hence, if all of the inodes are free and we aren't
2092 	 * keeping inode chunks permanently on disk, remove the record.
2093 	 * Otherwise, update the record with the new information.
2094 	 *
2095 	 * Note that we currently can't free chunks when the block size is large
2096 	 * enough for multiple chunks. In that case, leave the finobt record in
2097 	 * place so that it stays in sync with the inobt.
2098 	 */
2099 	if (rec.ir_free == XFS_INOBT_ALL_FREE &&
2100 	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
2101 	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
2102 		error = xfs_btree_delete(cur, &i);
2103 		if (error)
2104 			goto error;
2105 		ASSERT(i == 1);
2106 	} else {
2107 		error = xfs_inobt_update(cur, &rec);
2108 		if (error)
2109 			goto error;
2110 	}
2111 
2112 out:
2113 	error = xfs_check_agi_freecount(cur, agi);
2114 	if (error)
2115 		goto error;
2116 
2117 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2118 	return 0;
2119 
2120 error:
2121 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2122 	return error;
2123 }
2124 
2125 /*
2126  * Free a disk inode.  This carefully avoids touching the incore inode;
2127  * all incore manipulations are the caller's responsibility.
2128  * The on-disk inode is not changed by this operation, only the
2129  * btree (free inode mask) is changed.
2130  */
2131 int
2132 xfs_difree(
2133 	struct xfs_trans	*tp,		/* transaction pointer */
2134 	xfs_ino_t		inode,		/* inode to be freed */
2135 	struct xfs_defer_ops	*dfops,		/* extents to free */
2136 	struct xfs_icluster	*xic)	/* cluster info if deleted */
2137 {
2138 	/* REFERENCED */
2139 	xfs_agblock_t		agbno;	/* block number containing inode */
2140 	struct xfs_buf		*agbp;	/* buffer for allocation group header */
2141 	xfs_agino_t		agino;	/* allocation group inode number */
2142 	xfs_agnumber_t		agno;	/* allocation group number */
2143 	int			error;	/* error return value */
2144 	struct xfs_mount	*mp;	/* mount structure for filesystem */
2145 	struct xfs_inobt_rec_incore rec;/* btree record */
2146 
2147 	mp = tp->t_mountp;
2148 
2149 	/*
2150 	 * Break up inode number into its components.
2151 	 */
2152 	agno = XFS_INO_TO_AGNO(mp, inode);
2153 	if (agno >= mp->m_sb.sb_agcount)  {
2154 		xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
2155 			__func__, agno, mp->m_sb.sb_agcount);
2156 		ASSERT(0);
2157 		return -EINVAL;
2158 	}
2159 	agino = XFS_INO_TO_AGINO(mp, inode);
2160 	if (inode != XFS_AGINO_TO_INO(mp, agno, agino))  {
2161 		xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
2162 			__func__, (unsigned long long)inode,
2163 			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
2164 		ASSERT(0);
2165 		return -EINVAL;
2166 	}
2167 	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2168 	if (agbno >= mp->m_sb.sb_agblocks)  {
2169 		xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
2170 			__func__, agbno, mp->m_sb.sb_agblocks);
2171 		ASSERT(0);
2172 		return -EINVAL;
2173 	}
2174 	/*
2175 	 * Get the allocation group header.
2176 	 */
2177 	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2178 	if (error) {
2179 		xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
2180 			__func__, error);
2181 		return error;
2182 	}
2183 
2184 	/*
2185 	 * Fix up the inode allocation btree.
2186 	 */
2187 	error = xfs_difree_inobt(mp, tp, agbp, agino, dfops, xic, &rec);
2188 	if (error)
2189 		goto error0;
2190 
2191 	/*
2192 	 * Fix up the free inode btree.
2193 	 */
2194 	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
2195 		error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
2196 		if (error)
2197 			goto error0;
2198 	}
2199 
2200 	return 0;
2201 
2202 error0:
2203 	return error;
2204 }
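
/*
 * Standalone sketch of the inode number decomposition performed above by
 * XFS_INO_TO_AGNO(), XFS_INO_TO_AGINO() and XFS_AGINO_TO_AGBNO(). The
 * geometry values are assumptions chosen for the example; an inode number
 * is simply the AG number, AG block and in-block offset concatenated
 * bit-wise.
 */
#include <stdint.h>
#include <stdio.h>

#define AGBLKLOG	20	/* assumed: 2^20 blocks per AG */
#define INOPBLOG	5	/* assumed: 2^5 inodes per block */
#define AGINO_BITS	(AGBLKLOG + INOPBLOG)

int
main(void)
{
	/* build an inode number in AG 3, block 1234, offset 7 */
	uint64_t	ino = ((uint64_t)3 << AGINO_BITS) |
			      (1234 << INOPBLOG) | 7;
	uint32_t	agno = ino >> AGINO_BITS;
	uint32_t	agino = (uint32_t)(ino & ((1ULL << AGINO_BITS) - 1));
	uint32_t	agbno = agino >> INOPBLOG;
	uint32_t	offset = agino & ((1U << INOPBLOG) - 1);

	/* prints: agno 3 agbno 1234 offset 7 */
	printf("agno %u agbno %u offset %u\n", agno, agbno, offset);
	return agno == 3 && agbno == 1234 && offset == 7 ? 0 : 1;
}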
2205 
2206 STATIC int
2207 xfs_imap_lookup(
2208 	struct xfs_mount	*mp,
2209 	struct xfs_trans	*tp,
2210 	xfs_agnumber_t		agno,
2211 	xfs_agino_t		agino,
2212 	xfs_agblock_t		agbno,
2213 	xfs_agblock_t		*chunk_agbno,
2214 	xfs_agblock_t		*offset_agbno,
2215 	int			flags)
2216 {
2217 	struct xfs_inobt_rec_incore rec;
2218 	struct xfs_btree_cur	*cur;
2219 	struct xfs_buf		*agbp;
2220 	int			error;
2221 	int			i;
2222 
2223 	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2224 	if (error) {
2225 		xfs_alert(mp,
2226 			"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
2227 			__func__, error, agno);
2228 		return error;
2229 	}
2230 
2231 	/*
2232 	 * Look up the inode record for the given agino. If the record cannot be
2233 	 * found, then it's an invalid inode number and we should abort. Once
2234 	 * we have a record, we need to ensure it contains the inode number
2235 	 * we are looking up.
2236 	 */
2237 	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
2238 	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
2239 	if (!error) {
2240 		if (i)
2241 			error = xfs_inobt_get_rec(cur, &rec, &i);
2242 		if (!error && i == 0)
2243 			error = -EINVAL;
2244 	}
2245 
2246 	xfs_trans_brelse(tp, agbp);
2247 	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
2248 	if (error)
2249 		return error;
2250 
2251 	/* check that the returned record contains the required inode */
2252 	if (rec.ir_startino > agino ||
2253 	    rec.ir_startino + mp->m_ialloc_inos <= agino)
2254 		return -EINVAL;
2255 
2256 	/* for untrusted inodes check it is allocated first */
2257 	if ((flags & XFS_IGET_UNTRUSTED) &&
2258 	    (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
2259 		return -EINVAL;
2260 
2261 	*chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
2262 	*offset_agbno = agbno - *chunk_agbno;
2263 	return 0;
2264 }
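
/*
 * A minimal sketch of the untrusted-inode check above: bit n of the
 * 64-bit free mask is set when inode (ir_startino + n) is free, so an
 * untrusted lookup must see that bit clear before trusting the number.
 * Types are simplified and the helper name is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
inode_is_allocated(uint32_t agino, uint32_t ir_startino, uint64_t ir_free)
{
	/* equivalent of !(rec.ir_free & XFS_INOBT_MASK(agino - startino)) */
	return !(ir_free & ((uint64_t)1 << (agino - ir_startino)));
}

int
main(void)
{
	/* chunk at 128 with inodes 128 and 129 free: 131 is allocated */
	return inode_is_allocated(131, 128, 0x3ULL) ? 0 : 1;
}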
2265 
2266 /*
2267  * Return the location of the inode in imap, for mapping it into a buffer.
2268  */
2269 int
2270 xfs_imap(
2271 	xfs_mount_t	 *mp,	/* file system mount structure */
2272 	xfs_trans_t	 *tp,	/* transaction pointer */
2273 	xfs_ino_t	ino,	/* inode to locate */
2274 	struct xfs_imap	*imap,	/* location map structure */
2275 	uint		flags)	/* flags for inode btree lookup */
2276 {
2277 	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
2278 	xfs_agino_t	agino;	/* inode number within alloc group */
2279 	xfs_agnumber_t	agno;	/* allocation group number */
2280 	int		blks_per_cluster; /* num blocks per inode cluster */
2281 	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
2282 	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
2283 	int		error;	/* error code */
2284 	int		offset;	/* index of inode in its buffer */
2285 	xfs_agblock_t	offset_agbno;	/* blks from chunk start to inode */
2286 
2287 	ASSERT(ino != NULLFSINO);
2288 
2289 	/*
2290 	 * Split up the inode number into its parts.
2291 	 */
2292 	agno = XFS_INO_TO_AGNO(mp, ino);
2293 	agino = XFS_INO_TO_AGINO(mp, ino);
2294 	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2295 	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
2296 	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2297 #ifdef DEBUG
2298 		/*
2299 		 * Don't output diagnostic information for untrusted inodes
2300 		 * as they can be invalid without implying corruption.
2301 		 */
2302 		if (flags & XFS_IGET_UNTRUSTED)
2303 			return -EINVAL;
2304 		if (agno >= mp->m_sb.sb_agcount) {
2305 			xfs_alert(mp,
2306 				"%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
2307 				__func__, agno, mp->m_sb.sb_agcount);
2308 		}
2309 		if (agbno >= mp->m_sb.sb_agblocks) {
2310 			xfs_alert(mp,
2311 		"%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
2312 				__func__, (unsigned long long)agbno,
2313 				(unsigned long)mp->m_sb.sb_agblocks);
2314 		}
2315 		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2316 			xfs_alert(mp,
2317 		"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
2318 				__func__, ino,
2319 				XFS_AGINO_TO_INO(mp, agno, agino));
2320 		}
2321 		xfs_stack_trace();
2322 #endif /* DEBUG */
2323 		return -EINVAL;
2324 	}
2325 
2326 	blks_per_cluster = xfs_icluster_size_fsb(mp);
2327 
2328 	/*
2329 	 * For bulkstat and handle lookups, we have an untrusted inode number
2330 	 * that we have to verify is valid. We cannot do this just by reading
2331 	 * the inode buffer as it may have been unlinked and removed leaving
2332 	 * inodes in stale state on disk. Hence we have to do a btree lookup
2333 	 * in all cases where an untrusted inode number is passed.
2334 	 */
2335 	if (flags & XFS_IGET_UNTRUSTED) {
2336 		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2337 					&chunk_agbno, &offset_agbno, flags);
2338 		if (error)
2339 			return error;
2340 		goto out_map;
2341 	}
2342 
2343 	/*
2344 	 * If the inode cluster size is the same as the blocksize or
2345 	 * smaller, we can get to the buffer with simple arithmetic.
2346 	 */
2347 	if (blks_per_cluster == 1) {
2348 		offset = XFS_INO_TO_OFFSET(mp, ino);
2349 		ASSERT(offset < mp->m_sb.sb_inopblock);
2350 
2351 		imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
2352 		imap->im_len = XFS_FSB_TO_BB(mp, 1);
2353 		imap->im_boffset = (unsigned short)(offset <<
2354 							mp->m_sb.sb_inodelog);
2355 		return 0;
2356 	}
2357 
2358 	/*
2359 	 * If the inode chunks are aligned then we can find the location
2360 	 * with simple arithmetic. Otherwise we have to do a btree lookup
2361 	 * to find it.
2362 	 */
2363 	if (mp->m_inoalign_mask) {
2364 		offset_agbno = agbno & mp->m_inoalign_mask;
2365 		chunk_agbno = agbno - offset_agbno;
2366 	} else {
2367 		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2368 					&chunk_agbno, &offset_agbno, flags);
2369 		if (error)
2370 			return error;
2371 	}
2372 
2373 out_map:
2374 	ASSERT(agbno >= chunk_agbno);
2375 	cluster_agbno = chunk_agbno +
2376 		((offset_agbno / blks_per_cluster) * blks_per_cluster);
2377 	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
2378 		XFS_INO_TO_OFFSET(mp, ino);
2379 
2380 	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
2381 	imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
2382 	imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
2383 
2384 	/*
2385 	 * If the inode number maps to a block outside the bounds
2386 	 * of the file system then return an error rather than calling
2387 	 * read_buf and panicking when we get an error from the
2388 	 * driver.
2389 	 */
2390 	if ((imap->im_blkno + imap->im_len) >
2391 	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2392 		xfs_alert(mp,
2393 	"%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
2394 			__func__, (unsigned long long) imap->im_blkno,
2395 			(unsigned long long) imap->im_len,
2396 			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
2397 		return -EINVAL;
2398 	}
2399 	return 0;
2400 }
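
/*
 * Worked example of the cluster mapping arithmetic above, under assumed
 * geometry (4 blocks per cluster, 32 inodes per block). The inode's
 * cluster starts at chunk_agbno plus offset_agbno rounded down to a
 * cluster boundary; the buffer offset is the inode's index within that
 * cluster.
 */
#include <stdio.h>

int
main(void)
{
	int	blks_per_cluster = 4;	/* assumed cluster size in blocks */
	int	inopblock = 32;		/* assumed inodes per block */
	int	chunk_agbno = 100;	/* assumed chunk start block */
	int	agbno = 106;		/* block containing the inode */
	int	ino_offset = 9;		/* inode's index within its block */

	int	offset_agbno = agbno - chunk_agbno;
	int	cluster_agbno = chunk_agbno +
			(offset_agbno / blks_per_cluster) * blks_per_cluster;
	int	offset = (agbno - cluster_agbno) * inopblock + ino_offset;

	/* prints: cluster_agbno 104 offset 73 */
	printf("cluster_agbno %d offset %d\n", cluster_agbno, offset);
	return 0;
}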
2401 
2402 /*
2403  * Compute and fill in value of m_in_maxlevels.
2404  */
2405 void
2406 xfs_ialloc_compute_maxlevels(
2407 	xfs_mount_t	*mp)		/* file system mount structure */
2408 {
2409 	uint		inodes;
2410 
2411 	inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
2412 	mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_inobt_mnr,
2413 							 inodes);
2414 }
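
/*
 * Sketch of what a computation like xfs_btree_compute_maxlevels() does:
 * assuming mnr[0] records fit in a minimally-filled leaf and mnr[1] keys
 * in a minimally-filled node, keep dividing until a single block covers
 * everything. The rounding here paraphrases the idea rather than copying
 * the in-kernel helper, and the mnr values are made up for the example.
 */
#include <stdio.h>

static unsigned int
compute_maxlevels(unsigned long mnr0, unsigned long mnr1,
		  unsigned long records)
{
	unsigned long	blocks = (records + mnr0 - 1) / mnr0;
	unsigned int	levels = 1;

	while (blocks > 1) {
		blocks = (blocks + mnr1 - 1) / mnr1;
		levels++;
	}
	return levels;
}

int
main(void)
{
	/* e.g. 2^27 inodes per AG in 64-inode chunk records */
	unsigned long	records = (1UL << 27) >> 6;

	/* prints: maxlevels 4 */
	printf("maxlevels %u\n", compute_maxlevels(125, 125, records));
	return 0;
}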
2415 
2416 /*
2417  * Log specified fields for the ag hdr (inode section). The growth of the agi
2418  * structure over time requires that we interpret the buffer as two logical
2419  * regions delineated by the end of the unlinked list. This is due to the size
2420  * of the hash table and its location in the middle of the agi.
2421  *
2422  * For example, a request to log a field before agi_unlinked and a field after
2423  * agi_unlinked could cause us to log the entire hash table and use an excessive
2424  * amount of log space. To avoid this behavior, log the region up through
2425  * agi_unlinked in one call and the region after agi_unlinked through the end of
2426  * the structure in another.
2427  */
2428 void
2429 xfs_ialloc_log_agi(
2430 	xfs_trans_t	*tp,		/* transaction pointer */
2431 	xfs_buf_t	*bp,		/* allocation group header buffer */
2432 	int		fields)		/* bitmask of fields to log */
2433 {
2434 	int			first;		/* first byte number */
2435 	int			last;		/* last byte number */
2436 	static const short	offsets[] = {	/* field starting offsets */
2437 					/* keep in sync with bit definitions */
2438 		offsetof(xfs_agi_t, agi_magicnum),
2439 		offsetof(xfs_agi_t, agi_versionnum),
2440 		offsetof(xfs_agi_t, agi_seqno),
2441 		offsetof(xfs_agi_t, agi_length),
2442 		offsetof(xfs_agi_t, agi_count),
2443 		offsetof(xfs_agi_t, agi_root),
2444 		offsetof(xfs_agi_t, agi_level),
2445 		offsetof(xfs_agi_t, agi_freecount),
2446 		offsetof(xfs_agi_t, agi_newino),
2447 		offsetof(xfs_agi_t, agi_dirino),
2448 		offsetof(xfs_agi_t, agi_unlinked),
2449 		offsetof(xfs_agi_t, agi_free_root),
2450 		offsetof(xfs_agi_t, agi_free_level),
2451 		sizeof(xfs_agi_t)
2452 	};
2453 #ifdef DEBUG
2454 	xfs_agi_t		*agi;	/* allocation group header */
2455 
2456 	agi = XFS_BUF_TO_AGI(bp);
2457 	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
2458 #endif
2459 
2460 	/*
2461 	 * Compute byte offsets for the first and last fields in the first
2462 	 * region and log the agi buffer. This only logs up through
2463 	 * agi_unlinked.
2464 	 */
2465 	if (fields & XFS_AGI_ALL_BITS_R1) {
2466 		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
2467 				  &first, &last);
2468 		xfs_trans_log_buf(tp, bp, first, last);
2469 	}
2470 
2471 	/*
2472 	 * Mask off the bits in the first region and calculate the first and
2473 	 * last field offsets for any bits in the second region.
2474 	 */
2475 	fields &= ~XFS_AGI_ALL_BITS_R1;
2476 	if (fields) {
2477 		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
2478 				  &first, &last);
2479 		xfs_trans_log_buf(tp, bp, first, last);
2480 	}
2481 }
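
/*
 * Illustration of the two-region split described above, with made-up
 * byte offsets. Logging two fields that straddle a large middle region
 * (like the unlinked hash table) in one call would dirty everything in
 * between; two calls keep the logged ranges tight. The offsets table
 * follows the same convention as the one above: entry i is where field
 * i starts and the final entry is the structure size.
 */
#include <stdio.h>

static void
log_range(const short *offsets, int nfields, int fields)
{
	int	first = -1, last = -1, i;

	for (i = 0; i < nfields; i++) {
		if (!(fields & (1 << i)))
			continue;
		if (first < 0)
			first = offsets[i];
		last = offsets[i + 1] - 1; /* field ends where the next starts */
	}
	if (first >= 0)
		printf("log bytes %d..%d\n", first, last);
}

int
main(void)
{
	/* hypothetical layout: fields 1 and 5 straddle a 256-byte table */
	static const short offsets[] = { 0, 4, 8, 12, 16, 272, 276, 280 };

	log_range(offsets, 7, (1 << 1) | (1 << 5));	/* one call: 4..275   */
	log_range(offsets, 7, 1 << 1);			/* region 1: 4..7     */
	log_range(offsets, 7, 1 << 5);			/* region 2: 272..275 */
	return 0;
}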
2482 
2483 #ifdef DEBUG
2484 STATIC void
2485 xfs_check_agi_unlinked(
2486 	struct xfs_agi		*agi)
2487 {
2488 	int			i;
2489 
2490 	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
2491 		ASSERT(agi->agi_unlinked[i]);
2492 }
2493 #else
2494 #define xfs_check_agi_unlinked(agi)
2495 #endif
2496 
2497 static bool
2498 xfs_agi_verify(
2499 	struct xfs_buf	*bp)
2500 {
2501 	struct xfs_mount *mp = bp->b_target->bt_mount;
2502 	struct xfs_agi	*agi = XFS_BUF_TO_AGI(bp);
2503 
2504 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2505 		if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
2506 			return false;
2507 		if (!xfs_log_check_lsn(mp,
2508 				be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
2509 			return false;
2510 	}
2511 
2512 	/*
2513 	 * Validate the magic number of the agi block.
2514 	 */
2515 	if (agi->agi_magicnum != cpu_to_be32(XFS_AGI_MAGIC))
2516 		return false;
2517 	if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
2518 		return false;
2519 
2520 	if (be32_to_cpu(agi->agi_level) < 1 ||
2521 	    be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
2522 		return false;
2523 
2524 	if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
2525 	    (be32_to_cpu(agi->agi_free_level) < 1 ||
2526 	     be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
2527 		return false;
2528 
2529 	/*
2530 	 * During growfs operations, the perag is not fully initialised,
2531 	 * so we can't use it for any useful checking. growfs avoids the
2532 	 * problem by using uncached buffers that don't have the perag
2533 	 * attached, which lets us detect and skip this check.
2534 	 */
2535 	if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
2536 		return false;
2537 
2538 	xfs_check_agi_unlinked(agi);
2539 	return true;
2540 }
2541 
2542 static void
2543 xfs_agi_read_verify(
2544 	struct xfs_buf	*bp)
2545 {
2546 	struct xfs_mount *mp = bp->b_target->bt_mount;
2547 
2548 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
2549 	    !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
2550 		xfs_buf_ioerror(bp, -EFSBADCRC);
2551 	else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp,
2552 				XFS_ERRTAG_IALLOC_READ_AGI))
2553 		xfs_buf_ioerror(bp, -EFSCORRUPTED);
2554 
2555 	if (bp->b_error)
2556 		xfs_verifier_error(bp);
2557 }
2558 
2559 static void
2560 xfs_agi_write_verify(
2561 	struct xfs_buf	*bp)
2562 {
2563 	struct xfs_mount *mp = bp->b_target->bt_mount;
2564 	struct xfs_buf_log_item	*bip = bp->b_fspriv;
2565 
2566 	if (!xfs_agi_verify(bp)) {
2567 		xfs_buf_ioerror(bp, -EFSCORRUPTED);
2568 		xfs_verifier_error(bp);
2569 		return;
2570 	}
2571 
2572 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2573 		return;
2574 
2575 	if (bip)
2576 		XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2577 	xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
2578 }
2579 
2580 const struct xfs_buf_ops xfs_agi_buf_ops = {
2581 	.name = "xfs_agi",
2582 	.verify_read = xfs_agi_read_verify,
2583 	.verify_write = xfs_agi_write_verify,
2584 };
2585 
2586 /*
2587  * Read in the allocation group header (inode allocation section)
2588  */
2589 int
2590 xfs_read_agi(
2591 	struct xfs_mount	*mp,	/* file system mount structure */
2592 	struct xfs_trans	*tp,	/* transaction pointer */
2593 	xfs_agnumber_t		agno,	/* allocation group number */
2594 	struct xfs_buf		**bpp)	/* allocation group hdr buf */
2595 {
2596 	int			error;
2597 
2598 	trace_xfs_read_agi(mp, agno);
2599 
2600 	ASSERT(agno != NULLAGNUMBER);
2601 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
2602 			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
2603 			XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
2604 	if (error)
2605 		return error;
2606 	if (tp)
2607 		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);
2608 
2609 	xfs_buf_set_ref(*bpp, XFS_AGI_REF);
2610 	return 0;
2611 }
2612 
2613 int
2614 xfs_ialloc_read_agi(
2615 	struct xfs_mount	*mp,	/* file system mount structure */
2616 	struct xfs_trans	*tp,	/* transaction pointer */
2617 	xfs_agnumber_t		agno,	/* allocation group number */
2618 	struct xfs_buf		**bpp)	/* allocation group hdr buf */
2619 {
2620 	struct xfs_agi		*agi;	/* allocation group header */
2621 	struct xfs_perag	*pag;	/* per allocation group data */
2622 	int			error;
2623 
2624 	trace_xfs_ialloc_read_agi(mp, agno);
2625 
2626 	error = xfs_read_agi(mp, tp, agno, bpp);
2627 	if (error)
2628 		return error;
2629 
2630 	agi = XFS_BUF_TO_AGI(*bpp);
2631 	pag = xfs_perag_get(mp, agno);
2632 	if (!pag->pagi_init) {
2633 		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
2634 		pag->pagi_count = be32_to_cpu(agi->agi_count);
2635 		pag->pagi_init = 1;
2636 	}
2637 
2638 	/*
2639 	 * It's possible for these to be out of sync if
2640 	 * we are in the middle of a forced shutdown.
2641 	 */
2642 	ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
2643 		XFS_FORCED_SHUTDOWN(mp));
2644 	xfs_perag_put(pag);
2645 	return 0;
2646 }
2647 
2648 /*
2649  * Read in the agi to initialise the per-ag data in the mount structure
2650  */
2651 int
2652 xfs_ialloc_pagi_init(
2653 	xfs_mount_t	*mp,		/* file system mount structure */
2654 	xfs_trans_t	*tp,		/* transaction pointer */
2655 	xfs_agnumber_t	agno)		/* allocation group number */
2656 {
2657 	xfs_buf_t	*bp = NULL;
2658 	int		error;
2659 
2660 	error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
2661 	if (error)
2662 		return error;
2663 	if (bp)
2664 		xfs_trans_brelse(tp, bp);
2665 	return 0;
2666 }
2667