xref: /openbmc/linux/fs/xfs/libxfs/xfs_alloc.c (revision 5a170e9e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_btree.h"
18 #include "xfs_rmap.h"
19 #include "xfs_alloc_btree.h"
20 #include "xfs_alloc.h"
21 #include "xfs_extent_busy.h"
22 #include "xfs_errortag.h"
23 #include "xfs_error.h"
24 #include "xfs_cksum.h"
25 #include "xfs_trace.h"
26 #include "xfs_trans.h"
27 #include "xfs_buf_item.h"
28 #include "xfs_log.h"
29 #include "xfs_ag_resv.h"
30 #include "xfs_bmap.h"
31 
32 extern kmem_zone_t	*xfs_bmap_free_item_zone;
33 
34 struct workqueue_struct *xfs_alloc_wq;
35 
36 #define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
37 
38 #define	XFSA_FIXUP_BNO_OK	1
39 #define	XFSA_FIXUP_CNT_OK	2
40 
41 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
42 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
43 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
44 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
45 		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
46 
47 /*
48  * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots at
49  * the beginning of the block for a proper header with the location information
50  * and CRC.
51  */
52 unsigned int
53 xfs_agfl_size(
54 	struct xfs_mount	*mp)
55 {
56 	unsigned int		size = mp->m_sb.sb_sectsize;
57 
58 	if (xfs_sb_version_hascrc(&mp->m_sb))
59 		size -= sizeof(struct xfs_agfl);
60 
61 	return size / sizeof(xfs_agblock_t);
62 }
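
/*
 * Worked example of the calculation above (illustrative only): on a
 * CRC-enabled filesystem with 512 byte sectors, the xfs_agfl header
 * (magic, seqno, uuid, lsn, crc) occupies 36 bytes, so xfs_agfl_size()
 * returns (512 - 36) / sizeof(xfs_agblock_t) = 119 free list slots.
 */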
63 
64 unsigned int
65 xfs_refc_block(
66 	struct xfs_mount	*mp)
67 {
68 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
69 		return XFS_RMAP_BLOCK(mp) + 1;
70 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
71 		return XFS_FIBT_BLOCK(mp) + 1;
72 	return XFS_IBT_BLOCK(mp) + 1;
73 }
74 
75 xfs_extlen_t
76 xfs_prealloc_blocks(
77 	struct xfs_mount	*mp)
78 {
79 	if (xfs_sb_version_hasreflink(&mp->m_sb))
80 		return xfs_refc_block(mp) + 1;
81 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
82 		return XFS_RMAP_BLOCK(mp) + 1;
83 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
84 		return XFS_FIBT_BLOCK(mp) + 1;
85 	return XFS_IBT_BLOCK(mp) + 1;
86 }
87 
88 /*
89  * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
90  * AGF buffer (PV 947395), we place constraints on the relationship among
91  * actual allocations for data blocks, freelist blocks, and potential file data
92  * bmap btree blocks. However, these restrictions may leave a delayed extent
93  * with no space actually allocated: for example, a data block in some AG is
94  * allocated, but no additional block is available for the bmap btree block
95  * needed when the file's bmap btree splits. This can lead to an infinite
96  * loop when the file is flushed to disk and all the delayed
97  * extents need to be actually allocated. To get around this, we explicitly set
98  * aside a few blocks which will not be reserved in delayed allocation.
99  *
100  * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
101  * potential split of the file's bmap btree.
102  */
103 unsigned int
104 xfs_alloc_set_aside(
105 	struct xfs_mount	*mp)
106 {
107 	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
108 }
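
/*
 * Illustrative arithmetic for the set-aside above: XFS_ALLOC_AGFL_RESERVE
 * (currently 4) blocks for the freelist plus 4 blocks for a worst-case bmap
 * btree split gives 8 blocks per AG, so a filesystem with, say, 16 AGs keeps
 * 16 * 8 = 128 blocks out of reach of delayed allocation.
 */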
109 
110 /*
111  * When deciding how much space to allocate out of an AG, we limit the
112  * allocation maximum size to the size of the AG. However, we cannot use all the
113  * blocks in the AG - some are permanently used by metadata. These
114  * blocks are generally:
115  *	- the AG superblock, AGF, AGI and AGFL
116  *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
117  *	  the AGI free inode and rmap btree root blocks.
118  *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
119  *	- the rmapbt root block
120  *
121  * The AG headers are sector sized, so the amount of space they take up is
122  * dependent on filesystem geometry. The others are all single blocks.
123  */
124 unsigned int
125 xfs_alloc_ag_max_usable(
126 	struct xfs_mount	*mp)
127 {
128 	unsigned int		blocks;
129 
130 	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
131 	blocks += XFS_ALLOC_AGFL_RESERVE;
132 	blocks += 3;			/* AGF, AGI btree root blocks */
133 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
134 		blocks++;		/* finobt root block */
135 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
136 		blocks++; 		/* rmap root block */
137 	if (xfs_sb_version_hasreflink(&mp->m_sb))
138 		blocks++;		/* refcount root block */
139 
140 	return mp->m_sb.sb_agblocks - blocks;
141 }
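
/*
 * Worked example for the helper above (illustrative only): with 4k blocks,
 * 512 byte sectors and the finobt, rmapbt and reflink features enabled, the
 * four sector-sized AG headers round up to a single filesystem block, so the
 * deduction is 1 + XFS_ALLOC_AGFL_RESERVE (4) + 3 + 1 + 1 + 1 = 11 blocks
 * per AG.
 */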
142 
143 /*
144  * Lookup the record equal to [bno, len] in the btree given by cur.
145  */
146 STATIC int				/* error */
147 xfs_alloc_lookup_eq(
148 	struct xfs_btree_cur	*cur,	/* btree cursor */
149 	xfs_agblock_t		bno,	/* starting block of extent */
150 	xfs_extlen_t		len,	/* length of extent */
151 	int			*stat)	/* success/failure */
152 {
153 	cur->bc_rec.a.ar_startblock = bno;
154 	cur->bc_rec.a.ar_blockcount = len;
155 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
156 }
157 
158 /*
159  * Lookup the first record greater than or equal to [bno, len]
160  * in the btree given by cur.
161  */
162 int				/* error */
163 xfs_alloc_lookup_ge(
164 	struct xfs_btree_cur	*cur,	/* btree cursor */
165 	xfs_agblock_t		bno,	/* starting block of extent */
166 	xfs_extlen_t		len,	/* length of extent */
167 	int			*stat)	/* success/failure */
168 {
169 	cur->bc_rec.a.ar_startblock = bno;
170 	cur->bc_rec.a.ar_blockcount = len;
171 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
172 }
173 
174 /*
175  * Lookup the first record less than or equal to [bno, len]
176  * in the btree given by cur.
177  */
178 int					/* error */
179 xfs_alloc_lookup_le(
180 	struct xfs_btree_cur	*cur,	/* btree cursor */
181 	xfs_agblock_t		bno,	/* starting block of extent */
182 	xfs_extlen_t		len,	/* length of extent */
183 	int			*stat)	/* success/failure */
184 {
185 	cur->bc_rec.a.ar_startblock = bno;
186 	cur->bc_rec.a.ar_blockcount = len;
187 	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
188 }
189 
190 /*
191  * Update the record referred to by cur to the value given
192  * by [bno, len].
193  * This either works (return 0) or gets an EFSCORRUPTED error.
194  */
195 STATIC int				/* error */
196 xfs_alloc_update(
197 	struct xfs_btree_cur	*cur,	/* btree cursor */
198 	xfs_agblock_t		bno,	/* starting block of extent */
199 	xfs_extlen_t		len)	/* length of extent */
200 {
201 	union xfs_btree_rec	rec;
202 
203 	rec.alloc.ar_startblock = cpu_to_be32(bno);
204 	rec.alloc.ar_blockcount = cpu_to_be32(len);
205 	return xfs_btree_update(cur, &rec);
206 }
207 
208 /*
209  * Get the data from the pointed-to record.
210  */
211 int					/* error */
212 xfs_alloc_get_rec(
213 	struct xfs_btree_cur	*cur,	/* btree cursor */
214 	xfs_agblock_t		*bno,	/* output: starting block of extent */
215 	xfs_extlen_t		*len,	/* output: length of extent */
216 	int			*stat)	/* output: success/failure */
217 {
218 	struct xfs_mount	*mp = cur->bc_mp;
219 	xfs_agnumber_t		agno = cur->bc_private.a.agno;
220 	union xfs_btree_rec	*rec;
221 	int			error;
222 
223 	error = xfs_btree_get_rec(cur, &rec, stat);
224 	if (error || !(*stat))
225 		return error;
226 
227 	*bno = be32_to_cpu(rec->alloc.ar_startblock);
228 	*len = be32_to_cpu(rec->alloc.ar_blockcount);
229 
230 	if (*len == 0)
231 		goto out_bad_rec;
232 
233 	/* check for valid extent range, including overflow */
234 	if (!xfs_verify_agbno(mp, agno, *bno))
235 		goto out_bad_rec;
236 	if (*bno > *bno + *len)
237 		goto out_bad_rec;
238 	if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
239 		goto out_bad_rec;
240 
241 	return 0;
242 
243 out_bad_rec:
244 	xfs_warn(mp,
245 		"%s Freespace BTree record corruption in AG %d detected!",
246 		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
247 	xfs_warn(mp,
248 		"start block 0x%x block count 0x%x", *bno, *len);
249 	return -EFSCORRUPTED;
250 }
251 
252 /*
253  * Compute aligned version of the found extent.
254  * Takes alignment and min length into account.
255  */
256 STATIC bool
257 xfs_alloc_compute_aligned(
258 	xfs_alloc_arg_t	*args,		/* allocation argument structure */
259 	xfs_agblock_t	foundbno,	/* starting block in found extent */
260 	xfs_extlen_t	foundlen,	/* length in found extent */
261 	xfs_agblock_t	*resbno,	/* result block number */
262 	xfs_extlen_t	*reslen,	/* result length */
263 	unsigned	*busy_gen)
264 {
265 	xfs_agblock_t	bno = foundbno;
266 	xfs_extlen_t	len = foundlen;
267 	xfs_extlen_t	diff;
268 	bool		busy;
269 
270 	/* Trim busy sections out of found extent */
271 	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
272 
273 	/*
274 	 * If we have a largish extent that happens to start before min_agbno,
275 	 * see if we can shift it into range...
276 	 */
277 	if (bno < args->min_agbno && bno + len > args->min_agbno) {
278 		diff = args->min_agbno - bno;
279 		if (len > diff) {
280 			bno += diff;
281 			len -= diff;
282 		}
283 	}
284 
285 	if (args->alignment > 1 && len >= args->minlen) {
286 		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
287 
288 		diff = aligned_bno - bno;
289 
290 		*resbno = aligned_bno;
291 		*reslen = diff >= len ? 0 : len - diff;
292 	} else {
293 		*resbno = bno;
294 		*reslen = len;
295 	}
296 
297 	return busy;
298 }
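
/*
 * Minimal sketch of the alignment trim performed above (illustrative only,
 * kept out of the build): a found extent starting at block 10 with length 20
 * and args->alignment = 8 is rounded up to start at block 16, leaving
 * 20 - 6 = 14 usable blocks; if the round-up consumed the whole extent the
 * result length would be 0.
 */
#if 0
static void example_compute_aligned(void)
{
	xfs_agblock_t	bno = 10, aligned_bno;
	xfs_extlen_t	len = 20, diff, reslen;
	xfs_extlen_t	alignment = 8;

	aligned_bno = roundup(bno, alignment);		/* 16 */
	diff = aligned_bno - bno;			/* 6 */
	reslen = diff >= len ? 0 : len - diff;		/* 14 */
}
#endif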
299 
300 /*
301  * Compute best start block and diff for "near" allocations.
302  * freelen >= wantlen already checked by caller.
303  */
304 STATIC xfs_extlen_t			/* difference value (absolute) */
305 xfs_alloc_compute_diff(
306 	xfs_agblock_t	wantbno,	/* target starting block */
307 	xfs_extlen_t	wantlen,	/* target length */
308 	xfs_extlen_t	alignment,	/* target alignment */
309 	int		datatype,	/* are we allocating data? */
310 	xfs_agblock_t	freebno,	/* freespace's starting block */
311 	xfs_extlen_t	freelen,	/* freespace's length */
312 	xfs_agblock_t	*newbnop)	/* result: best start block from free */
313 {
314 	xfs_agblock_t	freeend;	/* end of freespace extent */
315 	xfs_agblock_t	newbno1;	/* return block number */
316 	xfs_agblock_t	newbno2;	/* other new block number */
317 	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
318 	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
319 	xfs_agblock_t	wantend;	/* end of target extent */
320 	bool		userdata = xfs_alloc_is_userdata(datatype);
321 
322 	ASSERT(freelen >= wantlen);
323 	freeend = freebno + freelen;
324 	wantend = wantbno + wantlen;
325 	/*
326 	 * We want to allocate from the start of a free extent if it is past
327 	 * the desired block or if we are allocating user data and the free
328 	 * extent is before desired block. The second case is there to allow
329 	 * for contiguous allocation from the remaining free space if the file
330 	 * grows in the short term.
331 	 */
332 	if (freebno >= wantbno || (userdata && freeend < wantend)) {
333 		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
334 			newbno1 = NULLAGBLOCK;
335 	} else if (freeend >= wantend && alignment > 1) {
336 		newbno1 = roundup(wantbno, alignment);
337 		newbno2 = newbno1 - alignment;
338 		if (newbno1 >= freeend)
339 			newbno1 = NULLAGBLOCK;
340 		else
341 			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
342 		if (newbno2 < freebno)
343 			newbno2 = NULLAGBLOCK;
344 		else
345 			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
346 		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
347 			if (newlen1 < newlen2 ||
348 			    (newlen1 == newlen2 &&
349 			     XFS_ABSDIFF(newbno1, wantbno) >
350 			     XFS_ABSDIFF(newbno2, wantbno)))
351 				newbno1 = newbno2;
352 		} else if (newbno2 != NULLAGBLOCK)
353 			newbno1 = newbno2;
354 	} else if (freeend >= wantend) {
355 		newbno1 = wantbno;
356 	} else if (alignment > 1) {
357 		newbno1 = roundup(freeend - wantlen, alignment);
358 		if (newbno1 > freeend - wantlen &&
359 		    newbno1 - alignment >= freebno)
360 			newbno1 -= alignment;
361 		else if (newbno1 >= freeend)
362 			newbno1 = NULLAGBLOCK;
363 	} else
364 		newbno1 = freeend - wantlen;
365 	*newbnop = newbno1;
366 	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
367 }
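
/*
 * Two illustrative cases for the helper above (numbers only): with
 * wantbno = 100, wantlen = 8, alignment = 4 and a free extent starting at
 * block 120 with length 16, the free space lies entirely past the target,
 * so allocation starts at roundup(120, 4) = 120 and the returned diff is 20.
 * With the same target, alignment = 1 and a free extent covering blocks
 * 90-119, the target fits inside the free space, so the result is wantbno
 * itself and the diff is 0.
 */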
368 
369 /*
370  * Fix up the length, based on mod and prod.
371  * len should be k * prod + mod for some k.
372  * If len is too small it is returned unchanged.
373  * If len hits maxlen it is left alone.
374  */
375 STATIC void
376 xfs_alloc_fix_len(
377 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
378 {
379 	xfs_extlen_t	k;
380 	xfs_extlen_t	rlen;
381 
382 	ASSERT(args->mod < args->prod);
383 	rlen = args->len;
384 	ASSERT(rlen >= args->minlen);
385 	ASSERT(rlen <= args->maxlen);
386 	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
387 	    (args->mod == 0 && rlen < args->prod))
388 		return;
389 	k = rlen % args->prod;
390 	if (k == args->mod)
391 		return;
392 	if (k > args->mod)
393 		rlen = rlen - (k - args->mod);
394 	else
395 		rlen = rlen - args->prod + (args->mod - k);
396 	/* casts to (int) catch length underflows */
397 	if ((int)rlen < (int)args->minlen)
398 		return;
399 	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
400 	ASSERT(rlen % args->prod == args->mod);
401 	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
402 		rlen + args->minleft);
403 	args->len = rlen;
404 }
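
/*
 * Illustrative sketch of the trim above (kept out of the build, and ignoring
 * the minlen/maxlen guards): with prod = 4, mod = 1 and a candidate length
 * of 10, k = 10 % 4 = 2 > mod, so the length drops by (k - mod) to 9, and
 * 9 % 4 == 1 as required; with a candidate length of 8, k = 0 < mod, so the
 * length drops by a whole prod minus (mod - k), i.e. to 8 - 4 + 1 = 5.
 */
#if 0
static xfs_extlen_t
example_fix_len(
	xfs_extlen_t	rlen,
	xfs_extlen_t	prod,
	xfs_extlen_t	mod)
{
	xfs_extlen_t	k = rlen % prod;

	if (k == mod)
		return rlen;
	if (k > mod)
		return rlen - (k - mod);
	return rlen - prod + (mod - k);
}
#endif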
405 
406 /*
407  * Update the two btrees, logically removing from freespace the extent
408  * starting at rbno, rlen blocks.  The extent is contained within the
409  * actual (current) free extent fbno for flen blocks.
410  * Flags are passed in indicating whether the cursors are set to the
411  * relevant records.
412  */
413 STATIC int				/* error code */
414 xfs_alloc_fixup_trees(
415 	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
416 	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
417 	xfs_agblock_t	fbno,		/* starting block of free extent */
418 	xfs_extlen_t	flen,		/* length of free extent */
419 	xfs_agblock_t	rbno,		/* starting block of returned extent */
420 	xfs_extlen_t	rlen,		/* length of returned extent */
421 	int		flags)		/* flags, XFSA_FIXUP_... */
422 {
423 	int		error;		/* error code */
424 	int		i;		/* operation results */
425 	xfs_agblock_t	nfbno1;		/* first new free startblock */
426 	xfs_agblock_t	nfbno2;		/* second new free startblock */
427 	xfs_extlen_t	nflen1=0;	/* first new free length */
428 	xfs_extlen_t	nflen2=0;	/* second new free length */
429 	struct xfs_mount *mp;
430 
431 	mp = cnt_cur->bc_mp;
432 
433 	/*
434 	 * Look up the record in the by-size tree if necessary.
435 	 */
436 	if (flags & XFSA_FIXUP_CNT_OK) {
437 #ifdef DEBUG
438 		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
439 			return error;
440 		XFS_WANT_CORRUPTED_RETURN(mp,
441 			i == 1 && nfbno1 == fbno && nflen1 == flen);
442 #endif
443 	} else {
444 		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
445 			return error;
446 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
447 	}
448 	/*
449 	 * Look up the record in the by-block tree if necessary.
450 	 */
451 	if (flags & XFSA_FIXUP_BNO_OK) {
452 #ifdef DEBUG
453 		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
454 			return error;
455 		XFS_WANT_CORRUPTED_RETURN(mp,
456 			i == 1 && nfbno1 == fbno && nflen1 == flen);
457 #endif
458 	} else {
459 		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
460 			return error;
461 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
462 	}
463 
464 #ifdef DEBUG
465 	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
466 		struct xfs_btree_block	*bnoblock;
467 		struct xfs_btree_block	*cntblock;
468 
469 		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
470 		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
471 
472 		XFS_WANT_CORRUPTED_RETURN(mp,
473 			bnoblock->bb_numrecs == cntblock->bb_numrecs);
474 	}
475 #endif
476 
477 	/*
478 	 * Deal with all four cases: the allocated record is contained
479 	 * within the freespace record, so we can have new freespace
480 	 * at either (or both) end, or no freespace remaining.
481 	 */
482 	if (rbno == fbno && rlen == flen)
483 		nfbno1 = nfbno2 = NULLAGBLOCK;
484 	else if (rbno == fbno) {
485 		nfbno1 = rbno + rlen;
486 		nflen1 = flen - rlen;
487 		nfbno2 = NULLAGBLOCK;
488 	} else if (rbno + rlen == fbno + flen) {
489 		nfbno1 = fbno;
490 		nflen1 = flen - rlen;
491 		nfbno2 = NULLAGBLOCK;
492 	} else {
493 		nfbno1 = fbno;
494 		nflen1 = rbno - fbno;
495 		nfbno2 = rbno + rlen;
496 		nflen2 = (fbno + flen) - nfbno2;
497 	}
498 	/*
499 	 * Delete the entry from the by-size btree.
500 	 */
501 	if ((error = xfs_btree_delete(cnt_cur, &i)))
502 		return error;
503 	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
504 	/*
505 	 * Add new by-size btree entry(s).
506 	 */
507 	if (nfbno1 != NULLAGBLOCK) {
508 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
509 			return error;
510 		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
511 		if ((error = xfs_btree_insert(cnt_cur, &i)))
512 			return error;
513 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
514 	}
515 	if (nfbno2 != NULLAGBLOCK) {
516 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
517 			return error;
518 		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
519 		if ((error = xfs_btree_insert(cnt_cur, &i)))
520 			return error;
521 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
522 	}
523 	/*
524 	 * Fix up the by-block btree entry(s).
525 	 */
526 	if (nfbno1 == NULLAGBLOCK) {
527 		/*
528 		 * No remaining freespace, just delete the by-block tree entry.
529 		 */
530 		if ((error = xfs_btree_delete(bno_cur, &i)))
531 			return error;
532 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
533 	} else {
534 		/*
535 		 * Update the by-block entry to start later|be shorter.
536 		 */
537 		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
538 			return error;
539 	}
540 	if (nfbno2 != NULLAGBLOCK) {
541 		/*
542 		 * 2 resulting free entries, need to add one.
543 		 */
544 		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
545 			return error;
546 		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
547 		if ((error = xfs_btree_insert(bno_cur, &i)))
548 			return error;
549 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
550 	}
551 	return 0;
552 }
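
/*
 * Worked example of the four-way split above (illustrative only): carving
 * [rbno = 110, rlen = 20] out of the free record [fbno = 100, flen = 50]
 * leaves two new free records, [100, 10] and [130, 20].  Carving from the
 * very start ([100, 20]) leaves only [120, 30]; from the very end
 * ([130, 20]) leaves only [100, 30]; an exact match leaves nothing.
 */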
553 
554 static xfs_failaddr_t
555 xfs_agfl_verify(
556 	struct xfs_buf	*bp)
557 {
558 	struct xfs_mount *mp = bp->b_target->bt_mount;
559 	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
560 	int		i;
561 
562 	/*
563 	 * There is no verification of non-crc AGFLs because mkfs does not
564 	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
565 	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
566 	 * can't verify just those entries are valid.
567 	 */
568 	if (!xfs_sb_version_hascrc(&mp->m_sb))
569 		return NULL;
570 
571 	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
572 		return __this_address;
573 	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
574 		return __this_address;
575 	/*
576 	 * during growfs operations, the perag is not fully initialised,
577 	 * so we can't use it for any useful checking. growfs ensures we can't
578 	 * use it by using uncached buffers that don't have the perag attached
579 	 * so we can detect and avoid this problem.
580 	 */
581 	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
582 		return __this_address;
583 
584 	for (i = 0; i < xfs_agfl_size(mp); i++) {
585 		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
586 		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
587 			return __this_address;
588 	}
589 
590 	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
591 		return __this_address;
592 	return NULL;
593 }
594 
595 static void
596 xfs_agfl_read_verify(
597 	struct xfs_buf	*bp)
598 {
599 	struct xfs_mount *mp = bp->b_target->bt_mount;
600 	xfs_failaddr_t	fa;
601 
602 	/*
603 	 * There is no verification of non-crc AGFLs because mkfs does not
604 	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
605 	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
606 	 * can't verify just those entries are valid.
607 	 */
608 	if (!xfs_sb_version_hascrc(&mp->m_sb))
609 		return;
610 
611 	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
612 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
613 	else {
614 		fa = xfs_agfl_verify(bp);
615 		if (fa)
616 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
617 	}
618 }
619 
620 static void
621 xfs_agfl_write_verify(
622 	struct xfs_buf	*bp)
623 {
624 	struct xfs_mount	*mp = bp->b_target->bt_mount;
625 	struct xfs_buf_log_item	*bip = bp->b_log_item;
626 	xfs_failaddr_t		fa;
627 
628 	/* no verification of non-crc AGFLs */
629 	if (!xfs_sb_version_hascrc(&mp->m_sb))
630 		return;
631 
632 	fa = xfs_agfl_verify(bp);
633 	if (fa) {
634 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
635 		return;
636 	}
637 
638 	if (bip)
639 		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
640 
641 	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
642 }
643 
644 const struct xfs_buf_ops xfs_agfl_buf_ops = {
645 	.name = "xfs_agfl",
646 	.verify_read = xfs_agfl_read_verify,
647 	.verify_write = xfs_agfl_write_verify,
648 	.verify_struct = xfs_agfl_verify,
649 };
650 
651 /*
652  * Read in the allocation group free block array.
653  */
654 int					/* error */
655 xfs_alloc_read_agfl(
656 	xfs_mount_t	*mp,		/* mount point structure */
657 	xfs_trans_t	*tp,		/* transaction pointer */
658 	xfs_agnumber_t	agno,		/* allocation group number */
659 	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
660 {
661 	xfs_buf_t	*bp;		/* return value */
662 	int		error;
663 
664 	ASSERT(agno != NULLAGNUMBER);
665 	error = xfs_trans_read_buf(
666 			mp, tp, mp->m_ddev_targp,
667 			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
668 			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
669 	if (error)
670 		return error;
671 	xfs_buf_set_ref(bp, XFS_AGFL_REF);
672 	*bpp = bp;
673 	return 0;
674 }
675 
676 STATIC int
677 xfs_alloc_update_counters(
678 	struct xfs_trans	*tp,
679 	struct xfs_perag	*pag,
680 	struct xfs_buf		*agbp,
681 	long			len)
682 {
683 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
684 
685 	pag->pagf_freeblks += len;
686 	be32_add_cpu(&agf->agf_freeblks, len);
687 
688 	xfs_trans_agblocks_delta(tp, len);
689 	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
690 		     be32_to_cpu(agf->agf_length)))
691 		return -EFSCORRUPTED;
692 
693 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
694 	return 0;
695 }
696 
697 /*
698  * Allocation group level functions.
699  */
700 
701 /*
702  * Allocate a variable extent in the allocation group agno.
703  * Type and bno are used to determine where in the allocation group the
704  * extent will start.
705  * Extent's length (returned in *len) will be between minlen and maxlen,
706  * and of the form k * prod + mod unless there's nothing that large.
707  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
708  */
709 STATIC int			/* error */
710 xfs_alloc_ag_vextent(
711 	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
712 {
713 	int		error=0;
714 
715 	ASSERT(args->minlen > 0);
716 	ASSERT(args->maxlen > 0);
717 	ASSERT(args->minlen <= args->maxlen);
718 	ASSERT(args->mod < args->prod);
719 	ASSERT(args->alignment > 0);
720 
721 	/*
722 	 * Branch to correct routine based on the type.
723 	 */
724 	args->wasfromfl = 0;
725 	switch (args->type) {
726 	case XFS_ALLOCTYPE_THIS_AG:
727 		error = xfs_alloc_ag_vextent_size(args);
728 		break;
729 	case XFS_ALLOCTYPE_NEAR_BNO:
730 		error = xfs_alloc_ag_vextent_near(args);
731 		break;
732 	case XFS_ALLOCTYPE_THIS_BNO:
733 		error = xfs_alloc_ag_vextent_exact(args);
734 		break;
735 	default:
736 		ASSERT(0);
737 		/* NOTREACHED */
738 	}
739 
740 	if (error || args->agbno == NULLAGBLOCK)
741 		return error;
742 
743 	ASSERT(args->len >= args->minlen);
744 	ASSERT(args->len <= args->maxlen);
745 	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
746 	ASSERT(args->agbno % args->alignment == 0);
747 
748 	/* if not file data, insert new block into the reverse map btree */
749 	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
750 		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
751 				       args->agbno, args->len, &args->oinfo);
752 		if (error)
753 			return error;
754 	}
755 
756 	if (!args->wasfromfl) {
757 		error = xfs_alloc_update_counters(args->tp, args->pag,
758 						  args->agbp,
759 						  -((long)(args->len)));
760 		if (error)
761 			return error;
762 
763 		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
764 					      args->agbno, args->len));
765 	}
766 
767 	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
768 
769 	XFS_STATS_INC(args->mp, xs_allocx);
770 	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
771 	return error;
772 }
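
/*
 * Hypothetical caller sketch (kept out of the build; not a call site that
 * exists in XFS): in the real code the AGF buffer, perag and freelist are
 * set up by xfs_alloc_fix_freelist() before xfs_alloc_ag_vextent() runs,
 * and the owner info comes from the actual caller.  The field names below
 * match struct xfs_alloc_arg; the values are made up for illustration, and
 * XFS_RMAP_OINFO_SKIP_UPDATE simply skips the rmap insert shown above.
 */
#if 0
static int
example_ag_alloc(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag,
	xfs_agnumber_t		agno)
{
	struct xfs_alloc_arg	args = {
		.tp		= tp,
		.mp		= tp->t_mountp,
		.agbp		= agbp,
		.pag		= pag,
		.agno		= agno,
		.agbno		= 100,	/* "near" hint within the AG */
		.minlen		= 1,
		.maxlen		= 16,
		.prod		= 1,
		.mod		= 0,
		.alignment	= 1,
		.type		= XFS_ALLOCTYPE_NEAR_BNO,
		.resv		= XFS_AG_RESV_NONE,
		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
	};
	int			error;

	error = xfs_alloc_ag_vextent(&args);
	if (error)
		return error;
	/* on success args.agbno/args.len describe the extent (or NULLAGBLOCK) */
	return 0;
}
#endif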
773 
774 /*
775  * Allocate a variable extent at exactly agno/bno.
776  * Extent's length (returned in *len) will be between minlen and maxlen,
777  * and of the form k * prod + mod unless there's nothing that large.
778  * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
779  */
780 STATIC int			/* error */
781 xfs_alloc_ag_vextent_exact(
782 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
783 {
784 	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
785 	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
786 	int		error;
787 	xfs_agblock_t	fbno;	/* start block of found extent */
788 	xfs_extlen_t	flen;	/* length of found extent */
789 	xfs_agblock_t	tbno;	/* start block of busy extent */
790 	xfs_extlen_t	tlen;	/* length of busy extent */
791 	xfs_agblock_t	tend;	/* end block of busy extent */
792 	int		i;	/* success/failure of operation */
793 	unsigned	busy_gen;
794 
795 	ASSERT(args->alignment == 1);
796 
797 	/*
798 	 * Allocate/initialize a cursor for the by-number freespace btree.
799 	 */
800 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
801 					  args->agno, XFS_BTNUM_BNO);
802 
803 	/*
804 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
805 	 * Look for the closest free block <= bno, it must contain bno
806 	 * if any free block does.
807 	 */
808 	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
809 	if (error)
810 		goto error0;
811 	if (!i)
812 		goto not_found;
813 
814 	/*
815 	 * Grab the freespace record.
816 	 */
817 	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
818 	if (error)
819 		goto error0;
820 	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
821 	ASSERT(fbno <= args->agbno);
822 
823 	/*
824 	 * Check for overlapping busy extents.
825 	 */
826 	tbno = fbno;
827 	tlen = flen;
828 	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
829 
830 	/*
831 	 * Give up if the start of the extent is busy, or the freespace isn't
832 	 * long enough for the minimum request.
833 	 */
834 	if (tbno > args->agbno)
835 		goto not_found;
836 	if (tlen < args->minlen)
837 		goto not_found;
838 	tend = tbno + tlen;
839 	if (tend < args->agbno + args->minlen)
840 		goto not_found;
841 
842 	/*
843 	 * End of extent will be the smaller of the freespace end and the
844 	 * maximal requested end.
845 	 *
846 	 * Fix the length according to mod and prod if given.
847 	 */
848 	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
849 						- args->agbno;
850 	xfs_alloc_fix_len(args);
851 	ASSERT(args->agbno + args->len <= tend);
852 
853 	/*
854 	 * We are allocating agbno for args->len
855 	 * Allocate/initialize a cursor for the by-size btree.
856 	 */
857 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
858 		args->agno, XFS_BTNUM_CNT);
859 	ASSERT(args->agbno + args->len <=
860 		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
861 	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
862 				      args->len, XFSA_FIXUP_BNO_OK);
863 	if (error) {
864 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
865 		goto error0;
866 	}
867 
868 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
869 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
870 
871 	args->wasfromfl = 0;
872 	trace_xfs_alloc_exact_done(args);
873 	return 0;
874 
875 not_found:
876 	/* Didn't find it, return null. */
877 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
878 	args->agbno = NULLAGBLOCK;
879 	trace_xfs_alloc_exact_notfound(args);
880 	return 0;
881 
882 error0:
883 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
884 	trace_xfs_alloc_exact_error(args);
885 	return error;
886 }
887 
888 /*
889  * Search the btree in a given direction via the search cursor and compare
890  * the records found against the good extent we've already found.
891  */
892 STATIC int
893 xfs_alloc_find_best_extent(
894 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
895 	struct xfs_btree_cur	**gcur,	/* good cursor */
896 	struct xfs_btree_cur	**scur,	/* searching cursor */
897 	xfs_agblock_t		gdiff,	/* difference for search comparison */
898 	xfs_agblock_t		*sbno,	/* extent found by search */
899 	xfs_extlen_t		*slen,	/* extent length */
900 	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
901 	xfs_extlen_t		*slena,	/* aligned extent length */
902 	int			dir)	/* 0 = search right, 1 = search left */
903 {
904 	xfs_agblock_t		new;
905 	xfs_agblock_t		sdiff;
906 	int			error;
907 	int			i;
908 	unsigned		busy_gen;
909 
910 	/* The good extent is perfect, no need to  search. */
911 	if (!gdiff)
912 		goto out_use_good;
913 
914 	/*
915 	 * Look until we find a better one, run out of space or run off the end.
916 	 */
917 	do {
918 		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
919 		if (error)
920 			goto error0;
921 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
922 		xfs_alloc_compute_aligned(args, *sbno, *slen,
923 				sbnoa, slena, &busy_gen);
924 
925 		/*
926 		 * The good extent is closer than this one.
927 		 */
928 		if (!dir) {
929 			if (*sbnoa > args->max_agbno)
930 				goto out_use_good;
931 			if (*sbnoa >= args->agbno + gdiff)
932 				goto out_use_good;
933 		} else {
934 			if (*sbnoa < args->min_agbno)
935 				goto out_use_good;
936 			if (*sbnoa <= args->agbno - gdiff)
937 				goto out_use_good;
938 		}
939 
940 		/*
941 		 * Same distance, compare length and pick the best.
942 		 */
943 		if (*slena >= args->minlen) {
944 			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
945 			xfs_alloc_fix_len(args);
946 
947 			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
948 						       args->alignment,
949 						       args->datatype, *sbnoa,
950 						       *slena, &new);
951 
952 			/*
953 			 * Choose closer size and invalidate other cursor.
954 			 */
955 			if (sdiff < gdiff)
956 				goto out_use_search;
957 			goto out_use_good;
958 		}
959 
960 		if (!dir)
961 			error = xfs_btree_increment(*scur, 0, &i);
962 		else
963 			error = xfs_btree_decrement(*scur, 0, &i);
964 		if (error)
965 			goto error0;
966 	} while (i);
967 
968 out_use_good:
969 	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
970 	*scur = NULL;
971 	return 0;
972 
973 out_use_search:
974 	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
975 	*gcur = NULL;
976 	return 0;
977 
978 error0:
979 	/* caller invalidates cursors */
980 	return error;
981 }
982 
983 /*
984  * Allocate a variable extent near bno in the allocation group agno.
985  * Extent's length (returned in len) will be between minlen and maxlen,
986  * and of the form k * prod + mod unless there's nothing that large.
987  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
988  */
989 STATIC int				/* error */
990 xfs_alloc_ag_vextent_near(
991 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
992 {
993 	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
994 	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
995 	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
996 	xfs_agblock_t	gtbno;		/* start bno of right side entry */
997 	xfs_agblock_t	gtbnoa;		/* aligned ... */
998 	xfs_extlen_t	gtdiff;		/* difference to right side entry */
999 	xfs_extlen_t	gtlen;		/* length of right side entry */
1000 	xfs_extlen_t	gtlena;		/* aligned ... */
1001 	xfs_agblock_t	gtnew;		/* useful start bno of right side */
1002 	int		error;		/* error code */
1003 	int		i;		/* result code, temporary */
1004 	int		j;		/* result code, temporary */
1005 	xfs_agblock_t	ltbno;		/* start bno of left side entry */
1006 	xfs_agblock_t	ltbnoa;		/* aligned ... */
1007 	xfs_extlen_t	ltdiff;		/* difference to left side entry */
1008 	xfs_extlen_t	ltlen;		/* length of left side entry */
1009 	xfs_extlen_t	ltlena;		/* aligned ... */
1010 	xfs_agblock_t	ltnew;		/* useful start bno of left side */
1011 	xfs_extlen_t	rlen;		/* length of returned extent */
1012 	bool		busy;
1013 	unsigned	busy_gen;
1014 #ifdef DEBUG
1015 	/*
1016 	 * Randomly don't execute the first algorithm.
1017 	 */
1018 	int		dofirst;	/* set to do first algorithm */
1019 
1020 	dofirst = prandom_u32() & 1;
1021 #endif
1022 
1023 	/* handle uninitialized agbno range so the caller doesn't have to */
1024 	if (!args->min_agbno && !args->max_agbno)
1025 		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1026 	ASSERT(args->min_agbno <= args->max_agbno);
1027 
1028 	/* clamp agbno to the range if it's outside */
1029 	if (args->agbno < args->min_agbno)
1030 		args->agbno = args->min_agbno;
1031 	if (args->agbno > args->max_agbno)
1032 		args->agbno = args->max_agbno;
1033 
1034 restart:
1035 	bno_cur_lt = NULL;
1036 	bno_cur_gt = NULL;
1037 	ltlen = 0;
1038 	gtlena = 0;
1039 	ltlena = 0;
1040 	busy = false;
1041 
1042 	/*
1043 	 * Get a cursor for the by-size btree.
1044 	 */
1045 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1046 		args->agno, XFS_BTNUM_CNT);
1047 
1048 	/*
1049 	 * See if there are any free extents as big as maxlen.
1050 	 */
1051 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
1052 		goto error0;
1053 	/*
1054 	 * If none, then pick up the last entry in the tree unless the
1055 	 * tree is empty.
1056 	 */
1057 	if (!i) {
1058 		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
1059 				&ltlen, &i)))
1060 			goto error0;
1061 		if (i == 0 || ltlen == 0) {
1062 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1063 			trace_xfs_alloc_near_noentry(args);
1064 			return 0;
1065 		}
1066 		ASSERT(i == 1);
1067 	}
1068 	args->wasfromfl = 0;
1069 
1070 	/*
1071 	 * First algorithm.
1072 	 * If the requested extent is large wrt the freespaces available
1073 	 * in this a.g., then the cursor will be pointing to a btree entry
1074 	 * near the right edge of the tree.  If it's in the last btree leaf
1075 	 * block, then we just examine all the entries in that block
1076 	 * that are big enough, and pick the best one.
1077 	 * This is written as a while loop so we can break out of it,
1078 	 * but we never loop back to the top.
1079 	 */
1080 	while (xfs_btree_islastblock(cnt_cur, 0)) {
1081 		xfs_extlen_t	bdiff;
1082 		int		besti=0;
1083 		xfs_extlen_t	blen=0;
1084 		xfs_agblock_t	bnew=0;
1085 
1086 #ifdef DEBUG
1087 		if (dofirst)
1088 			break;
1089 #endif
1090 		/*
1091 		 * Start from the entry that lookup found, sequence through
1092 		 * all larger free blocks.  If we're actually pointing at a
1093 		 * record smaller than maxlen, go to the start of this block,
1094 		 * and skip all those smaller than minlen.
1095 		 */
1096 		if (ltlen || args->alignment > 1) {
1097 			cnt_cur->bc_ptrs[0] = 1;
1098 			do {
1099 				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
1100 						&ltlen, &i)))
1101 					goto error0;
1102 				XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1103 				if (ltlen >= args->minlen)
1104 					break;
1105 				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
1106 					goto error0;
1107 			} while (i);
1108 			ASSERT(ltlen >= args->minlen);
1109 			if (!i)
1110 				break;
1111 		}
1112 		i = cnt_cur->bc_ptrs[0];
1113 		for (j = 1, blen = 0, bdiff = 0;
1114 		     !error && j && (blen < args->maxlen || bdiff > 0);
1115 		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
1116 			/*
1117 			 * For each entry, decide if it's better than
1118 			 * the previous best entry.
1119 			 */
1120 			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
1121 				goto error0;
1122 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1123 			busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
1124 					&ltbnoa, &ltlena, &busy_gen);
1125 			if (ltlena < args->minlen)
1126 				continue;
1127 			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
1128 				continue;
1129 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1130 			xfs_alloc_fix_len(args);
1131 			ASSERT(args->len >= args->minlen);
1132 			if (args->len < blen)
1133 				continue;
1134 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1135 				args->alignment, args->datatype, ltbnoa,
1136 				ltlena, &ltnew);
1137 			if (ltnew != NULLAGBLOCK &&
1138 			    (args->len > blen || ltdiff < bdiff)) {
1139 				bdiff = ltdiff;
1140 				bnew = ltnew;
1141 				blen = args->len;
1142 				besti = cnt_cur->bc_ptrs[0];
1143 			}
1144 		}
1145 		/*
1146 		 * It didn't work.  We COULD be in a case where
1147 		 * there's a good record somewhere, so try again.
1148 		 */
1149 		if (blen == 0)
1150 			break;
1151 		/*
1152 		 * Point at the best entry, and retrieve it again.
1153 		 */
1154 		cnt_cur->bc_ptrs[0] = besti;
1155 		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
1156 			goto error0;
1157 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1158 		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1159 		args->len = blen;
1160 
1161 		/*
1162 		 * We are allocating starting at bnew for blen blocks.
1163 		 */
1164 		args->agbno = bnew;
1165 		ASSERT(bnew >= ltbno);
1166 		ASSERT(bnew + blen <= ltbno + ltlen);
1167 		/*
1168 		 * Set up a cursor for the by-bno tree.
1169 		 */
1170 		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
1171 			args->agbp, args->agno, XFS_BTNUM_BNO);
1172 		/*
1173 		 * Fix up the btree entries.
1174 		 */
1175 		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
1176 				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
1177 			goto error0;
1178 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1179 		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1180 
1181 		trace_xfs_alloc_near_first(args);
1182 		return 0;
1183 	}
1184 	/*
1185 	 * Second algorithm.
1186 	 * Search in the by-bno tree to the left and to the right
1187 	 * simultaneously, until in each case we find a space big enough,
1188 	 * or run into the edge of the tree.  When we run into the edge,
1189 	 * we deallocate that cursor.
1190 	 * If both searches succeed, we compare the two spaces and pick
1191 	 * the better one.
1192 	 * With alignment, it's possible for both to fail; the upper
1193 	 * level algorithm that picks allocation groups for allocations
1194 	 * is not supposed to do this.
1195 	 */
1196 	/*
1197 	 * Allocate and initialize the cursor for the leftward search.
1198 	 */
1199 	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1200 		args->agno, XFS_BTNUM_BNO);
1201 	/*
1202 	 * Lookup <= bno to find the leftward search's starting point.
1203 	 */
1204 	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
1205 		goto error0;
1206 	if (!i) {
1207 		/*
1208 		 * Didn't find anything; use this cursor for the rightward
1209 		 * search.
1210 		 */
1211 		bno_cur_gt = bno_cur_lt;
1212 		bno_cur_lt = NULL;
1213 	}
1214 	/*
1215 	 * Found something.  Duplicate the cursor for the rightward search.
1216 	 */
1217 	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
1218 		goto error0;
1219 	/*
1220 	 * Increment the cursor, so we will point at the entry just right
1221 	 * of the leftward entry if any, or to the leftmost entry.
1222 	 */
1223 	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1224 		goto error0;
1225 	if (!i) {
1226 		/*
1227 		 * It failed, there are no rightward entries.
1228 		 */
1229 		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
1230 		bno_cur_gt = NULL;
1231 	}
1232 	/*
1233 	 * Loop going left with the leftward cursor, right with the
1234 	 * rightward cursor, until either both directions give up or
1235 	 * we find an entry at least as big as minlen.
1236 	 */
1237 	do {
1238 		if (bno_cur_lt) {
1239 			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
1240 				goto error0;
1241 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1242 			busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
1243 					&ltbnoa, &ltlena, &busy_gen);
1244 			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
1245 				break;
1246 			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1247 				goto error0;
1248 			if (!i || ltbnoa < args->min_agbno) {
1249 				xfs_btree_del_cursor(bno_cur_lt,
1250 						     XFS_BTREE_NOERROR);
1251 				bno_cur_lt = NULL;
1252 			}
1253 		}
1254 		if (bno_cur_gt) {
1255 			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
1256 				goto error0;
1257 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1258 			busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
1259 					&gtbnoa, &gtlena, &busy_gen);
1260 			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
1261 				break;
1262 			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1263 				goto error0;
1264 			if (!i || gtbnoa > args->max_agbno) {
1265 				xfs_btree_del_cursor(bno_cur_gt,
1266 						     XFS_BTREE_NOERROR);
1267 				bno_cur_gt = NULL;
1268 			}
1269 		}
1270 	} while (bno_cur_lt || bno_cur_gt);
1271 
1272 	/*
1273 	 * Got both cursors still active, need to find better entry.
1274 	 */
1275 	if (bno_cur_lt && bno_cur_gt) {
1276 		if (ltlena >= args->minlen) {
1277 			/*
1278 			 * Left side is good, look for a right side entry.
1279 			 */
1280 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1281 			xfs_alloc_fix_len(args);
1282 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1283 				args->alignment, args->datatype, ltbnoa,
1284 				ltlena, &ltnew);
1285 
1286 			error = xfs_alloc_find_best_extent(args,
1287 						&bno_cur_lt, &bno_cur_gt,
1288 						ltdiff, &gtbno, &gtlen,
1289 						&gtbnoa, &gtlena,
1290 						0 /* search right */);
1291 		} else {
1292 			ASSERT(gtlena >= args->minlen);
1293 
1294 			/*
1295 			 * Right side is good, look for a left side entry.
1296 			 */
1297 			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1298 			xfs_alloc_fix_len(args);
1299 			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1300 				args->alignment, args->datatype, gtbnoa,
1301 				gtlena, &gtnew);
1302 
1303 			error = xfs_alloc_find_best_extent(args,
1304 						&bno_cur_gt, &bno_cur_lt,
1305 						gtdiff, &ltbno, &ltlen,
1306 						&ltbnoa, &ltlena,
1307 						1 /* search left */);
1308 		}
1309 
1310 		if (error)
1311 			goto error0;
1312 	}
1313 
1314 	/*
1315 	 * If we couldn't get anything, give up.
1316 	 */
1317 	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1318 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1319 
1320 		if (busy) {
1321 			trace_xfs_alloc_near_busy(args);
1322 			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
1323 			goto restart;
1324 		}
1325 		trace_xfs_alloc_size_neither(args);
1326 		args->agbno = NULLAGBLOCK;
1327 		return 0;
1328 	}
1329 
1330 	/*
1331 	 * At this point we have selected a freespace entry, either to the
1332 	 * left or to the right.  If it's on the right, copy all the
1333 	 * useful variables to the "left" set so we only have one
1334 	 * copy of this code.
1335 	 */
1336 	if (bno_cur_gt) {
1337 		bno_cur_lt = bno_cur_gt;
1338 		bno_cur_gt = NULL;
1339 		ltbno = gtbno;
1340 		ltbnoa = gtbnoa;
1341 		ltlen = gtlen;
1342 		ltlena = gtlena;
1343 		j = 1;
1344 	} else
1345 		j = 0;
1346 
1347 	/*
1348 	 * Fix up the length and compute the useful address.
1349 	 */
1350 	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1351 	xfs_alloc_fix_len(args);
1352 	rlen = args->len;
1353 	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1354 				     args->datatype, ltbnoa, ltlena, &ltnew);
1355 	ASSERT(ltnew >= ltbno);
1356 	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
1357 	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1358 	ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
1359 	args->agbno = ltnew;
1360 
1361 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1362 			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1363 		goto error0;
1364 
1365 	if (j)
1366 		trace_xfs_alloc_near_greater(args);
1367 	else
1368 		trace_xfs_alloc_near_lesser(args);
1369 
1370 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1371 	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1372 	return 0;
1373 
1374  error0:
1375 	trace_xfs_alloc_near_error(args);
1376 	if (cnt_cur != NULL)
1377 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1378 	if (bno_cur_lt != NULL)
1379 		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1380 	if (bno_cur_gt != NULL)
1381 		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1382 	return error;
1383 }
1384 
1385 /*
1386  * Allocate a variable extent anywhere in the allocation group agno.
1387  * Extent's length (returned in len) will be between minlen and maxlen,
1388  * and of the form k * prod + mod unless there's nothing that large.
1389  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1390  */
1391 STATIC int				/* error */
1392 xfs_alloc_ag_vextent_size(
1393 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
1394 {
1395 	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
1396 	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
1397 	int		error;		/* error result */
1398 	xfs_agblock_t	fbno;		/* start of found freespace */
1399 	xfs_extlen_t	flen;		/* length of found freespace */
1400 	int		i;		/* temp status variable */
1401 	xfs_agblock_t	rbno;		/* returned block number */
1402 	xfs_extlen_t	rlen;		/* length of returned extent */
1403 	bool		busy;
1404 	unsigned	busy_gen;
1405 
1406 restart:
1407 	/*
1408 	 * Allocate and initialize a cursor for the by-size btree.
1409 	 */
1410 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1411 		args->agno, XFS_BTNUM_CNT);
1412 	bno_cur = NULL;
1413 	busy = false;
1414 
1415 	/*
1416 	 * Look for an entry >= maxlen+alignment-1 blocks.
1417 	 */
1418 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1419 			args->maxlen + args->alignment - 1, &i)))
1420 		goto error0;
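
	/*
	 * Illustrative note on the "+ alignment - 1" above: it guarantees
	 * that an aligned run of maxlen blocks fits no matter where the free
	 * extent starts.  With maxlen = 8 and alignment = 4, rounding the
	 * start up costs at most 3 blocks, so an 11 block extent always
	 * leaves at least 8 usable aligned blocks.
	 */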
1421 
1422 	/*
1423 	 * If none then we have to settle for a smaller extent. In the case that
1424 	 * there are no large extents, this will return the last entry in the
1425 	 * tree unless the tree is empty. In the case that there are only busy
1426 	 * large extents, this will return the largest small extent unless there
1427 	 * are no smaller extents available.
1428 	 */
1429 	if (!i) {
1430 		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1431 						   &fbno, &flen, &i);
1432 		if (error)
1433 			goto error0;
1434 		if (i == 0 || flen == 0) {
1435 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1436 			trace_xfs_alloc_size_noentry(args);
1437 			return 0;
1438 		}
1439 		ASSERT(i == 1);
1440 		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1441 				&rlen, &busy_gen);
1442 	} else {
1443 		/*
1444 		 * Search for a non-busy extent that is large enough.
1445 		 */
1446 		for (;;) {
1447 			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1448 			if (error)
1449 				goto error0;
1450 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1451 
1452 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1453 					&rbno, &rlen, &busy_gen);
1454 
1455 			if (rlen >= args->maxlen)
1456 				break;
1457 
1458 			error = xfs_btree_increment(cnt_cur, 0, &i);
1459 			if (error)
1460 				goto error0;
1461 			if (i == 0) {
1462 				/*
1463 				 * Our only valid extents must have been busy.
1464 				 * Make it unbusy by forcing the log out and
1465 				 * retrying.
1466 				 */
1467 				xfs_btree_del_cursor(cnt_cur,
1468 						     XFS_BTREE_NOERROR);
1469 				trace_xfs_alloc_size_busy(args);
1470 				xfs_extent_busy_flush(args->mp,
1471 							args->pag, busy_gen);
1472 				goto restart;
1473 			}
1474 		}
1475 	}
1476 
1477 	/*
1478 	 * In the first case above, we got the last entry in the
1479 	 * by-size btree.  Now we check to see if the space hits maxlen
1480 	 * once aligned; if not, we search left for something better.
1481 	 * This can't happen in the second case above.
1482 	 */
1483 	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1484 	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1485 			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
1486 	if (rlen < args->maxlen) {
1487 		xfs_agblock_t	bestfbno;
1488 		xfs_extlen_t	bestflen;
1489 		xfs_agblock_t	bestrbno;
1490 		xfs_extlen_t	bestrlen;
1491 
1492 		bestrlen = rlen;
1493 		bestrbno = rbno;
1494 		bestflen = flen;
1495 		bestfbno = fbno;
1496 		for (;;) {
1497 			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1498 				goto error0;
1499 			if (i == 0)
1500 				break;
1501 			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1502 					&i)))
1503 				goto error0;
1504 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1505 			if (flen < bestrlen)
1506 				break;
1507 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1508 					&rbno, &rlen, &busy_gen);
1509 			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1510 			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1511 				(rlen <= flen && rbno + rlen <= fbno + flen),
1512 				error0);
1513 			if (rlen > bestrlen) {
1514 				bestrlen = rlen;
1515 				bestrbno = rbno;
1516 				bestflen = flen;
1517 				bestfbno = fbno;
1518 				if (rlen == args->maxlen)
1519 					break;
1520 			}
1521 		}
1522 		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1523 				&i)))
1524 			goto error0;
1525 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1526 		rlen = bestrlen;
1527 		rbno = bestrbno;
1528 		flen = bestflen;
1529 		fbno = bestfbno;
1530 	}
1531 	args->wasfromfl = 0;
1532 	/*
1533 	 * Fix up the length.
1534 	 */
1535 	args->len = rlen;
1536 	if (rlen < args->minlen) {
1537 		if (busy) {
1538 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1539 			trace_xfs_alloc_size_busy(args);
1540 			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
1541 			goto restart;
1542 		}
1543 		goto out_nominleft;
1544 	}
1545 	xfs_alloc_fix_len(args);
1546 
1547 	rlen = args->len;
1548 	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
1549 	/*
1550 	 * Allocate and initialize a cursor for the by-block tree.
1551 	 */
1552 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1553 		args->agno, XFS_BTNUM_BNO);
1554 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1555 			rbno, rlen, XFSA_FIXUP_CNT_OK)))
1556 		goto error0;
1557 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1558 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1559 	cnt_cur = bno_cur = NULL;
1560 	args->len = rlen;
1561 	args->agbno = rbno;
1562 	XFS_WANT_CORRUPTED_GOTO(args->mp,
1563 		args->agbno + args->len <=
1564 			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1565 		error0);
1566 	trace_xfs_alloc_size_done(args);
1567 	return 0;
1568 
1569 error0:
1570 	trace_xfs_alloc_size_error(args);
1571 	if (cnt_cur)
1572 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1573 	if (bno_cur)
1574 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1575 	return error;
1576 
1577 out_nominleft:
1578 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1579 	trace_xfs_alloc_size_nominleft(args);
1580 	args->agbno = NULLAGBLOCK;
1581 	return 0;
1582 }
1583 
1584 /*
1585  * Deal with the case where only small freespaces remain.
1586  * Either return the contents of the last freespace record,
1587  * or allocate space from the freelist if there is nothing in the tree.
1588  */
1589 STATIC int			/* error */
1590 xfs_alloc_ag_vextent_small(
1591 	xfs_alloc_arg_t	*args,	/* allocation argument structure */
1592 	xfs_btree_cur_t	*ccur,	/* by-size cursor */
1593 	xfs_agblock_t	*fbnop,	/* result block number */
1594 	xfs_extlen_t	*flenp,	/* result length */
1595 	int		*stat)	/* status: 0-freelist, 1-normal/none */
1596 {
1597 	int		error;
1598 	xfs_agblock_t	fbno;
1599 	xfs_extlen_t	flen;
1600 	int		i;
1601 
1602 	if ((error = xfs_btree_decrement(ccur, 0, &i)))
1603 		goto error0;
1604 	if (i) {
1605 		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
1606 			goto error0;
1607 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1608 	}
1609 	/*
1610 	 * Nothing in the btree, try the freelist.  Make sure
1611 	 * to respect minleft even when pulling from the
1612 	 * freelist.
1613 	 */
1614 	else if (args->minlen == 1 && args->alignment == 1 &&
1615 		 args->resv != XFS_AG_RESV_AGFL &&
1616 		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1617 		  > args->minleft)) {
1618 		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1619 		if (error)
1620 			goto error0;
1621 		if (fbno != NULLAGBLOCK) {
1622 			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
1623 			      xfs_alloc_allow_busy_reuse(args->datatype));
1624 
1625 			if (xfs_alloc_is_userdata(args->datatype)) {
1626 				xfs_buf_t	*bp;
1627 
1628 				bp = xfs_btree_get_bufs(args->mp, args->tp,
1629 					args->agno, fbno, 0);
1630 				if (!bp) {
1631 					error = -EFSCORRUPTED;
1632 					goto error0;
1633 				}
1634 				xfs_trans_binval(args->tp, bp);
1635 			}
1636 			args->len = 1;
1637 			args->agbno = fbno;
1638 			XFS_WANT_CORRUPTED_GOTO(args->mp,
1639 				args->agbno + args->len <=
1640 				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1641 				error0);
1642 			args->wasfromfl = 1;
1643 			trace_xfs_alloc_small_freelist(args);
1644 
1645 			/*
1646 			 * If we're feeding an AGFL block to something that
1647 			 * doesn't live in the free space, we need to clear
1648 			 * out the OWN_AG rmap.
1649 			 */
1650 			error = xfs_rmap_free(args->tp, args->agbp, args->agno,
1651 					fbno, 1, &XFS_RMAP_OINFO_AG);
1652 			if (error)
1653 				goto error0;
1654 
1655 			*stat = 0;
1656 			return 0;
1657 		}
1658 		/*
1659 		 * Nothing in the freelist.
1660 		 */
1661 		else
1662 			flen = 0;
1663 	}
1664 	/*
1665 	 * Can't allocate from the freelist for some reason.
1666 	 */
1667 	else {
1668 		fbno = NULLAGBLOCK;
1669 		flen = 0;
1670 	}
1671 	/*
1672 	 * Can't do the allocation, give up.
1673 	 */
1674 	if (flen < args->minlen) {
1675 		args->agbno = NULLAGBLOCK;
1676 		trace_xfs_alloc_small_notenough(args);
1677 		flen = 0;
1678 	}
1679 	*fbnop = fbno;
1680 	*flenp = flen;
1681 	*stat = 1;
1682 	trace_xfs_alloc_small_done(args);
1683 	return 0;
1684 
1685 error0:
1686 	trace_xfs_alloc_small_error(args);
1687 	return error;
1688 }
1689 
1690 /*
1691  * Free the extent starting at agno/bno for length.
1692  */
1693 STATIC int
1694 xfs_free_ag_extent(
1695 	struct xfs_trans		*tp,
1696 	struct xfs_buf			*agbp,
1697 	xfs_agnumber_t			agno,
1698 	xfs_agblock_t			bno,
1699 	xfs_extlen_t			len,
1700 	const struct xfs_owner_info	*oinfo,
1701 	enum xfs_ag_resv_type		type)
1702 {
1703 	struct xfs_mount		*mp;
1704 	struct xfs_perag		*pag;
1705 	struct xfs_btree_cur		*bno_cur;
1706 	struct xfs_btree_cur		*cnt_cur;
1707 	xfs_agblock_t			gtbno; /* start of right neighbor */
1708 	xfs_extlen_t			gtlen; /* length of right neighbor */
1709 	xfs_agblock_t			ltbno; /* start of left neighbor */
1710 	xfs_extlen_t			ltlen; /* length of left neighbor */
1711 	xfs_agblock_t			nbno; /* new starting block of freesp */
1712 	xfs_extlen_t			nlen; /* new length of freespace */
1713 	int				haveleft; /* have a left neighbor */
1714 	int				haveright; /* have a right neighbor */
1715 	int				i;
1716 	int				error;
1717 
1718 	bno_cur = cnt_cur = NULL;
1719 	mp = tp->t_mountp;
1720 
1721 	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1722 		error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1723 		if (error)
1724 			goto error0;
1725 	}
1726 
1727 	/*
1728 	 * Allocate and initialize a cursor for the by-block btree.
1729 	 */
1730 	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1731 	/*
1732 	 * Look for a neighboring block on the left (lower block numbers)
1733 	 * that is contiguous with this space.
1734 	 */
1735 	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1736 		goto error0;
1737 	if (haveleft) {
1738 		/*
1739 		 * There is a block to our left.
1740 		 */
1741 		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1742 			goto error0;
1743 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1744 		/*
1745 		 * It's not contiguous, though.
1746 		 */
1747 		if (ltbno + ltlen < bno)
1748 			haveleft = 0;
1749 		else {
1750 			/*
1751 			 * If this failure happens, the request to free this
1752 			 * space was invalid: it's (partly) already free.
1753 			 * Very bad.
1754 			 */
1755 			XFS_WANT_CORRUPTED_GOTO(mp,
1756 						ltbno + ltlen <= bno, error0);
1757 		}
1758 	}
1759 	/*
1760 	 * Look for a neighboring block on the right (higher block numbers)
1761 	 * that is contiguous with this space.
1762 	 */
1763 	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1764 		goto error0;
1765 	if (haveright) {
1766 		/*
1767 		 * There is a block to our right.
1768 		 */
1769 		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1770 			goto error0;
1771 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1772 		/*
1773 		 * It's not contiguous, though.
1774 		 */
1775 		if (bno + len < gtbno)
1776 			haveright = 0;
1777 		else {
1778 			/*
1779 			 * If this failure happens, the request to free this
1780 			 * space was invalid: it's (partly) already free.
1781 			 * Very bad.
1782 			 */
1783 			XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
1784 		}
1785 	}
1786 	/*
1787 	 * Now allocate and initialize a cursor for the by-size tree.
1788 	 */
1789 	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1790 	/*
1791 	 * Have both left and right contiguous neighbors.
1792 	 * Merge all three into a single free block.
1793 	 */
1794 	if (haveleft && haveright) {
1795 		/*
1796 		 * Delete the old by-size entry on the left.
1797 		 */
1798 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1799 			goto error0;
1800 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1801 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1802 			goto error0;
1803 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1804 		/*
1805 		 * Delete the old by-size entry on the right.
1806 		 */
1807 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1808 			goto error0;
1809 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1810 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1811 			goto error0;
1812 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1813 		/*
1814 		 * Delete the old by-block entry for the right block.
1815 		 */
1816 		if ((error = xfs_btree_delete(bno_cur, &i)))
1817 			goto error0;
1818 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1819 		/*
1820 		 * Move the by-block cursor back to the left neighbor.
1821 		 */
1822 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1823 			goto error0;
1824 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1825 #ifdef DEBUG
1826 		/*
1827 		 * Check that this is the right record: delete didn't
1828 		 * mangle the cursor.
1829 		 */
1830 		{
1831 			xfs_agblock_t	xxbno;
1832 			xfs_extlen_t	xxlen;
1833 
1834 			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1835 					&i)))
1836 				goto error0;
1837 			XFS_WANT_CORRUPTED_GOTO(mp,
1838 				i == 1 && xxbno == ltbno && xxlen == ltlen,
1839 				error0);
1840 		}
1841 #endif
1842 		/*
1843 		 * Update remaining by-block entry to the new, joined block.
1844 		 */
1845 		nbno = ltbno;
1846 		nlen = len + ltlen + gtlen;
1847 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1848 			goto error0;
1849 	}
1850 	/*
1851 	 * Have only a left contiguous neighbor.
1852 	 * Merge it together with the new freespace.
1853 	 */
1854 	else if (haveleft) {
1855 		/*
1856 		 * Delete the old by-size entry on the left.
1857 		 */
1858 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1859 			goto error0;
1860 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1861 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1862 			goto error0;
1863 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1864 		/*
1865 		 * Back up the by-block cursor to the left neighbor, and
1866 		 * update its length.
1867 		 */
1868 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1869 			goto error0;
1870 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1871 		nbno = ltbno;
1872 		nlen = len + ltlen;
1873 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1874 			goto error0;
1875 	}
1876 	/*
1877 	 * Have only a right contiguous neighbor.
1878 	 * Merge it together with the new freespace.
1879 	 */
1880 	else if (haveright) {
1881 		/*
1882 		 * Delete the old by-size entry on the right.
1883 		 */
1884 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1885 			goto error0;
1886 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1887 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1888 			goto error0;
1889 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1890 		/*
1891 		 * Update the starting block and length of the right
1892 		 * neighbor in the by-block tree.
1893 		 */
1894 		nbno = bno;
1895 		nlen = len + gtlen;
1896 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1897 			goto error0;
1898 	}
1899 	/*
1900 	 * No contiguous neighbors.
1901 	 * Insert the new freespace into the by-block tree.
1902 	 */
1903 	else {
1904 		nbno = bno;
1905 		nlen = len;
1906 		if ((error = xfs_btree_insert(bno_cur, &i)))
1907 			goto error0;
1908 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1909 	}
1910 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1911 	bno_cur = NULL;
1912 	/*
1913 	 * In all cases we need to insert the new freespace in the by-size tree.
1914 	 */
1915 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1916 		goto error0;
1917 	XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
1918 	if ((error = xfs_btree_insert(cnt_cur, &i)))
1919 		goto error0;
1920 	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1921 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1922 	cnt_cur = NULL;
1923 
1924 	/*
1925 	 * Update the freespace totals in the ag and superblock.
1926 	 */
1927 	pag = xfs_perag_get(mp, agno);
1928 	error = xfs_alloc_update_counters(tp, pag, agbp, len);
1929 	xfs_ag_resv_free_extent(pag, type, tp, len);
1930 	xfs_perag_put(pag);
1931 	if (error)
1932 		goto error0;
1933 
1934 	XFS_STATS_INC(mp, xs_freex);
1935 	XFS_STATS_ADD(mp, xs_freeb, len);
1936 
1937 	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
1938 
1939 	return 0;
1940 
1941  error0:
1942 	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
1943 	if (bno_cur)
1944 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1945 	if (cnt_cur)
1946 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1947 	return error;
1948 }
1949 
1950 /*
1951  * Visible (exported) allocation/free functions.
1952  * Some of these are used just by xfs_alloc_btree.c and this file.
1953  */
1954 
1955 /*
1956  * Compute and fill in value of m_ag_maxlevels.
1957  */
1958 void
1959 xfs_alloc_compute_maxlevels(
1960 	xfs_mount_t	*mp)	/* file system mount structure */
1961 {
1962 	mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
1963 			(mp->m_sb.sb_agblocks + 1) / 2);
1964 }
1965 
1966 /*
1967  * Find the length of the longest extent in an AG.  The 'need' parameter
1968  * specifies how much space we're going to need for the AGFL and the
1969  * 'reserved' parameter tells us how many blocks in this AG are reserved for
1970  * other callers.
1971  */
1972 xfs_extlen_t
1973 xfs_alloc_longest_free_extent(
1974 	struct xfs_perag	*pag,
1975 	xfs_extlen_t		need,
1976 	xfs_extlen_t		reserved)
1977 {
1978 	xfs_extlen_t		delta = 0;
1979 
1980 	/*
1981 	 * If the AGFL needs a recharge, we'll have to subtract that from the
1982 	 * longest extent.
1983 	 */
1984 	if (need > pag->pagf_flcount)
1985 		delta = need - pag->pagf_flcount;
1986 
1987 	/*
1988 	 * If we cannot maintain others' reservations with space from the
1989 	 * not-longest freesp extents, we'll have to subtract /that/ from
1990 	 * the longest extent too.
1991 	 */
1992 	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
1993 		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
1994 
1995 	/*
1996 	 * If the longest extent is long enough to satisfy all the
1997 	 * reservations and AGFL rules in place, we can return this extent.
1998 	 */
1999 	if (pag->pagf_longest > delta)
2000 		return pag->pagf_longest - delta;
2001 
2002 	/* Otherwise, let the caller try for 1 block if there's space. */
2003 	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2004 }
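
/*
 * A worked example of the computation above: with need = 6 and
 * pagf_flcount = 2, topping up the AGFL costs delta = 4 blocks; if
 * pagf_freeblks = 100, pagf_longest = 40 and reserved = 70, the shorter
 * extents cover only 60 of the reserved blocks, so delta grows by 10 to 14
 * and the usable longest extent reported to the caller is 40 - 14 = 26.
 */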
2005 
2006 unsigned int
2007 xfs_alloc_min_freelist(
2008 	struct xfs_mount	*mp,
2009 	struct xfs_perag	*pag)
2010 {
2011 	unsigned int		min_free;
2012 
2013 	/* space needed by-bno freespace btree */
2014 	min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
2015 				       mp->m_ag_maxlevels);
2016 	/* space needed by-size freespace btree */
2017 	min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
2018 				       mp->m_ag_maxlevels);
2019 	/* space needed reverse mapping used space btree */
2020 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2021 		min_free += min_t(unsigned int,
2022 				  pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
2023 				  mp->m_rmap_maxlevels);
2024 
2025 	return min_free;
2026 }
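
/*
 * A worked example: on a filesystem without the rmap btree, an AG whose
 * bnobt and cntbt are each a single level deep needs
 * min_t(1 + 1, m_ag_maxlevels) = 2 blocks per tree, i.e. a minimum
 * freelist of 4 blocks; with the rmapbt enabled a third term capped by
 * m_rmap_maxlevels is added.
 */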
2027 
2028 /*
2029  * Check if the operation we are fixing up the freelist for should go ahead or
2030  * not. If we are freeing blocks, we always allow it, otherwise the allocation
2031  * is dependent on whether the size and shape of free space available will
2032  * permit the requested allocation to take place.
2033  */
2034 static bool
2035 xfs_alloc_space_available(
2036 	struct xfs_alloc_arg	*args,
2037 	xfs_extlen_t		min_free,
2038 	int			flags)
2039 {
2040 	struct xfs_perag	*pag = args->pag;
2041 	xfs_extlen_t		alloc_len, longest;
2042 	xfs_extlen_t		reservation; /* blocks that are still reserved */
2043 	int			available;
2044 
2045 	if (flags & XFS_ALLOC_FLAG_FREEING)
2046 		return true;
2047 
2048 	reservation = xfs_ag_resv_needed(pag, args->resv);
2049 
2050 	/* do we have enough contiguous free space for the allocation? */
2051 	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2052 	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2053 	if (longest < alloc_len)
2054 		return false;
2055 
2056 	/* do we have enough free space remaining for the allocation? */
2057 	available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
2058 			  reservation - min_free - args->minleft);
2059 	if (available < (int)max(args->total, alloc_len))
2060 		return false;
2061 
2062 	/*
2063 	 * Clamp maxlen to the amount of free space available for the actual
2064 	 * extent allocation.
2065 	 */
2066 	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2067 		args->maxlen = available;
2068 		ASSERT(args->maxlen > 0);
2069 		ASSERT(args->maxlen >= args->minlen);
2070 	}
2071 
2072 	return true;
2073 }
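
/*
 * A worked example: an allocation with minlen = 8, alignment = 4 and
 * minalignslop = 0 needs a single extent of at least 8 + (4 - 1) = 11
 * blocks, plus enough overall space (pagf_freeblks + pagf_flcount, minus
 * the AG reservation, min_free and args->minleft) to cover
 * max(args->total, 11).
 */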
2074 
2075 int
2076 xfs_free_agfl_block(
2077 	struct xfs_trans	*tp,
2078 	xfs_agnumber_t		agno,
2079 	xfs_agblock_t		agbno,
2080 	struct xfs_buf		*agbp,
2081 	struct xfs_owner_info	*oinfo)
2082 {
2083 	int			error;
2084 	struct xfs_buf		*bp;
2085 
2086 	error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2087 				   XFS_AG_RESV_AGFL);
2088 	if (error)
2089 		return error;
2090 
2091 	bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno, 0);
2092 	if (!bp)
2093 		return -EFSCORRUPTED;
2094 	xfs_trans_binval(tp, bp);
2095 
2096 	return 0;
2097 }
2098 
2099 /*
2100  * Check the agfl fields of the agf for inconsistency or corruption. The purpose
2101  * is to detect an agfl header padding mismatch between current and early v5
2102  * kernels. This problem manifests as a 1-slot size difference between the
2103  * on-disk flcount and the active [first, last] range of a wrapped agfl. This
2104  * may also catch variants of agfl count corruption unrelated to padding. Either
2105  * way, we'll reset the agfl and warn the user.
2106  *
2107  * Return true if a reset is required before the agfl can be used, false
2108  * otherwise.
2109  */
2110 static bool
2111 xfs_agfl_needs_reset(
2112 	struct xfs_mount	*mp,
2113 	struct xfs_agf		*agf)
2114 {
2115 	uint32_t		f = be32_to_cpu(agf->agf_flfirst);
2116 	uint32_t		l = be32_to_cpu(agf->agf_fllast);
2117 	uint32_t		c = be32_to_cpu(agf->agf_flcount);
2118 	int			agfl_size = xfs_agfl_size(mp);
2119 	int			active;
2120 
2121 	/* no agfl header on v4 supers */
2122 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2123 		return false;
2124 
2125 	/*
2126 	 * The agf read verifier catches severe corruption of these fields.
2127 	 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2128 	 * the verifier allows it.
2129 	 */
2130 	if (f >= agfl_size || l >= agfl_size)
2131 		return true;
2132 	if (c > agfl_size)
2133 		return true;
2134 
2135 	/*
2136 	 * Check consistency between the on-disk count and the active range. An
2137 	 * agfl padding mismatch manifests as an inconsistent flcount.
2138 	 */
2139 	if (c && l >= f)
2140 		active = l - f + 1;
2141 	else if (c)
2142 		active = agfl_size - f + l + 1;
2143 	else
2144 		active = 0;
2145 
2146 	return active != c;
2147 }
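
/*
 * A worked example, assuming the 36-byte v5 AGFL header on 512-byte
 * sectors so that xfs_agfl_size() is (512 - 36) / 4 = 119 slots: a wrapped
 * freelist with flfirst = 117 and fllast = 1 covers slots 117, 118, 0, 1,
 * so active = 119 - 117 + 1 + 1 = 4.  If the on-disk flcount reads 3
 * because of the padding mismatch, the counts disagree and a reset is
 * requested.
 */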
2148 
2149 /*
2150  * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2151  * agfl content cannot be trusted. Warn the user that a repair is required to
2152  * recover leaked blocks.
2153  *
2154  * The purpose of this mechanism is to handle filesystems affected by the agfl
2155  * header padding mismatch problem. A reset keeps the filesystem online with a
2156  * relatively minor free space accounting inconsistency rather than suffer the
2157  * inevitable crash from use of an invalid agfl block.
2158  */
2159 static void
2160 xfs_agfl_reset(
2161 	struct xfs_trans	*tp,
2162 	struct xfs_buf		*agbp,
2163 	struct xfs_perag	*pag)
2164 {
2165 	struct xfs_mount	*mp = tp->t_mountp;
2166 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
2167 
2168 	ASSERT(pag->pagf_agflreset);
2169 	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2170 
2171 	xfs_warn(mp,
2172 	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2173 	       "Please unmount and run xfs_repair.",
2174 	         pag->pag_agno, pag->pagf_flcount);
2175 
2176 	agf->agf_flfirst = 0;
2177 	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2178 	agf->agf_flcount = 0;
2179 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2180 				    XFS_AGF_FLCOUNT);
2181 
2182 	pag->pagf_flcount = 0;
2183 	pag->pagf_agflreset = false;
2184 }
2185 
2186 /*
2187  * Defer an AGFL block free. This is effectively equivalent to
2188  * xfs_bmap_add_free() with some special handling particular to AGFL blocks.
2189  *
2190  * Deferring AGFL frees helps prevent log reservation overruns due to too many
2191  * allocation operations in a transaction. AGFL frees are prone to this problem
2192  * because for one they are always freed one at a time. Further, an immediate
2193  * AGFL block free can cause a btree join and require another block free before
2194  * the real allocation can proceed. Deferring the free disconnects freeing up
2195  * the AGFL slot from freeing the block.
2196  */
2197 STATIC void
2198 xfs_defer_agfl_block(
2199 	struct xfs_trans		*tp,
2200 	xfs_agnumber_t			agno,
2201 	xfs_fsblock_t			agbno,
2202 	struct xfs_owner_info		*oinfo)
2203 {
2204 	struct xfs_mount		*mp = tp->t_mountp;
2205 	struct xfs_extent_free_item	*new;		/* new element */
2206 
2207 	ASSERT(xfs_bmap_free_item_zone != NULL);
2208 	ASSERT(oinfo != NULL);
2209 
2210 	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
2211 	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
2212 	new->xefi_blockcount = 1;
2213 	new->xefi_oinfo = *oinfo;
2214 
2215 	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2216 
2217 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
2218 }
2219 
2220 /*
2221  * Decide whether to use this allocation group for this allocation.
2222  * If so, fix up the btree freelist's size.
2223  */
2224 int			/* error */
2225 xfs_alloc_fix_freelist(
2226 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
2227 	int			flags)	/* XFS_ALLOC_FLAG_... */
2228 {
2229 	struct xfs_mount	*mp = args->mp;
2230 	struct xfs_perag	*pag = args->pag;
2231 	struct xfs_trans	*tp = args->tp;
2232 	struct xfs_buf		*agbp = NULL;
2233 	struct xfs_buf		*agflbp = NULL;
2234 	struct xfs_alloc_arg	targs;	/* local allocation arguments */
2235 	xfs_agblock_t		bno;	/* freelist block */
2236 	xfs_extlen_t		need;	/* total blocks needed in freelist */
2237 	int			error = 0;
2238 
2239 	if (!pag->pagf_init) {
2240 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2241 		if (error)
2242 			goto out_no_agbp;
2243 		if (!pag->pagf_init) {
2244 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2245 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2246 			goto out_agbp_relse;
2247 		}
2248 	}
2249 
2250 	/*
2251 	 * If this is a metadata preferred pag and we are allocating user
2252 	 * data, then try somewhere else unless we are being asked to try
2253 	 * harder at this point.
2254 	 */
2255 	if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
2256 	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2257 		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2258 		goto out_agbp_relse;
2259 	}
2260 
2261 	need = xfs_alloc_min_freelist(mp, pag);
2262 	if (!xfs_alloc_space_available(args, need, flags |
2263 			XFS_ALLOC_FLAG_CHECK))
2264 		goto out_agbp_relse;
2265 
2266 	/*
2267 	 * Get the a.g. freespace buffer.
2268 	 * Can fail if we're not blocking on locks and someone else holds it.
2269 	 */
2270 	if (!agbp) {
2271 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2272 		if (error)
2273 			goto out_no_agbp;
2274 		if (!agbp) {
2275 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2276 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2277 			goto out_no_agbp;
2278 		}
2279 	}
2280 
2281 	/* reset a padding mismatched agfl before final free space check */
2282 	if (pag->pagf_agflreset)
2283 		xfs_agfl_reset(tp, agbp, pag);
2284 
2285 	/* If there isn't enough total or contiguous free space, reject it. */
2286 	need = xfs_alloc_min_freelist(mp, pag);
2287 	if (!xfs_alloc_space_available(args, need, flags))
2288 		goto out_agbp_relse;
2289 
2290 	/*
2291 	 * Make the freelist shorter if it's too long.
2292 	 *
2293 	 * Note that from this point onwards, we will always release the agf and
2294 	 * agfl buffers on error. This handles the case where we error out and
2295 	 * the buffers are clean or may not have been joined to the transaction
2296 	 * and hence need to be released manually. If they have been joined to
2297 	 * the transaction, then xfs_trans_brelse() will handle them
2298 	 * appropriately based on the recursion count and dirty state of the
2299 	 * buffer.
2300 	 *
2301 	 * XXX (dgc): When we have lots of free space, does this buy us
2302 	 * anything other than extra overhead when we need to put more blocks
2303 	 * back on the free list? Maybe we should only do this when space is
2304 	 * getting low or the AGFL is more than half full?
2305 	 *
2306 	 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2307 	 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2308 	 * updating the rmapbt.  Both flags are used in xfs_repair while we're
2309 	 * rebuilding the rmapbt, and neither are used by the kernel.  They're
2310 	 * both required to ensure that rmaps are correctly recorded for the
2311 	 * regenerated AGFL, bnobt, and cntbt.  See repair/phase5.c and
2312 	 * repair/rmap.c in xfsprogs for details.
2313 	 */
2314 	memset(&targs, 0, sizeof(targs));
2315 	/* struct copy below */
2316 	if (flags & XFS_ALLOC_FLAG_NORMAP)
2317 		targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2318 	else
2319 		targs.oinfo = XFS_RMAP_OINFO_AG;
2320 	while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
2321 		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2322 		if (error)
2323 			goto out_agbp_relse;
2324 
2325 		/* defer agfl frees */
2326 		xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2327 	}
2328 
2329 	targs.tp = tp;
2330 	targs.mp = mp;
2331 	targs.agbp = agbp;
2332 	targs.agno = args->agno;
2333 	targs.alignment = targs.minlen = targs.prod = 1;
2334 	targs.type = XFS_ALLOCTYPE_THIS_AG;
2335 	targs.pag = pag;
2336 	error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2337 	if (error)
2338 		goto out_agbp_relse;
2339 
2340 	/* Make the freelist longer if it's too short. */
2341 	while (pag->pagf_flcount < need) {
2342 		targs.agbno = 0;
2343 		targs.maxlen = need - pag->pagf_flcount;
2344 		targs.resv = XFS_AG_RESV_AGFL;
2345 
2346 		/* Allocate as many blocks as possible at once. */
2347 		error = xfs_alloc_ag_vextent(&targs);
2348 		if (error)
2349 			goto out_agflbp_relse;
2350 
2351 		/*
2352 		 * Stop if we run out.  Won't happen if callers are obeying
2353 		 * the restrictions correctly.  Can happen for free calls
2354 		 * on a completely full ag.
2355 		 */
2356 		if (targs.agbno == NULLAGBLOCK) {
2357 			if (flags & XFS_ALLOC_FLAG_FREEING)
2358 				break;
2359 			goto out_agflbp_relse;
2360 		}
2361 		/*
2362 		 * Put each allocated block on the list.
2363 		 */
2364 		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2365 			error = xfs_alloc_put_freelist(tp, agbp,
2366 							agflbp, bno, 0);
2367 			if (error)
2368 				goto out_agflbp_relse;
2369 		}
2370 	}
2371 	xfs_trans_brelse(tp, agflbp);
2372 	args->agbp = agbp;
2373 	return 0;
2374 
2375 out_agflbp_relse:
2376 	xfs_trans_brelse(tp, agflbp);
2377 out_agbp_relse:
2378 	if (agbp)
2379 		xfs_trans_brelse(tp, agbp);
2380 out_no_agbp:
2381 	args->agbp = NULL;
2382 	return error;
2383 }
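
/*
 * A worked example: if xfs_alloc_min_freelist() asks for 6 blocks and
 * pagf_flcount is 10, the shrink loop above hands 4 surplus blocks back as
 * deferred AGFL frees; if pagf_flcount is only 3, the grow loop allocates
 * up to 3 blocks against XFS_AG_RESV_AGFL and puts each one on the
 * freelist with xfs_alloc_put_freelist().
 */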
2384 
2385 /*
2386  * Get a block from the freelist.
2387  * Returns with the buffer for the block gotten.
2388  */
2389 int				/* error */
2390 xfs_alloc_get_freelist(
2391 	xfs_trans_t	*tp,	/* transaction pointer */
2392 	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
2393 	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
2394 	int		btreeblk) /* destination is an AGF btree */
2395 {
2396 	xfs_agf_t	*agf;	/* a.g. freespace structure */
2397 	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
2398 	xfs_agblock_t	bno;	/* block number returned */
2399 	__be32		*agfl_bno;
2400 	int		error;
2401 	int		logflags;
2402 	xfs_mount_t	*mp = tp->t_mountp;
2403 	xfs_perag_t	*pag;	/* per allocation group data */
2404 
2405 	/*
2406 	 * Freelist is empty, give up.
2407 	 */
2408 	agf = XFS_BUF_TO_AGF(agbp);
2409 	if (!agf->agf_flcount) {
2410 		*bnop = NULLAGBLOCK;
2411 		return 0;
2412 	}
2413 	/*
2414 	 * Read the array of free blocks.
2415 	 */
2416 	error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2417 				    &agflbp);
2418 	if (error)
2419 		return error;
2420 
2421 
2422 	/*
2423 	 * Get the block number and update the data structures.
2424 	 */
2425 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2426 	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2427 	be32_add_cpu(&agf->agf_flfirst, 1);
2428 	xfs_trans_brelse(tp, agflbp);
2429 	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2430 		agf->agf_flfirst = 0;
2431 
2432 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2433 	ASSERT(!pag->pagf_agflreset);
2434 	be32_add_cpu(&agf->agf_flcount, -1);
2435 	xfs_trans_agflist_delta(tp, -1);
2436 	pag->pagf_flcount--;
2437 
2438 	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2439 	if (btreeblk) {
2440 		be32_add_cpu(&agf->agf_btreeblks, 1);
2441 		pag->pagf_btreeblks++;
2442 		logflags |= XFS_AGF_BTREEBLKS;
2443 	}
2444 	xfs_perag_put(pag);
2445 
2446 	xfs_alloc_log_agf(tp, agbp, logflags);
2447 	*bnop = bno;
2448 
2449 	return 0;
2450 }
2451 
2452 /*
2453  * Log the given fields from the agf structure.
2454  */
2455 void
2456 xfs_alloc_log_agf(
2457 	xfs_trans_t	*tp,	/* transaction pointer */
2458 	xfs_buf_t	*bp,	/* buffer for a.g. freelist header */
2459 	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
2460 {
2461 	int	first;		/* first byte offset */
2462 	int	last;		/* last byte offset */
2463 	static const short	offsets[] = {
2464 		offsetof(xfs_agf_t, agf_magicnum),
2465 		offsetof(xfs_agf_t, agf_versionnum),
2466 		offsetof(xfs_agf_t, agf_seqno),
2467 		offsetof(xfs_agf_t, agf_length),
2468 		offsetof(xfs_agf_t, agf_roots[0]),
2469 		offsetof(xfs_agf_t, agf_levels[0]),
2470 		offsetof(xfs_agf_t, agf_flfirst),
2471 		offsetof(xfs_agf_t, agf_fllast),
2472 		offsetof(xfs_agf_t, agf_flcount),
2473 		offsetof(xfs_agf_t, agf_freeblks),
2474 		offsetof(xfs_agf_t, agf_longest),
2475 		offsetof(xfs_agf_t, agf_btreeblks),
2476 		offsetof(xfs_agf_t, agf_uuid),
2477 		offsetof(xfs_agf_t, agf_rmap_blocks),
2478 		offsetof(xfs_agf_t, agf_refcount_blocks),
2479 		offsetof(xfs_agf_t, agf_refcount_root),
2480 		offsetof(xfs_agf_t, agf_refcount_level),
2481 		/* needed so that we don't log the whole rest of the structure: */
2482 		offsetof(xfs_agf_t, agf_spare64),
2483 		sizeof(xfs_agf_t)
2484 	};
2485 
2486 	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2487 
2488 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2489 
2490 	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2491 	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2492 }
2493 
2494 /*
2495  * Interface for inode allocation to force the pag data to be initialized.
2496  */
2497 int					/* error */
2498 xfs_alloc_pagf_init(
2499 	xfs_mount_t		*mp,	/* file system mount structure */
2500 	xfs_trans_t		*tp,	/* transaction pointer */
2501 	xfs_agnumber_t		agno,	/* allocation group number */
2502 	int			flags)	/* XFS_ALLOC_FLAGS_... */
2503 {
2504 	xfs_buf_t		*bp;
2505 	int			error;
2506 
2507 	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2508 		return error;
2509 	if (bp)
2510 		xfs_trans_brelse(tp, bp);
2511 	return 0;
2512 }
2513 
2514 /*
2515  * Put the block on the freelist for the allocation group.
2516  */
2517 int					/* error */
2518 xfs_alloc_put_freelist(
2519 	xfs_trans_t		*tp,	/* transaction pointer */
2520 	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
2521 	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
2522 	xfs_agblock_t		bno,	/* block being freed */
2523 	int			btreeblk) /* block came from an AGF btree */
2524 {
2525 	xfs_agf_t		*agf;	/* a.g. freespace structure */
2526 	__be32			*blockp;/* pointer to array entry */
2527 	int			error;
2528 	int			logflags;
2529 	xfs_mount_t		*mp;	/* mount structure */
2530 	xfs_perag_t		*pag;	/* per allocation group data */
2531 	__be32			*agfl_bno;
2532 	int			startoff;
2533 
2534 	agf = XFS_BUF_TO_AGF(agbp);
2535 	mp = tp->t_mountp;
2536 
2537 	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2538 			be32_to_cpu(agf->agf_seqno), &agflbp)))
2539 		return error;
2540 	be32_add_cpu(&agf->agf_fllast, 1);
2541 	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
2542 		agf->agf_fllast = 0;
2543 
2544 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2545 	ASSERT(!pag->pagf_agflreset);
2546 	be32_add_cpu(&agf->agf_flcount, 1);
2547 	xfs_trans_agflist_delta(tp, 1);
2548 	pag->pagf_flcount++;
2549 
2550 	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2551 	if (btreeblk) {
2552 		be32_add_cpu(&agf->agf_btreeblks, -1);
2553 		pag->pagf_btreeblks--;
2554 		logflags |= XFS_AGF_BTREEBLKS;
2555 	}
2556 	xfs_perag_put(pag);
2557 
2558 	xfs_alloc_log_agf(tp, agbp, logflags);
2559 
2560 	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2561 
2562 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2563 	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2564 	*blockp = cpu_to_be32(bno);
2565 	startoff = (char *)blockp - (char *)agflbp->b_addr;
2566 
2567 	xfs_alloc_log_agf(tp, agbp, logflags);
2568 
2569 	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2570 	xfs_trans_log_buf(tp, agflbp, startoff,
2571 			  startoff + sizeof(xfs_agblock_t) - 1);
2572 	return 0;
2573 }
2574 
2575 static xfs_failaddr_t
2576 xfs_agf_verify(
2577 	struct xfs_buf		*bp)
2578 {
2579 	struct xfs_mount	*mp = bp->b_target->bt_mount;
2580 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
2581 
2582 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2583 		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
2584 			return __this_address;
2585 		if (!xfs_log_check_lsn(mp,
2586 				be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
2587 			return __this_address;
2588 	}
2589 
2590 	if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2591 	      XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2592 	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2593 	      be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
2594 	      be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
2595 	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
2596 		return __this_address;
2597 
2598 	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2599 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2600 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
2601 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
2602 		return __this_address;
2603 
2604 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2605 	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2606 	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
2607 		return __this_address;
2608 
2609 	/*
2610 	 * during growfs operations, the perag is not fully initialised,
2611 	 * so we can't use it for any useful checking. growfs ensures we can't
2612 	 * use it by using uncached buffers that don't have the perag attached
2613 	 * so we can detect and avoid this problem.
2614 	 */
2615 	if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2616 		return __this_address;
2617 
2618 	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2619 	    be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2620 		return __this_address;
2621 
2622 	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2623 	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
2624 	     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
2625 		return __this_address;
2626 
2627 	return NULL;
2628 
2629 }
2630 
2631 static void
2632 xfs_agf_read_verify(
2633 	struct xfs_buf	*bp)
2634 {
2635 	struct xfs_mount *mp = bp->b_target->bt_mount;
2636 	xfs_failaddr_t	fa;
2637 
2638 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
2639 	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2640 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2641 	else {
2642 		fa = xfs_agf_verify(bp);
2643 		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
2644 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2645 	}
2646 }
2647 
2648 static void
2649 xfs_agf_write_verify(
2650 	struct xfs_buf	*bp)
2651 {
2652 	struct xfs_mount	*mp = bp->b_target->bt_mount;
2653 	struct xfs_buf_log_item	*bip = bp->b_log_item;
2654 	xfs_failaddr_t		fa;
2655 
2656 	fa = xfs_agf_verify(bp);
2657 	if (fa) {
2658 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2659 		return;
2660 	}
2661 
2662 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2663 		return;
2664 
2665 	if (bip)
2666 		XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2667 
2668 	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
2669 }
2670 
2671 const struct xfs_buf_ops xfs_agf_buf_ops = {
2672 	.name = "xfs_agf",
2673 	.verify_read = xfs_agf_read_verify,
2674 	.verify_write = xfs_agf_write_verify,
2675 	.verify_struct = xfs_agf_verify,
2676 };
2677 
2678 /*
2679  * Read in the allocation group header (free/alloc section).
2680  */
2681 int					/* error */
2682 xfs_read_agf(
2683 	struct xfs_mount	*mp,	/* mount point structure */
2684 	struct xfs_trans	*tp,	/* transaction pointer */
2685 	xfs_agnumber_t		agno,	/* allocation group number */
2686 	int			flags,	/* XFS_BUF_ */
2687 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2688 {
2689 	int		error;
2690 
2691 	trace_xfs_read_agf(mp, agno);
2692 
2693 	ASSERT(agno != NULLAGNUMBER);
2694 	error = xfs_trans_read_buf(
2695 			mp, tp, mp->m_ddev_targp,
2696 			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2697 			XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
2698 	if (error)
2699 		return error;
2700 	if (!*bpp)
2701 		return 0;
2702 
2703 	ASSERT(!(*bpp)->b_error);
2704 	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2705 	return 0;
2706 }
2707 
2708 /*
2709  * Read in the allocation group header (free/alloc section).
2710  */
2711 int					/* error */
2712 xfs_alloc_read_agf(
2713 	struct xfs_mount	*mp,	/* mount point structure */
2714 	struct xfs_trans	*tp,	/* transaction pointer */
2715 	xfs_agnumber_t		agno,	/* allocation group number */
2716 	int			flags,	/* XFS_ALLOC_FLAG_... */
2717 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2718 {
2719 	struct xfs_agf		*agf;		/* ag freelist header */
2720 	struct xfs_perag	*pag;		/* per allocation group data */
2721 	int			error;
2722 
2723 	trace_xfs_alloc_read_agf(mp, agno);
2724 
2725 	ASSERT(agno != NULLAGNUMBER);
2726 	error = xfs_read_agf(mp, tp, agno,
2727 			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2728 			bpp);
2729 	if (error)
2730 		return error;
2731 	if (!*bpp)
2732 		return 0;
2733 	ASSERT(!(*bpp)->b_error);
2734 
2735 	agf = XFS_BUF_TO_AGF(*bpp);
2736 	pag = xfs_perag_get(mp, agno);
2737 	if (!pag->pagf_init) {
2738 		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2739 		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2740 		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2741 		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2742 		pag->pagf_levels[XFS_BTNUM_BNOi] =
2743 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2744 		pag->pagf_levels[XFS_BTNUM_CNTi] =
2745 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2746 		pag->pagf_levels[XFS_BTNUM_RMAPi] =
2747 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
2748 		pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
2749 		pag->pagf_init = 1;
2750 		pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
2751 	}
2752 #ifdef DEBUG
2753 	else if (!XFS_FORCED_SHUTDOWN(mp)) {
2754 		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2755 		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2756 		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2757 		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2758 		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2759 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2760 		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2761 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2762 	}
2763 #endif
2764 	xfs_perag_put(pag);
2765 	return 0;
2766 }
2767 
2768 /*
2769  * Allocate an extent (variable-size).
2770  * Depending on the allocation type, we either look in a single allocation
2771  * group or loop over the allocation groups to find the result.
2772  */
2773 int				/* error */
2774 xfs_alloc_vextent(
2775 	struct xfs_alloc_arg	*args)	/* allocation argument structure */
2776 {
2777 	xfs_agblock_t		agsize;	/* allocation group size */
2778 	int			error;
2779 	int			flags;	/* XFS_ALLOC_FLAG_... locking flags */
2780 	struct xfs_mount	*mp;	/* mount structure pointer */
2781 	xfs_agnumber_t		sagno;	/* starting allocation group number */
2782 	xfs_alloctype_t		type;	/* input allocation type */
2783 	int			bump_rotor = 0;
2784 	xfs_agnumber_t		rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2785 
2786 	mp = args->mp;
2787 	type = args->otype = args->type;
2788 	args->agbno = NULLAGBLOCK;
2789 	/*
2790 	 * Just fix this up, for the case where the last a.g. is shorter
2791 	 * (or there's only one a.g.) and the caller couldn't easily figure
2792 	 * that out (xfs_bmap_alloc).
2793 	 */
2794 	agsize = mp->m_sb.sb_agblocks;
2795 	if (args->maxlen > agsize)
2796 		args->maxlen = agsize;
2797 	if (args->alignment == 0)
2798 		args->alignment = 1;
2799 	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2800 	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2801 	ASSERT(args->minlen <= args->maxlen);
2802 	ASSERT(args->minlen <= agsize);
2803 	ASSERT(args->mod < args->prod);
2804 	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2805 	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2806 	    args->minlen > args->maxlen || args->minlen > agsize ||
2807 	    args->mod >= args->prod) {
2808 		args->fsbno = NULLFSBLOCK;
2809 		trace_xfs_alloc_vextent_badargs(args);
2810 		return 0;
2811 	}
2812 
2813 	switch (type) {
2814 	case XFS_ALLOCTYPE_THIS_AG:
2815 	case XFS_ALLOCTYPE_NEAR_BNO:
2816 	case XFS_ALLOCTYPE_THIS_BNO:
2817 		/*
2818 		 * These three force us into a single a.g.
2819 		 */
2820 		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2821 		args->pag = xfs_perag_get(mp, args->agno);
2822 		error = xfs_alloc_fix_freelist(args, 0);
2823 		if (error) {
2824 			trace_xfs_alloc_vextent_nofix(args);
2825 			goto error0;
2826 		}
2827 		if (!args->agbp) {
2828 			trace_xfs_alloc_vextent_noagbp(args);
2829 			break;
2830 		}
2831 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2832 		if ((error = xfs_alloc_ag_vextent(args)))
2833 			goto error0;
2834 		break;
2835 	case XFS_ALLOCTYPE_START_BNO:
2836 		/*
2837 		 * Try near allocation first, then anywhere-in-ag after
2838 		 * the first a.g. fails.
2839 		 */
2840 		if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
2841 		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2842 			args->fsbno = XFS_AGB_TO_FSB(mp,
2843 					((mp->m_agfrotor / rotorstep) %
2844 					mp->m_sb.sb_agcount), 0);
2845 			bump_rotor = 1;
2846 		}
2847 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2848 		args->type = XFS_ALLOCTYPE_NEAR_BNO;
2849 		/* FALLTHROUGH */
2850 	case XFS_ALLOCTYPE_FIRST_AG:
2851 		/*
2852 		 * Rotate through the allocation groups looking for a winner.
2853 		 */
2854 		if (type == XFS_ALLOCTYPE_FIRST_AG) {
2855 			/*
2856 			 * Start with allocation group given by bno.
2857 			 */
2858 			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2859 			args->type = XFS_ALLOCTYPE_THIS_AG;
2860 			sagno = 0;
2861 			flags = 0;
2862 		} else {
2863 			/*
2864 			 * Start with the given allocation group.
2865 			 */
2866 			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2867 			flags = XFS_ALLOC_FLAG_TRYLOCK;
2868 		}
2869 		/*
2870 		 * Loop over allocation groups twice; first time with
2871 		 * trylock set, second time without.
2872 		 */
2873 		for (;;) {
2874 			args->pag = xfs_perag_get(mp, args->agno);
2875 			error = xfs_alloc_fix_freelist(args, flags);
2876 			if (error) {
2877 				trace_xfs_alloc_vextent_nofix(args);
2878 				goto error0;
2879 			}
2880 			/*
2881 			 * If we get a buffer back then the allocation will fly.
2882 			 */
2883 			if (args->agbp) {
2884 				if ((error = xfs_alloc_ag_vextent(args)))
2885 					goto error0;
2886 				break;
2887 			}
2888 
2889 			trace_xfs_alloc_vextent_loopfailed(args);
2890 
2891 			/*
2892 			 * Didn't work, figure out the next iteration.
2893 			 */
2894 			if (args->agno == sagno &&
2895 			    type == XFS_ALLOCTYPE_START_BNO)
2896 				args->type = XFS_ALLOCTYPE_THIS_AG;
2897 			/*
2898 			 * For the first allocation, we can try any AG to get
2899 			 * space.  However, if we already have allocated a
2900 			 * block, we don't want to try AGs whose number is below
2901 			 * sagno. Otherwise, we may end up with out-of-order
2902 			 * locking of AGF, which might cause deadlock.
2903 			 */
2904 			if (++(args->agno) == mp->m_sb.sb_agcount) {
2905 				if (args->tp->t_firstblock != NULLFSBLOCK)
2906 					args->agno = sagno;
2907 				else
2908 					args->agno = 0;
2909 			}
2910 			/*
2911 			 * Reached the starting a.g., must either be done
2912 			 * or switch to non-trylock mode.
2913 			 */
2914 			if (args->agno == sagno) {
2915 				if (flags == 0) {
2916 					args->agbno = NULLAGBLOCK;
2917 					trace_xfs_alloc_vextent_allfailed(args);
2918 					break;
2919 				}
2920 
2921 				flags = 0;
2922 				if (type == XFS_ALLOCTYPE_START_BNO) {
2923 					args->agbno = XFS_FSB_TO_AGBNO(mp,
2924 						args->fsbno);
2925 					args->type = XFS_ALLOCTYPE_NEAR_BNO;
2926 				}
2927 			}
2928 			xfs_perag_put(args->pag);
2929 		}
2930 		if (bump_rotor) {
2931 			if (args->agno == sagno)
2932 				mp->m_agfrotor = (mp->m_agfrotor + 1) %
2933 					(mp->m_sb.sb_agcount * rotorstep);
2934 			else
2935 				mp->m_agfrotor = (args->agno * rotorstep + 1) %
2936 					(mp->m_sb.sb_agcount * rotorstep);
2937 		}
2938 		break;
2939 	default:
2940 		ASSERT(0);
2941 		/* NOTREACHED */
2942 	}
2943 	if (args->agbno == NULLAGBLOCK)
2944 		args->fsbno = NULLFSBLOCK;
2945 	else {
2946 		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2947 #ifdef DEBUG
2948 		ASSERT(args->len >= args->minlen);
2949 		ASSERT(args->len <= args->maxlen);
2950 		ASSERT(args->agbno % args->alignment == 0);
2951 		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2952 			args->len);
2953 #endif
2954 
2955 		/* Zero the extent if we were asked to do so */
2956 		if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
2957 			error = xfs_zero_extent(args->ip, args->fsbno, args->len);
2958 			if (error)
2959 				goto error0;
2960 		}
2961 
2962 	}
2963 	xfs_perag_put(args->pag);
2964 	return 0;
2965 error0:
2966 	xfs_perag_put(args->pag);
2967 	return error;
2968 }
2969 
2970 /* Ensure that the freelist is at full capacity. */
2971 int
2972 xfs_free_extent_fix_freelist(
2973 	struct xfs_trans	*tp,
2974 	xfs_agnumber_t		agno,
2975 	struct xfs_buf		**agbp)
2976 {
2977 	struct xfs_alloc_arg	args;
2978 	int			error;
2979 
2980 	memset(&args, 0, sizeof(struct xfs_alloc_arg));
2981 	args.tp = tp;
2982 	args.mp = tp->t_mountp;
2983 	args.agno = agno;
2984 
2985 	/*
2986 	 * validate that the AG number is legal - this enables us to detect
2987 	 * and handle a silent filesystem corruption rather than crashing.
2988 	 */
2989 	if (args.agno >= args.mp->m_sb.sb_agcount)
2990 		return -EFSCORRUPTED;
2991 
2992 	args.pag = xfs_perag_get(args.mp, args.agno);
2993 	ASSERT(args.pag);
2994 
2995 	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2996 	if (error)
2997 		goto out;
2998 
2999 	*agbp = args.agbp;
3000 out:
3001 	xfs_perag_put(args.pag);
3002 	return error;
3003 }
3004 
3005 /*
3006  * Free an extent.
3007  * Just break up the extent address and hand off to xfs_free_ag_extent
3008  * after fixing up the freelist.
3009  */
3010 int
3011 __xfs_free_extent(
3012 	struct xfs_trans		*tp,
3013 	xfs_fsblock_t			bno,
3014 	xfs_extlen_t			len,
3015 	const struct xfs_owner_info	*oinfo,
3016 	enum xfs_ag_resv_type		type,
3017 	bool				skip_discard)
3018 {
3019 	struct xfs_mount		*mp = tp->t_mountp;
3020 	struct xfs_buf			*agbp;
3021 	xfs_agnumber_t			agno = XFS_FSB_TO_AGNO(mp, bno);
3022 	xfs_agblock_t			agbno = XFS_FSB_TO_AGBNO(mp, bno);
3023 	int				error;
3024 	unsigned int			busy_flags = 0;
3025 
3026 	ASSERT(len != 0);
3027 	ASSERT(type != XFS_AG_RESV_AGFL);
3028 
3029 	if (XFS_TEST_ERROR(false, mp,
3030 			XFS_ERRTAG_FREE_EXTENT))
3031 		return -EIO;
3032 
3033 	error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
3034 	if (error)
3035 		return error;
3036 
3037 	XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
3038 
3039 	/* validate the extent size is legal now we have the agf locked */
3040 	XFS_WANT_CORRUPTED_GOTO(mp,
3041 		agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
3042 				err);
3043 
3044 	error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
3045 	if (error)
3046 		goto err;
3047 
3048 	if (skip_discard)
3049 		busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3050 	xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
3051 	return 0;
3052 
3053 err:
3054 	xfs_trans_brelse(tp, agbp);
3055 	return error;
3056 }
3057 
3058 struct xfs_alloc_query_range_info {
3059 	xfs_alloc_query_range_fn	fn;
3060 	void				*priv;
3061 };
3062 
3063 /* Format btree record and pass to our callback. */
3064 STATIC int
3065 xfs_alloc_query_range_helper(
3066 	struct xfs_btree_cur		*cur,
3067 	union xfs_btree_rec		*rec,
3068 	void				*priv)
3069 {
3070 	struct xfs_alloc_query_range_info	*query = priv;
3071 	struct xfs_alloc_rec_incore		irec;
3072 
3073 	irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
3074 	irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
3075 	return query->fn(cur, &irec, query->priv);
3076 }
3077 
3078 /* Find all free space within a given range of blocks. */
3079 int
3080 xfs_alloc_query_range(
3081 	struct xfs_btree_cur			*cur,
3082 	struct xfs_alloc_rec_incore		*low_rec,
3083 	struct xfs_alloc_rec_incore		*high_rec,
3084 	xfs_alloc_query_range_fn		fn,
3085 	void					*priv)
3086 {
3087 	union xfs_btree_irec			low_brec;
3088 	union xfs_btree_irec			high_brec;
3089 	struct xfs_alloc_query_range_info	query;
3090 
3091 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3092 	low_brec.a = *low_rec;
3093 	high_brec.a = *high_rec;
3094 	query.priv = priv;
3095 	query.fn = fn;
3096 	return xfs_btree_query_range(cur, &low_brec, &high_brec,
3097 			xfs_alloc_query_range_helper, &query);
3098 }
3099 
3100 /* Find all free space records. */
3101 int
3102 xfs_alloc_query_all(
3103 	struct xfs_btree_cur			*cur,
3104 	xfs_alloc_query_range_fn		fn,
3105 	void					*priv)
3106 {
3107 	struct xfs_alloc_query_range_info	query;
3108 
3109 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3110 	query.priv = priv;
3111 	query.fn = fn;
3112 	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3113 }
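
/*
 * Illustrative sketch only: count the records in the by-block free space
 * btree with xfs_alloc_query_all().  The callback signature is inferred
 * from xfs_alloc_query_range_helper() above and the xfs_alloc_example_*
 * names are hypothetical, not part of the XFS API.
 */
static int __maybe_unused
xfs_alloc_example_count_rec(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	unsigned int			*nr = priv;

	(*nr)++;
	return 0;
}

static int __maybe_unused
xfs_alloc_example_count_free(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	xfs_agnumber_t			agno,
	unsigned int			*nr)
{
	struct xfs_btree_cur		*cur;
	int				error;

	*nr = 0;
	cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	error = xfs_alloc_query_all(cur, xfs_alloc_example_count_rec, nr);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}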
3114 
3115 /* Is there a record covering a given extent? */
3116 int
3117 xfs_alloc_has_record(
3118 	struct xfs_btree_cur	*cur,
3119 	xfs_agblock_t		bno,
3120 	xfs_extlen_t		len,
3121 	bool			*exists)
3122 {
3123 	union xfs_btree_irec	low;
3124 	union xfs_btree_irec	high;
3125 
3126 	memset(&low, 0, sizeof(low));
3127 	low.a.ar_startblock = bno;
3128 	memset(&high, 0xFF, sizeof(high));
3129 	high.a.ar_startblock = bno + len - 1;
3130 
3131 	return xfs_btree_has_record(cur, &low, &high, exists);
3132 }
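
/*
 * Illustrative sketch only: probe the by-block btree for any free space
 * record overlapping a candidate extent.  The cursor handling mirrors
 * xfs_free_ag_extent() above; the xfs_alloc_example_* name is hypothetical.
 */
static int __maybe_unused
xfs_alloc_example_extent_has_free(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*has_free)
{
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	error = xfs_alloc_has_record(cur, bno, len, has_free);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}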
3133 
3134 /*
3135  * Walk all the blocks in the AGFL.  The @walk_fn can return any negative
3136  * error code or XFS_BTREE_QUERY_RANGE_ABORT.
3137  */
3138 int
3139 xfs_agfl_walk(
3140 	struct xfs_mount	*mp,
3141 	struct xfs_agf		*agf,
3142 	struct xfs_buf		*agflbp,
3143 	xfs_agfl_walk_fn	walk_fn,
3144 	void			*priv)
3145 {
3146 	__be32			*agfl_bno;
3147 	unsigned int		i;
3148 	int			error;
3149 
3150 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
3151 	i = be32_to_cpu(agf->agf_flfirst);
3152 
3153 	/* Nothing to walk in an empty AGFL. */
3154 	if (agf->agf_flcount == cpu_to_be32(0))
3155 		return 0;
3156 
3157 	/* Otherwise, walk from first to last, wrapping as needed. */
3158 	for (;;) {
3159 		error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
3160 		if (error)
3161 			return error;
3162 		if (i == be32_to_cpu(agf->agf_fllast))
3163 			break;
3164 		if (++i == xfs_agfl_size(mp))
3165 			i = 0;
3166 	}
3167 
3168 	return 0;
3169 }
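
/*
 * Illustrative sketch only: count the blocks on an AGFL by walking it.  The
 * walker signature is inferred from the walk_fn() call above; the
 * xfs_alloc_example_* names are hypothetical.
 */
static int __maybe_unused
xfs_alloc_example_agfl_count_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	unsigned int		*nr = priv;

	(*nr)++;
	return 0;
}

static int __maybe_unused
xfs_alloc_example_agfl_count(
	struct xfs_mount	*mp,
	struct xfs_agf		*agf,
	struct xfs_buf		*agflbp,
	unsigned int		*nr)
{
	*nr = 0;
	return xfs_agfl_walk(mp, agf, agflbp,
			xfs_alloc_example_agfl_count_block, nr);
}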
3170