/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_ag_resv.h"

/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped.  Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There can only be one owner
 * of a metadata extent, usually an inode or some other metadata structure like
 * an AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/management of the tree. It is intended that most
 * reverse lookups will be to find the owner(s) of a particular block, or to
 * try to recover tree and file data from corrupt primary metadata.
 */

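/*
 * Purely illustrative sketch, kept out of the build: the incore form of a
 * single reverse mapping record, showing the fields that make up the
 * [ag block, owner, offset] ordering described above.  The field values
 * below are made up for the example.
 */
#if 0
static const struct xfs_rmap_irec example_rmap = {
	.rm_startblock	= 100,	/* AG block of the mapped extent */
	.rm_blockcount	= 8,	/* extent length in filesystem blocks */
	.rm_owner	= 131,	/* owning inode (or an XFS_RMAP_OWN_* value) */
	.rm_offset	= 0,	/* logical offset within the owning file */
};
#endif
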
static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno);
}

STATIC void
xfs_rmapbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

STATIC int
xfs_rmapbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			bno, 1);
	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
			false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);
	be32_add_cpu(&agf->agf_rmap_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}

STATIC int
xfs_rmapbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			bno, 1);
	be32_add_cpu(&agf->agf_rmap_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);

	return 0;
}

STATIC int
xfs_rmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mnr[level != 0];
}

STATIC int
xfs_rmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mxr[level != 0];
}

STATIC void
xfs_rmapbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
}

/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record.  In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.  For example, a
 * data fork mapping of 8 blocks starting at agbno 100 and file offset 0
 * has a high key of [107, owner, 7].
 */
STATIC void
xfs_rmapbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	uint64_t		off;
	int			adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}

STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}

STATIC void
xfs_rmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

STATIC int64_t
xfs_rmapbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_rmap_irec	*rec = &cur->bc_rec.r;
	struct xfs_rmap_key	*kp = &key->rmap;
	__u64			x, y;
	int64_t			d;

	d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
	y = rec->rm_offset;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}

STATIC int64_t
xfs_rmapbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	struct xfs_rmap_key	*kp1 = &k1->rmap;
	struct xfs_rmap_key	*kp2 = &k2->rmap;
	int64_t			d;
	__u64			x, y;

	d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
		       be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	x = be64_to_cpu(kp1->rm_owner);
	y = be64_to_cpu(kp2->rm_owner);
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
	y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}

static xfs_failaddr_t
xfs_rmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner as
	 * the perag is not fully initialised and hence not attached to the
	 * buffer.  In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum limits
	 * in this case.
	 */
	if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
		return __this_address;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
			return __this_address;
	} else if (level >= mp->m_rmap_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}

static void
xfs_rmapbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_rmapbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_rmapbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_rmapbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
	.name			= "xfs_rmapbt",
	.verify_read		= xfs_rmapbt_read_verify,
	.verify_write		= xfs_rmapbt_write_verify,
	.verify_struct		= xfs_rmapbt_verify,
};

STATIC int
xfs_rmapbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

STATIC int
xfs_rmapbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

static const struct xfs_btree_ops xfs_rmapbt_ops = {
	.rec_len		= sizeof(struct xfs_rmap_rec),
	.key_len		= 2 * sizeof(struct xfs_rmap_key),

	.dup_cursor		= xfs_rmapbt_dup_cursor,
	.set_root		= xfs_rmapbt_set_root,
	.alloc_block		= xfs_rmapbt_alloc_block,
	.free_block		= xfs_rmapbt_free_block,
	.get_minrecs		= xfs_rmapbt_get_minrecs,
	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rmapbt_key_diff,
	.buf_ops		= &xfs_rmapbt_buf_ops,
	.diff_two_keys		= xfs_rmapbt_diff_two_keys,
	.keys_inorder		= xfs_rmapbt_keys_inorder,
	.recs_inorder		= xfs_rmapbt_recs_inorder,
};

/*
 * Allocate a new reverse mapping btree cursor.
 */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* Overlapping btree; 2 keys per pointer. */
	cur->bc_btnum = XFS_BTNUM_RMAP;
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_rmapbt_ops;
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

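/*
 * Illustrative sketch only, kept out of the build: how a caller might
 * construct and tear down an rmapbt cursor.  The AGF read and buffer
 * release calls are the ones already used by xfs_rmapbt_calc_reserves()
 * below; the function name is made up for the example.
 */
#if 0
static int
xfs_rmapbt_cursor_example(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(mp, NULL, agbp, agno);
	/* ... perform lookups/updates through the generic btree code ... */
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_buf_relse(agbp);
	return 0;
}
#endif
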
/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_RMAP_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}

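/*
 * Worked example (illustrative only; it assumes a 4096-byte filesystem
 * block, a 56-byte short-form CRC btree block header, a 24-byte
 * xfs_rmap_rec, a 20-byte xfs_rmap_key and a 4-byte xfs_rmap_ptr_t):
 *
 *   leaf:  (4096 - 56) / 24            = 168 records per block
 *   node:  (4096 - 56) / (2 * 20 + 4)  =  91 key/pointer sets per block
 */
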
/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	/*
	 * On a non-reflink filesystem, the maximum number of rmap
	 * records is the number of blocks in the AG, hence the max
	 * rmapbt height is log_$maxrecs($agblocks).  However, with
	 * reflink each AG block can have up to 2^32 (per the refcount
	 * record format) owners, which means that theoretically we
	 * could face up to 2^64 rmap records.
	 *
	 * That effectively means that the max rmapbt height must be
	 * XFS_BTREE_MAXLEVELS.  "Fortunately" we'll run out of AG
	 * blocks to feed the rmapbt long before the rmapbt reaches
	 * maximum height.  The reflink code uses ag_resv_critical to
	 * disallow reflinking when less than 10% of the per-AG metadata
	 * block reservation is left, since the fallback is a regular
	 * file copy.
	 */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
	else
		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
				mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the rmap btree size for some records. */
xfs_extlen_t
xfs_rmapbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp, mp->m_rmap_mnr, len);
}

/*
 * Calculate the maximum rmap btree size.
 */
xfs_extlen_t
xfs_rmapbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rmap_mxr[0] == 0)
		return 0;

	return xfs_rmapbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_rmapbt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(agbp);
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
	xfs_buf_relse(agbp);

	/* Reserve 1% of the AG or enough for 1 block per record. */
	*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
	*used += tree_len;

	return error;
}
601