// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"

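/*
 * Duplicate the refcount btree cursor.  The btree core uses this to
 * preserve a search position across operations that might modify the
 * tree underneath it.
 */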
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.agno);
}

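/*
 * Point the AGF at a new btree root.  Both the root block number and
 * the tree height live in the AGF, so update and log whichever fields
 * change, and keep the cached per-AG height in sync.
 */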
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

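/*
 * Allocate a new refcount btree block.  We hint placement near the
 * refcount btree's original root block and charge the allocation to
 * the per-AG metadata reservation, so it should not fail for lack of
 * space.
 */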
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
			xfs_refc_block(args.mp));
	args.oinfo = XFS_RMAP_OINFO_REFC;
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_ag.agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	*stat = 1;
	return 0;

out_error:
	return error;
}

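/*
 * Free a refcount btree block, returning the space to the per-AG
 * metadata reservation and decrementing the block count in the AGF.
 */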
STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	return xfs_free_extent(cur->bc_tp, fsbno, 1, &XFS_RMAP_OINFO_REFC,
			XFS_AG_RESV_METADATA);
}

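/* Return the minimum number of records in a leaf or node block. */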
STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}

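/* Return the maximum number of records in a leaf or node block. */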
STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}

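/* The low key of a refcount record is its starting AG block. */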
STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

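/*
 * The high key of a refcount record is the last AG block that it
 * covers, i.e. startblock + blockcount - 1.  Range queries use this
 * key to decide whether a record overlaps the queried range.
 */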
STATIC void
xfs_refcountbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}

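/* Convert the in-core record in the cursor to its on-disk format. */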
STATIC void
xfs_refcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

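/* Start a tree walk at the root block recorded in the AGF. */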
STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_refcount_root;
}

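/*
 * Compare a key against the record in the cursor.  Only the sign of
 * the result matters; it steers the binary search through each block.
 */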
STATIC int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
	struct xfs_refcount_key		*kp = &key->refc;

	return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

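/* Compare two keys; again, only the sign of the result is used. */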
STATIC int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			  be32_to_cpu(k2->refc.rc_startblock);
}

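/*
 * Sanity-check an on-disk refcount btree block: the magic number, the
 * reflink feature bit, the v5 block header, a plausible tree level,
 * and the generic short-form btree block checks.
 */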
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_refcount_level)
			return __this_address;
	} else if (level >= mp->m_refc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

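/* Verify a block read from disk: check the checksum, then the structure. */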
STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_refcountbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

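/*
 * Verify a block about to be written; if it passes, compute the
 * checksum and stamp it into the block.
 */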
STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_refcountbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.magic			= { 0, cpu_to_be32(XFS_REFC_CRC_MAGIC) },
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
	.verify_struct		= xfs_refcountbt_verify,
};

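/* Keys are in order if the first starts strictly before the second. */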
STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

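/*
 * Records are in order if the first one ends at or before the start
 * of the second; refcount records never overlap.
 */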
STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->refc.rc_startblock) +
	       be32_to_cpu(r1->refc.rc_blockcount) <=
	       be32_to_cpu(r2->refc.rc_startblock);
}

static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
};

/*
 * Initialize a new refcount btree cursor.
 */
static struct xfs_btree_cur *
xfs_refcountbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno)
{
	struct xfs_btree_cur	*cur;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agno < mp->m_sb.sb_agcount);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = XFS_BTNUM_REFC;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

	cur->bc_ag.agno = agno;
	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_ag.refc.nr_ops = 0;
	cur->bc_ag.refc.shape_changes = 0;
	cur->bc_ops = &xfs_refcountbt_ops;
	return cur;
}

/* Create a btree cursor. */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_refcountbt_init_common(mp, tp, agno);
	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create a btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_refcountbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	xfs_agnumber_t		agno)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_refcountbt_init_common(mp, NULL, agno);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Swap in the new btree root.  Once we pass this point the newly rebuilt btree
 * is in place and we have to kill off all the old btree blocks.
 */
void
xfs_refcountbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_refcount_root = cpu_to_be32(afake->af_root);
	agf->agf_refcount_level = cpu_to_be32(afake->af_levels);
	agf->agf_refcount_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
				    XFS_AGF_REFCOUNT_ROOT |
				    XFS_AGF_REFCOUNT_LEVEL);
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
}

/*
 * Calculate the number of records in a refcount btree block.
 */
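/*
 * For example, with 4096-byte filesystem blocks, the 56-byte short-form
 * CRC block header leaves 4040 bytes of payload: 336 12-byte records
 * per leaf block, or 505 key/pointer pairs (4 + 4 bytes) per node
 * block.
 */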
int
xfs_refcountbt_maxrecs(
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (error)
		return error;

	agf = agbp->b_addr;
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_trans_brelse(tp, agbp);

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
		agblocks -= mp->m_sb.sb_logblocks;

	*ask += xfs_refcountbt_max_size(mp, agblocks);
	*used += tree_len;

	return error;
}