1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2016 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <darrick.wong@oracle.com>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_btree.h"
15 #include "xfs_bmap.h"
16 #include "xfs_refcount_btree.h"
17 #include "xfs_alloc.h"
18 #include "xfs_error.h"
19 #include "xfs_trace.h"
20 #include "xfs_cksum.h"
21 #include "xfs_trans.h"
22 #include "xfs_bit.h"
23 #include "xfs_rmap.h"
24 
/*
 * Duplicate a refcount btree cursor.  The copy shares the same mount,
 * transaction, AGF buffer, and AG number as the original cursor.
 */
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno);
}
32 
/*
 * Point the AGF at a new refcount btree root block.  @inc is +1 when the
 * tree gains a level and -1 when it loses one; both the on-disk level
 * counter in the AGF and the cached per-AG copy are adjusted to match,
 * and the modified AGF fields are logged.
 */
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	/* A zero short-form pointer is never a valid root. */
	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	/* Keep the in-core per-AG level in sync with the on-disk value. */
	pag->pagf_refcount_level += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}
54 
55 STATIC int
56 xfs_refcountbt_alloc_block(
57 	struct xfs_btree_cur	*cur,
58 	union xfs_btree_ptr	*start,
59 	union xfs_btree_ptr	*new,
60 	int			*stat)
61 {
62 	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
63 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
64 	struct xfs_alloc_arg	args;		/* block allocation args */
65 	int			error;		/* error return value */
66 
67 	memset(&args, 0, sizeof(args));
68 	args.tp = cur->bc_tp;
69 	args.mp = cur->bc_mp;
70 	args.type = XFS_ALLOCTYPE_NEAR_BNO;
71 	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
72 			xfs_refc_block(args.mp));
73 	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
74 	args.minlen = args.maxlen = args.prod = 1;
75 	args.resv = XFS_AG_RESV_METADATA;
76 
77 	error = xfs_alloc_vextent(&args);
78 	if (error)
79 		goto out_error;
80 	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
81 			args.agbno, 1);
82 	if (args.fsbno == NULLFSBLOCK) {
83 		*stat = 0;
84 		return 0;
85 	}
86 	ASSERT(args.agno == cur->bc_private.a.agno);
87 	ASSERT(args.len == 1);
88 
89 	new->s = cpu_to_be32(args.agbno);
90 	be32_add_cpu(&agf->agf_refcount_blocks, 1);
91 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
92 
93 	*stat = 1;
94 	return 0;
95 
96 out_error:
97 	return error;
98 }
99 
100 STATIC int
101 xfs_refcountbt_free_block(
102 	struct xfs_btree_cur	*cur,
103 	struct xfs_buf		*bp)
104 {
105 	struct xfs_mount	*mp = cur->bc_mp;
106 	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
107 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
108 	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
109 	struct xfs_owner_info	oinfo;
110 	int			error;
111 
112 	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
113 			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
114 	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
115 	be32_add_cpu(&agf->agf_refcount_blocks, -1);
116 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
117 	error = xfs_free_extent(cur->bc_tp, fsbno, 1, &oinfo,
118 			XFS_AG_RESV_METADATA);
119 	if (error)
120 		return error;
121 
122 	return error;
123 }
124 
125 STATIC int
126 xfs_refcountbt_get_minrecs(
127 	struct xfs_btree_cur	*cur,
128 	int			level)
129 {
130 	return cur->bc_mp->m_refc_mnr[level != 0];
131 }
132 
133 STATIC int
134 xfs_refcountbt_get_maxrecs(
135 	struct xfs_btree_cur	*cur,
136 	int			level)
137 {
138 	return cur->bc_mp->m_refc_mxr[level != 0];
139 }
140 
/* Initialize the low key of a record: just the record's start block. */
STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}
148 
149 STATIC void
150 xfs_refcountbt_init_high_key_from_rec(
151 	union xfs_btree_key	*key,
152 	union xfs_btree_rec	*rec)
153 {
154 	__u32			x;
155 
156 	x = be32_to_cpu(rec->refc.rc_startblock);
157 	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
158 	key->refc.rc_startblock = cpu_to_be32(x);
159 }
160 
161 STATIC void
162 xfs_refcountbt_init_rec_from_cur(
163 	struct xfs_btree_cur	*cur,
164 	union xfs_btree_rec	*rec)
165 {
166 	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
167 	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
168 	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
169 }
170 
/* Load the root pointer of the refcount btree from the AGF. */
STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	/* The cursor must be attached to the AG that owns this AGF. */
	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_refcount_root;
}
182 
183 STATIC int64_t
184 xfs_refcountbt_key_diff(
185 	struct xfs_btree_cur	*cur,
186 	union xfs_btree_key	*key)
187 {
188 	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
189 	struct xfs_refcount_key		*kp = &key->refc;
190 
191 	return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
192 }
193 
194 STATIC int64_t
195 xfs_refcountbt_diff_two_keys(
196 	struct xfs_btree_cur	*cur,
197 	union xfs_btree_key	*k1,
198 	union xfs_btree_key	*k2)
199 {
200 	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
201 			  be32_to_cpu(k2->refc.rc_startblock);
202 }
203 
/*
 * Structure-verify a refcount btree block: check the magic number, that
 * the superblock advertises reflink support, the common short-form v5
 * block header, and that the block's level fits under either the per-AG
 * tree height (when the AGF has been read in) or the global maximum.
 * Returns the address of the failing check, or NULL if the block is ok.
 */
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
		return __this_address;

	/* Refcount btrees only exist on reflink filesystems. */
	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		/* AGF is in core: bound the level by the actual tree height. */
		if (level >= pag->pagf_refcount_level)
			return __this_address;
	} else if (level >= mp->m_refc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}
232 
233 STATIC void
234 xfs_refcountbt_read_verify(
235 	struct xfs_buf	*bp)
236 {
237 	xfs_failaddr_t	fa;
238 
239 	if (!xfs_btree_sblock_verify_crc(bp))
240 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
241 	else {
242 		fa = xfs_refcountbt_verify(bp);
243 		if (fa)
244 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
245 	}
246 
247 	if (bp->b_error)
248 		trace_xfs_btree_corrupt(bp, _RET_IP_);
249 }
250 
251 STATIC void
252 xfs_refcountbt_write_verify(
253 	struct xfs_buf	*bp)
254 {
255 	xfs_failaddr_t	fa;
256 
257 	fa = xfs_refcountbt_verify(bp);
258 	if (fa) {
259 		trace_xfs_btree_corrupt(bp, _RET_IP_);
260 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
261 		return;
262 	}
263 	xfs_btree_sblock_calc_crc(bp);
264 
265 }
266 
/* Buffer verifier operations for refcount btree blocks. */
const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
	.verify_struct		= xfs_refcountbt_verify,
};
273 
274 STATIC int
275 xfs_refcountbt_keys_inorder(
276 	struct xfs_btree_cur	*cur,
277 	union xfs_btree_key	*k1,
278 	union xfs_btree_key	*k2)
279 {
280 	return be32_to_cpu(k1->refc.rc_startblock) <
281 	       be32_to_cpu(k2->refc.rc_startblock);
282 }
283 
284 STATIC int
285 xfs_refcountbt_recs_inorder(
286 	struct xfs_btree_cur	*cur,
287 	union xfs_btree_rec	*r1,
288 	union xfs_btree_rec	*r2)
289 {
290 	return  be32_to_cpu(r1->refc.rc_startblock) +
291 		be32_to_cpu(r1->refc.rc_blockcount) <=
292 		be32_to_cpu(r2->refc.rc_startblock);
293 }
294 
/* Operations table wiring the refcount btree into the generic btree code. */
static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
};
315 
316 /*
317  * Allocate a new refcount btree cursor.
318  */
319 struct xfs_btree_cur *
320 xfs_refcountbt_init_cursor(
321 	struct xfs_mount	*mp,
322 	struct xfs_trans	*tp,
323 	struct xfs_buf		*agbp,
324 	xfs_agnumber_t		agno)
325 {
326 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
327 	struct xfs_btree_cur	*cur;
328 
329 	ASSERT(agno != NULLAGNUMBER);
330 	ASSERT(agno < mp->m_sb.sb_agcount);
331 	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
332 
333 	cur->bc_tp = tp;
334 	cur->bc_mp = mp;
335 	cur->bc_btnum = XFS_BTNUM_REFC;
336 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
337 	cur->bc_ops = &xfs_refcountbt_ops;
338 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
339 
340 	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
341 
342 	cur->bc_private.a.agbp = agbp;
343 	cur->bc_private.a.agno = agno;
344 	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
345 
346 	cur->bc_private.a.priv.refc.nr_ops = 0;
347 	cur->bc_private.a.priv.refc.shape_changes = 0;
348 
349 	return cur;
350 }
351 
352 /*
353  * Calculate the number of records in a refcount btree block.
354  */
355 int
356 xfs_refcountbt_maxrecs(
357 	int			blocklen,
358 	bool			leaf)
359 {
360 	blocklen -= XFS_REFCOUNT_BLOCK_LEN;
361 
362 	if (leaf)
363 		return blocklen / sizeof(struct xfs_refcount_rec);
364 	return blocklen / (sizeof(struct xfs_refcount_key) +
365 			   sizeof(xfs_refcount_ptr_t));
366 }
367 
/*
 * Compute the maximum height of a refcount btree, assuming the worst
 * case of minimally-full nodes covering every block in the AG, and cache
 * the result in the mount structure.
 */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}
376 
/*
 * Calculate the refcount btree size (in blocks) needed to hold @len
 * records, using the per-level minimum record counts.
 */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_refc_mnr, len);
}
385 
386 /*
387  * Calculate the maximum refcount btree size.
388  */
389 xfs_extlen_t
390 xfs_refcountbt_max_size(
391 	struct xfs_mount	*mp,
392 	xfs_agblock_t		agblocks)
393 {
394 	/* Bail out if we're uninitialized, which can happen in mkfs. */
395 	if (mp->m_refc_mxr[0] == 0)
396 		return 0;
397 
398 	return xfs_refcountbt_calc_size(mp, agblocks);
399 }
400 
401 /*
402  * Figure out how many blocks to reserve and how many are used by this btree.
403  */
404 int
405 xfs_refcountbt_calc_reserves(
406 	struct xfs_mount	*mp,
407 	struct xfs_trans	*tp,
408 	xfs_agnumber_t		agno,
409 	xfs_extlen_t		*ask,
410 	xfs_extlen_t		*used)
411 {
412 	struct xfs_buf		*agbp;
413 	struct xfs_agf		*agf;
414 	xfs_agblock_t		agblocks;
415 	xfs_extlen_t		tree_len;
416 	int			error;
417 
418 	if (!xfs_sb_version_hasreflink(&mp->m_sb))
419 		return 0;
420 
421 
422 	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
423 	if (error)
424 		return error;
425 
426 	agf = XFS_BUF_TO_AGF(agbp);
427 	agblocks = be32_to_cpu(agf->agf_length);
428 	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
429 	xfs_trans_brelse(tp, agbp);
430 
431 	*ask += xfs_refcountbt_max_size(mp, agblocks);
432 	*used += tree_len;
433 
434 	return error;
435 }
436