/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"

static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_private.a.dfops);
}

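/*
 * Point the AGF at a new refcount btree root block and keep the cached
 * per-AG level count in sync; 'inc' is +1 when the tree gains a level
 * and -1 when it loses one.
 */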
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

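/*
 * Allocate a block for the refcount btree.  The allocation targets the
 * area near the refcount btree root block of this AG and is charged to
 * the per-AG metadata reservation (XFS_AG_RESV_METADATA).
 */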
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
			xfs_refc_block(args.mp));
	args.firstblock = args.fsbno;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_private.a.agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	*stat = 1;
	return 0;

out_error:
	return error;
}

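/*
 * Free a refcount btree block: drop the AGF block count first, then
 * return the extent to the per-AG metadata reservation it came from.
 */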
STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	return xfs_free_extent(cur->bc_tp, fsbno, 1, &oinfo,
			XFS_AG_RESV_METADATA);
}

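/*
 * m_refc_mnr[] and m_refc_mxr[] each hold two limits: index 0 applies to
 * leaf blocks and index 1 to node blocks, hence the "level != 0" index
 * in the two helpers below.
 */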
STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}

STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}

STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

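/*
 * The high key of a record is the last block it covers, i.e.
 * rc_startblock + rc_blockcount - 1.  For example, a record starting at
 * agbno 10 with a length of 20 blocks has a high key of 29; range
 * queries compare against this to decide whether a record touches the
 * interval of interest.
 */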
166 xfs_refcountbt_init_high_key_from_rec(
167 	union xfs_btree_key	*key,
168 	union xfs_btree_rec	*rec)
169 {
170 	__u32			x;
171 
172 	x = be32_to_cpu(rec->refc.rc_startblock);
173 	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
174 	key->refc.rc_startblock = cpu_to_be32(x);
175 }
176 
177 STATIC void
178 xfs_refcountbt_init_rec_from_cur(
179 	struct xfs_btree_cur	*cur,
180 	union xfs_btree_rec	*rec)
181 {
182 	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
183 	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
184 	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
185 }
186 
187 STATIC void
188 xfs_refcountbt_init_ptr_from_cur(
189 	struct xfs_btree_cur	*cur,
190 	union xfs_btree_ptr	*ptr)
191 {
192 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
193 
194 	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
195 	ASSERT(agf->agf_refcount_root != 0);
196 
197 	ptr->s = agf->agf_refcount_root;
198 }
199 
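/*
 * Compare a key against the cursor's current record: positive if the
 * key's startblock is higher than the record's, zero if equal, negative
 * if lower.
 */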
STATIC int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
	struct xfs_refcount_key		*kp = &key->refc;

	return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

STATIC int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			  be32_to_cpu(k2->refc.rc_startblock);
}

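/*
 * Structural checks for an on-disk refcount btree block: the magic
 * number, the reflink feature bit, the v5 short-form block header, and a
 * block level below the AG's recorded tree height (or below the computed
 * maximum if the AGF has not been read yet), followed by the generic
 * short-form btree block checks.
 */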
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
		return __this_address;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_refcount_level)
			return __this_address;
	} else if (level >= mp->m_refc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_refcountbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_refcountbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
	.verify_struct		= xfs_refcountbt_verify,
};

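/* Check that two keys are in the correct (strictly ascending) order. */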
STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

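/*
 * Check that two records are in the correct order; refcount records must
 * not overlap, so r1 has to end at or before r2 begins.
 */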
STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return  be32_to_cpu(r1->refc.rc_startblock) +
		be32_to_cpu(r1->refc.rc_blockcount) <=
		be32_to_cpu(r2->refc.rc_startblock);
}

static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
};

/*
 * Allocate a new refcount btree cursor.
 */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	struct xfs_defer_ops	*dfops)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agno < mp->m_sb.sb_agcount);
	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = XFS_BTNUM_REFC;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_refcountbt_ops;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;
	cur->bc_private.a.dfops = dfops;
	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.priv.refc.nr_ops = 0;
	cur->bc_private.a.priv.refc.shape_changes = 0;

	return cur;
}
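
/*
 * A minimal sketch of how a caller typically drives this cursor, for
 * illustration only (locking, deferred-op setup and error handling
 * elided).  The AGF buffer must already be held, e.g. via
 * xfs_alloc_read_agf():
 *
 *	struct xfs_btree_cur	*cur;
 *	int			stat;
 *
 *	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
 *	error = xfs_refcount_lookup_ge(cur, agbno, &stat);
 *	(examine or update records here)
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 */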

/*
 * Calculate the number of records in a refcount btree block.
 */
int
xfs_refcountbt_maxrecs(
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}
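
/*
 * Illustrative numbers, not taken from this file: with 4096-byte blocks
 * and the 56-byte CRC-enabled short-form block header, blocklen works
 * out to 4040 bytes, which gives roughly 336 records per leaf block
 * (12-byte records) and 505 key/pointer pairs per node block (4-byte
 * key plus 4-byte pointer).
 */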

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(agbp);
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_buf_relse(agbp);

	*ask += xfs_refcountbt_max_size(mp, agblocks);
	*used += tree_len;

	return error;
}
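
/*
 * Note on the contract above: *ask accumulates the worst-case size of a
 * refcount btree spanning the whole AG, while *used accumulates the
 * blocks the tree occupies right now.  A rough sketch of how the per-AG
 * reservation setup might gather these, with error handling elided:
 *
 *	xfs_extlen_t	ask = 0, used = 0;
 *
 *	error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
 *	(add the other metadata btrees, then hand ask/used to the AG
 *	 reservation code)
 */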
454