/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
20 #include "xfs.h"
21 #include "xfs_fs.h"
22 #include "xfs_shared.h"
23 #include "xfs_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_mount.h"
26 #include "xfs_defer.h"
27 #include "xfs_btree.h"
28 #include "xfs_bit.h"
29 #include "xfs_log_format.h"
30 #include "xfs_trans.h"
31 #include "xfs_sb.h"
32 #include "xfs_inode.h"
33 #include "xfs_icache.h"
34 #include "xfs_itable.h"
35 #include "xfs_alloc.h"
36 #include "xfs_alloc_btree.h"
37 #include "xfs_bmap.h"
38 #include "xfs_bmap_btree.h"
39 #include "xfs_ialloc.h"
40 #include "xfs_ialloc_btree.h"
41 #include "xfs_refcount.h"
42 #include "xfs_refcount_btree.h"
43 #include "xfs_rmap.h"
44 #include "xfs_rmap_btree.h"
45 #include "scrub/xfs_scrub.h"
46 #include "scrub/scrub.h"
47 #include "scrub/common.h"
48 #include "scrub/trace.h"
49 #include "scrub/btree.h"
50 
/*
 * Online Scrub and Repair
 *
 * Traditionally, XFS (the kernel driver) did not know how to check or
 * repair on-disk data structures.  That task was left to the xfs_check
 * and xfs_repair tools, both of which require taking the filesystem
 * offline for a thorough but time consuming examination.  Online
 * scrub & repair, on the other hand, enables us to check the metadata
 * for obvious errors while carefully stepping around the filesystem's
 * ongoing operations, locking rules, etc.
 *
 * Given that most XFS metadata consist of records stored in a btree,
 * most of the checking functions iterate the btree blocks themselves
 * looking for irregularities.  When a record block is encountered, each
 * record can be checked for obviously bad values.  Record values can
 * also be cross-referenced against other btrees to look for potential
 * misunderstandings between pieces of metadata.
 *
 * It is expected that the checkers responsible for per-AG metadata
 * structures will lock the AG headers (AGI, AGF, AGFL), iterate the
 * metadata structure, and perform any relevant cross-referencing before
 * unlocking the AG and returning the results to userspace.  These
 * scrubbers must not keep an AG locked for too long to avoid tying up
 * the block and inode allocators.
 *
 * Block maps and b-trees rooted in an inode present a special challenge
 * because they can involve extents from any AG.  The general scrubber
 * structure of lock -> check -> xref -> unlock still holds, but AG
 * locking order rules /must/ be obeyed to avoid deadlocks.  The
 * ordering rule, of course, is that we must lock in increasing AG
 * order.  Helper functions are provided to track which AG headers we've
 * already locked.  If we detect an imminent locking order violation, we
 * can signal a potential deadlock, in which case the scrubber can jump
 * out to the top level, lock all the AGs in order, and retry the scrub.
 *
 * For file data (directories, extended attributes, symlinks) scrub, we
 * can simply lock the inode and walk the data.  For btree data
 * (directories and attributes) we follow the same btree-scrubbing
 * strategy outlined previously to check the records.
 *
 * We use a bit of trickery with transactions to avoid buffer deadlocks
 * if there is a cycle in the metadata.  The basic problem is that
 * travelling down a btree involves locking the current buffer at each
 * tree level.  If a pointer should somehow point back to a buffer that
 * we've already examined, we will deadlock due to the second buffer
 * locking attempt.  Note however that grabbing a buffer in transaction
 * context links the locked buffer to the transaction.  If we try to
 * re-grab the buffer in the context of the same transaction, we avoid
 * the second lock attempt and continue.  Between the verifier and the
 * scrubber, something will notice that something is amiss and report
 * the corruption.  Therefore, each scrubber will allocate an empty
 * transaction, attach buffers to it, and cancel the transaction at the
 * end of the scrub run.  Cancelling a non-dirty transaction simply
 * unlocks the buffers.  (A sketch of this pattern follows this
 * comment block.)
 *
 * There are four pieces of data that scrub can communicate to
 * userspace.  The first is the error code (errno), which can be used to
 * communicate operational errors in performing the scrub.  There are
 * also three flags that can be set in the scrub context.  If the data
 * structure itself is corrupt, the CORRUPT flag will be set.  If
 * cross-referencing the structure against other metadata turns up a
 * discrepancy, the XCORRUPT flag will be set.  If the metadata is
 * correct but otherwise suboptimal, the PREEN flag will be set.
 */

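/*
 * For illustration only: a rough sketch of the empty-transaction trick
 * described above.  xfs_trans_alloc_empty() and xfs_trans_read_buf()
 * are the regular XFS interfaces, but this helper itself is
 * hypothetical and is not called anywhere in the dispatch path; error
 * handling and buffer verifiers are abbreviated.
 */
STATIC int
xfs_scrub_grab_buf_sketch(
	struct xfs_scrub_context	*sc,
	xfs_daddr_t			daddr,
	struct xfs_buf			**bpp)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	/* Allocate a zero-reservation transaction to hang buffers on. */
	if (!sc->tp) {
		error = xfs_trans_alloc_empty(mp, &sc->tp);
		if (error)
			return error;
	}

	/*
	 * Reading through the transaction means that if a metadata cycle
	 * leads us back to a buffer we already hold, the transaction hands
	 * back the attached buffer instead of deadlocking on a second
	 * lock attempt.
	 */
	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp, daddr,
			XFS_FSB_TO_BB(mp, 1), 0, bpp, NULL);
	if (error)
		return error;

	/* xfs_scrub_teardown() cancels sc->tp, which unlocks the buffers. */
	return 0;
}
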
/*
 * Scrub probe -- userspace uses this to probe if we're willing to scrub
 * or repair a given mountpoint.  This will be used by xfs_scrub to
 * probe the kernel's abilities to scrub (and repair) the metadata.  We
 * do this by validating the ioctl inputs from userspace, preparing the
 * filesystem for a scrub (or a repair) operation, and immediately
 * returning to userspace.  Userspace can use the returned errno and
 * structure state to decide (in broad terms) if scrub/repair are
 * supported by the running kernel.
 */
static int
xfs_scrub_probe(
	struct xfs_scrub_context	*sc)
{
	int				error = 0;

	if (sc->sm->sm_ino || sc->sm->sm_agno)
		return -EINVAL;
	if (xfs_scrub_should_terminate(sc, &error))
		return error;

	return 0;
}

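/*
 * Userspace-side sketch of the probe above, for illustration only.  It
 * assumes the xfsprogs headers that export XFS_IOC_SCRUB_METADATA,
 * struct xfs_scrub_metadata, and the XFS_SCRUB_TYPE_PROBE constant; it
 * is guarded out because it is not kernel code.
 */
#if 0
#include <sys/ioctl.h>
#include <xfs/xfs.h>

/* @fd is any open file descriptor on the filesystem being probed. */
static int
scrub_supported(int fd)
{
	struct xfs_scrub_metadata	sm = {
		.sm_type = XFS_SCRUB_TYPE_PROBE,
	};

	/*
	 * The errno from the ioctl reports operational problems; for a
	 * real scrub type, sm.sm_flags would additionally carry the
	 * CORRUPT/XCORRUPT/PREEN result flags described above.
	 */
	return ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) == 0;
}
#endif
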
/* Scrub setup and teardown */

/* Free all the resources and finish the transactions. */
STATIC int
xfs_scrub_teardown(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip_in,
	int				error)
{
	xfs_scrub_ag_free(sc, &sc->sa);
	if (sc->tp) {
		xfs_trans_cancel(sc->tp);
		sc->tp = NULL;
	}
	if (sc->ip) {
		xfs_iunlock(sc->ip, sc->ilock_flags);
		if (sc->ip != ip_in &&
		    !xfs_internal_inum(sc->mp, sc->ip->i_ino))
			iput(VFS_I(sc->ip));
		sc->ip = NULL;
	}
	if (sc->buf) {
		kmem_free(sc->buf);
		sc->buf = NULL;
	}
	return error;
}

/* Scrubbing dispatch. */

static const struct xfs_scrub_meta_ops meta_scrub_ops[] = {
	{ /* ioctl presence test */
		.setup	= xfs_scrub_setup_fs,
		.scrub	= xfs_scrub_probe,
	},
	{ /* superblock */
		.setup	= xfs_scrub_setup_ag_header,
		.scrub	= xfs_scrub_superblock,
	},
	{ /* agf */
		.setup	= xfs_scrub_setup_ag_header,
		.scrub	= xfs_scrub_agf,
	},
	{ /* agfl */
		.setup	= xfs_scrub_setup_ag_header,
		.scrub	= xfs_scrub_agfl,
	},
	{ /* agi */
		.setup	= xfs_scrub_setup_ag_header,
		.scrub	= xfs_scrub_agi,
	},
	{ /* bnobt */
		.setup	= xfs_scrub_setup_ag_allocbt,
		.scrub	= xfs_scrub_bnobt,
	},
	{ /* cntbt */
		.setup	= xfs_scrub_setup_ag_allocbt,
		.scrub	= xfs_scrub_cntbt,
	},
	{ /* inobt */
		.setup	= xfs_scrub_setup_ag_iallocbt,
		.scrub	= xfs_scrub_inobt,
	},
	{ /* finobt */
		.setup	= xfs_scrub_setup_ag_iallocbt,
		.scrub	= xfs_scrub_finobt,
		.has	= xfs_sb_version_hasfinobt,
	},
	{ /* rmapbt */
		.setup	= xfs_scrub_setup_ag_rmapbt,
		.scrub	= xfs_scrub_rmapbt,
		.has	= xfs_sb_version_hasrmapbt,
	},
	{ /* refcountbt */
		.setup	= xfs_scrub_setup_ag_refcountbt,
		.scrub	= xfs_scrub_refcountbt,
		.has	= xfs_sb_version_hasreflink,
	},
	{ /* inode record */
		.setup	= xfs_scrub_setup_inode,
		.scrub	= xfs_scrub_inode,
	},
	{ /* inode data fork */
		.setup	= xfs_scrub_setup_inode_bmap,
		.scrub	= xfs_scrub_bmap_data,
	},
	{ /* inode attr fork */
		.setup	= xfs_scrub_setup_inode_bmap,
		.scrub	= xfs_scrub_bmap_attr,
	},
	{ /* inode CoW fork */
		.setup	= xfs_scrub_setup_inode_bmap,
		.scrub	= xfs_scrub_bmap_cow,
	},
	{ /* directory */
		.setup	= xfs_scrub_setup_directory,
		.scrub	= xfs_scrub_directory,
	},
	{ /* extended attributes */
		.setup	= xfs_scrub_setup_xattr,
		.scrub	= xfs_scrub_xattr,
	},
	{ /* symbolic link */
		.setup	= xfs_scrub_setup_symlink,
		.scrub	= xfs_scrub_symlink,
	},
	{ /* parent pointers */
		.setup	= xfs_scrub_setup_parent,
		.scrub	= xfs_scrub_parent,
	},
	{ /* realtime bitmap */
		.setup	= xfs_scrub_setup_rt,
		.scrub	= xfs_scrub_rtbitmap,
		.has	= xfs_sb_version_hasrealtime,
	},
	{ /* realtime summary */
		.setup	= xfs_scrub_setup_rt,
		.scrub	= xfs_scrub_rtsummary,
		.has	= xfs_sb_version_hasrealtime,
	},
	{ /* user quota */
		.setup	= xfs_scrub_setup_quota,
		.scrub	= xfs_scrub_quota,
	},
	{ /* group quota */
		.setup	= xfs_scrub_setup_quota,
		.scrub	= xfs_scrub_quota,
	},
	{ /* project quota */
		.setup	= xfs_scrub_setup_quota,
		.scrub	= xfs_scrub_quota,
	},
};

/* This isn't a stable feature, warn once per day. */
static inline void
xfs_scrub_experimental_warning(
	struct xfs_mount	*mp)
{
	static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT(
			"xfs_scrub_warning", 86400 * HZ, 1);
	ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE);

	if (__ratelimit(&scrub_warning))
		xfs_alert(mp,
"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
}

/* Dispatch metadata scrubbing. */
int
xfs_scrub_metadata(
	struct xfs_inode		*ip,
	struct xfs_scrub_metadata	*sm)
{
	struct xfs_scrub_context	sc;
	struct xfs_mount		*mp = ip->i_mount;
	const struct xfs_scrub_meta_ops	*ops;
	bool				try_harder = false;
	int				error = 0;

	trace_xfs_scrub_start(ip, sm, error);

	/* Forbidden if we are shut down or mounted norecovery. */
	error = -ESHUTDOWN;
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out;
	error = -ENOTRECOVERABLE;
	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
		goto out;

	/* Check our inputs. */
	error = -EINVAL;
	sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
	if (sm->sm_flags & ~XFS_SCRUB_FLAGS_IN)
		goto out;
	if (memchr_inv(sm->sm_reserved, 0, sizeof(sm->sm_reserved)))
		goto out;

	/* Do we know about this type of metadata? */
	error = -ENOENT;
	if (sm->sm_type >= XFS_SCRUB_TYPE_NR)
		goto out;
	ops = &meta_scrub_ops[sm->sm_type];
	if (ops->scrub == NULL)
		goto out;

	/*
	 * We won't scrub any filesystem that doesn't have the ability
	 * to record unwritten extents.  The option was made default in
	 * 2003, removed from mkfs in 2007, and cannot be disabled in
	 * v5, so if we find a filesystem without this flag it's either
	 * really old or totally unsupported.  Avoid it either way.
	 * We also don't support v1-v3 filesystems, which aren't
	 * mountable.
	 */
	error = -EOPNOTSUPP;
	if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
		goto out;

	/* Does this fs even support this type of metadata? */
	error = -ENOENT;
	if (ops->has && !ops->has(&mp->m_sb))
		goto out;

	/* We don't know how to repair anything yet. */
	error = -EOPNOTSUPP;
	if (sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		goto out;

	xfs_scrub_experimental_warning(mp);

retry_op:
	/* Set up for the operation. */
	memset(&sc, 0, sizeof(sc));
	sc.mp = ip->i_mount;
	sc.sm = sm;
	sc.ops = ops;
	sc.try_harder = try_harder;
	sc.sa.agno = NULLAGNUMBER;
	error = sc.ops->setup(&sc, ip);
	if (error)
		goto out_teardown;

	/* Scrub for errors. */
	error = sc.ops->scrub(&sc);
	if (!try_harder && error == -EDEADLOCK) {
		/*
		 * Scrubbers return -EDEADLOCK to mean 'try harder'.
		 * Tear down everything we hold, then set up again with
		 * preparation for worst-case scenarios.
		 */
		error = xfs_scrub_teardown(&sc, ip, 0);
		if (error)
			goto out;
		try_harder = true;
		goto retry_op;
	} else if (error)
		goto out_teardown;

	if (sc.sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
			       XFS_SCRUB_OFLAG_XCORRUPT))
		xfs_alert_ratelimited(mp, "Corruption detected during scrub.");

out_teardown:
	error = xfs_scrub_teardown(&sc, ip, error);
out:
	trace_xfs_scrub_done(ip, sm, error);
	if (error == -EFSCORRUPTED || error == -EFSBADCRC) {
		sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
		error = 0;
	}
	return error;
}