/* xref: /openbmc/linux/fs/xfs/scrub/common.h (revision fb6e584e) */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <djwong@kernel.org>
5  */
6 #ifndef __XFS_SCRUB_COMMON_H__
7 #define __XFS_SCRUB_COMMON_H__
8 
9 /*
10  * We /could/ terminate a scrub/repair operation early.  If we're not
11  * in a good place to continue (fatal signal, etc.) then bail out.
12  * Note that we're careful not to make any judgements about *error.
13  */
14 static inline bool
xchk_should_terminate(struct xfs_scrub * sc,int * error)15 xchk_should_terminate(
16 	struct xfs_scrub	*sc,
17 	int			*error)
18 {
19 	/*
20 	 * If preemption is disabled, we need to yield to the scheduler every
21 	 * few seconds so that we don't run afoul of the soft lockup watchdog
22 	 * or RCU stall detector.
23 	 */
24 	cond_resched();
25 
26 	if (fatal_signal_pending(current)) {
27 		if (*error == 0)
28 			*error = -EINTR;
29 		return true;
30 	}
31 	return false;
32 }
33 
34 int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
35 void xchk_trans_cancel(struct xfs_scrub *sc);
36 
37 bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
38 		xfs_agblock_t bno, int *error);
39 bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
40 		xfs_fileoff_t offset, int *error);
41 
42 bool xchk_xref_process_error(struct xfs_scrub *sc,
43 		xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
44 bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
45 		int whichfork, xfs_fileoff_t offset, int *error);
46 
47 void xchk_block_set_preen(struct xfs_scrub *sc,
48 		struct xfs_buf *bp);
49 void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);
50 
51 void xchk_set_corrupt(struct xfs_scrub *sc);
52 void xchk_block_set_corrupt(struct xfs_scrub *sc,
53 		struct xfs_buf *bp);
54 void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
55 void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
56 		xfs_fileoff_t offset);
57 
58 void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
59 		struct xfs_buf *bp);
60 void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
61 		xfs_ino_t ino);
62 void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
63 		int whichfork, xfs_fileoff_t offset);
64 
65 void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
66 void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
67 		xfs_fileoff_t offset);
68 
69 void xchk_set_incomplete(struct xfs_scrub *sc);
70 int xchk_checkpoint_log(struct xfs_mount *mp);
71 
72 /* Are we set up for a cross-referencing check? */
73 bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
74 			   struct xfs_btree_cur **curpp);
75 
76 /* Setup functions */
77 int xchk_setup_agheader(struct xfs_scrub *sc);
78 int xchk_setup_fs(struct xfs_scrub *sc);
79 int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
80 int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
81 int xchk_setup_ag_rmapbt(struct xfs_scrub *sc);
82 int xchk_setup_ag_refcountbt(struct xfs_scrub *sc);
83 int xchk_setup_inode(struct xfs_scrub *sc);
84 int xchk_setup_inode_bmap(struct xfs_scrub *sc);
85 int xchk_setup_inode_bmap_data(struct xfs_scrub *sc);
86 int xchk_setup_directory(struct xfs_scrub *sc);
87 int xchk_setup_xattr(struct xfs_scrub *sc);
88 int xchk_setup_symlink(struct xfs_scrub *sc);
89 int xchk_setup_parent(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
int xchk_setup_rtbitmap(struct xfs_scrub *sc);
int xchk_setup_rtsummary(struct xfs_scrub *sc);
#else
/* Realtime metadata scrubbers are unavailable without CONFIG_XFS_RT. */
static inline int
xchk_setup_rtbitmap(struct xfs_scrub *sc)
{
	return -ENOENT;
}
static inline int
xchk_setup_rtsummary(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_setup_quota(struct xfs_scrub *sc);
#else
/* Quota scrubbing is unavailable without CONFIG_XFS_QUOTA. */
static inline int
xchk_setup_quota(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
114 int xchk_setup_fscounters(struct xfs_scrub *sc);
115 
116 void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
117 int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
118 		struct xchk_ag *sa);
119 
120 /*
121  * Grab all AG resources, treating the inability to grab the perag structure as
122  * a fs corruption.  This is intended for callers checking an ondisk reference
123  * to a given AG, which means that the AG must still exist.
124  */
125 static inline int
xchk_ag_init_existing(struct xfs_scrub * sc,xfs_agnumber_t agno,struct xchk_ag * sa)126 xchk_ag_init_existing(
127 	struct xfs_scrub	*sc,
128 	xfs_agnumber_t		agno,
129 	struct xchk_ag		*sa)
130 {
131 	int			error = xchk_ag_init(sc, agno, sa);
132 
133 	return error == -ENOENT ? -EFSCORRUPTED : error;
134 }
135 
136 int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
137 		struct xchk_ag *sa);
138 void xchk_ag_btcur_free(struct xchk_ag *sa);
139 void xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
140 int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
141 		const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);
142 
143 int xchk_setup_ag_btree(struct xfs_scrub *sc, bool force_log);
144 int xchk_iget_for_scrubbing(struct xfs_scrub *sc);
145 int xchk_setup_inode_contents(struct xfs_scrub *sc, unsigned int resblks);
146 int xchk_install_live_inode(struct xfs_scrub *sc, struct xfs_inode *ip);
147 
148 void xchk_ilock(struct xfs_scrub *sc, unsigned int ilock_flags);
149 bool xchk_ilock_nowait(struct xfs_scrub *sc, unsigned int ilock_flags);
150 void xchk_iunlock(struct xfs_scrub *sc, unsigned int ilock_flags);
151 
152 void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);
153 
154 /*
155  * Grab the inode at @inum.  The caller must have created a scrub transaction
156  * so that we can confirm the inumber by walking the inobt and not deadlock on
157  * a loop in the inobt.
158  */
159 int xchk_iget(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp);
160 int xchk_iget_agi(struct xfs_scrub *sc, xfs_ino_t inum,
161 		struct xfs_buf **agi_bpp, struct xfs_inode **ipp);
162 void xchk_irele(struct xfs_scrub *sc, struct xfs_inode *ip);
163 int xchk_install_handle_inode(struct xfs_scrub *sc, struct xfs_inode *ip);
164 
165 /*
166  * Safe version of (untrusted) xchk_iget that uses an empty transaction to
167  * avoid deadlocking on loops in the inobt.  This should only be used in a
168  * scrub or repair setup routine, and only prior to grabbing a transaction.
169  */
170 static inline int
xchk_iget_safe(struct xfs_scrub * sc,xfs_ino_t inum,struct xfs_inode ** ipp)171 xchk_iget_safe(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp)
172 {
173 	int	error;
174 
175 	ASSERT(sc->tp == NULL);
176 
177 	error = xchk_trans_alloc(sc, 0);
178 	if (error)
179 		return error;
180 	error = xchk_iget(sc, inum, ipp);
181 	xchk_trans_cancel(sc);
182 	return error;
183 }
184 
185 /*
186  * Don't bother cross-referencing if we already found corruption or cross
187  * referencing discrepancies.
188  */
xchk_skip_xref(struct xfs_scrub_metadata * sm)189 static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
190 {
191 	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
192 			       XFS_SCRUB_OFLAG_XCORRUPT);
193 }
194 
#ifdef CONFIG_XFS_ONLINE_REPAIR
/* Decide if a repair is required. */
static inline bool xchk_needs_repair(const struct xfs_scrub_metadata *sm)
{
	return (sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
				XFS_SCRUB_OFLAG_XCORRUPT |
				XFS_SCRUB_OFLAG_PREEN)) != 0;
}
#else
# define xchk_needs_repair(sc)		(false)
#endif /* CONFIG_XFS_ONLINE_REPAIR */
206 
207 int xchk_metadata_inode_forks(struct xfs_scrub *sc);
208 
209 /*
210  * Helper macros to allocate and format xfile description strings.
211  * Callers must kfree the pointer returned.
212  */
213 #define xchk_xfile_descr(sc, fmt, ...) \
214 	kasprintf(XCHK_GFP_FLAGS, "XFS (%s): " fmt, \
215 			(sc)->mp->m_super->s_id, ##__VA_ARGS__)
216 
217 /*
218  * Setting up a hook to wait for intents to drain is costly -- we have to take
219  * the CPU hotplug lock and force an i-cache flush on all CPUs once to set it
220  * up, and again to tear it down.  These costs add up quickly, so we only want
221  * to enable the drain waiter if the drain actually detected a conflict with
222  * running intent chains.
223  */
xchk_need_intent_drain(struct xfs_scrub * sc)224 static inline bool xchk_need_intent_drain(struct xfs_scrub *sc)
225 {
226 	return sc->flags & XCHK_NEED_DRAIN;
227 }
228 
229 void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks);
230 
231 int xchk_inode_is_allocated(struct xfs_scrub *sc, xfs_agino_t agino,
232 		bool *inuse);
233 
234 #endif	/* __XFS_SCRUB_COMMON_H__ */
235