// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_MOUNT_H__
#define	__XFS_MOUNT_H__

struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;

/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
	XFS_LOWSP_1_PCNT = 0,
	XFS_LOWSP_2_PCNT,
	XFS_LOWSP_3_PCNT,
	XFS_LOWSP_4_PCNT,
	XFS_LOWSP_5_PCNT,
	XFS_LOWSP_MAX,
};
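
/*
 * Usage sketch (illustrative, not part of this header): these slots are
 * filled by xfs_set_low_space_thresholds(), declared near the end of this
 * file.  Assuming slot i holds (i + 1) percent of the data device size, a
 * minimal version of that calculation could look like:
 *
 *	uint64_t space = mp->m_sb.sb_dblocks;
 *	int i;
 *
 *	do_div(space, 100);
 *	for (i = 0; i < XFS_LOWSP_MAX; i++)
 *		mp->m_low_space[i] = space * (i + 1);
 */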

/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
	XFS_ERR_METADATA,
	XFS_ERR_CLASS_MAX,
};
enum {
	XFS_ERR_DEFAULT,
	XFS_ERR_EIO,
	XFS_ERR_ENOSPC,
	XFS_ERR_ENODEV,
	XFS_ERR_ERRNO_MAX,
};

#define XFS_ERR_RETRY_FOREVER	-1

/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj	kobj;
	int		max_retries;
	long		retry_timeout;	/* in jiffies, -1 = infinite */
};
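
/*
 * Worked example for the comment above: 86400 s * 24855 Hz = 2,147,472,000
 * jiffies, which still fits in a signed 32-bit value (INT_MAX is
 * 2,147,483,647).
 *
 * Looking one of these configurations up might look like the sketch below,
 * using xfs_error_get_cfg() declared at the end of this header; "retries"
 * and retry_the_io() are hypothetical, and the retry decision is an
 * illustrative approximation rather than the exact in-kernel policy:
 *
 *	struct xfs_error_cfg *cfg;
 *
 *	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, -EIO);
 *	if (cfg->max_retries == XFS_ERR_RETRY_FOREVER ||
 *	    retries < cfg->max_retries)
 *		retry_the_io();
 */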

typedef struct xfs_mount {
	struct super_block	*m_super;

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	struct xfs_ail		*m_ail;		/* fs active log item list */

	struct xfs_sb		m_sb;		/* copy of fs superblock */
	spinlock_t		m_sb_lock;	/* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
	struct percpu_counter	m_fdblocks;	/* free block counter */
	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;

	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	int			m_bsize;	/* fs logical block size */
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
	spinlock_t		m_agirotor_lock;/* .. and lock protecting it */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_allocsize_log;/* min write size log bytes */
	uint			m_allocsize_blocks; /* min write size blocks */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
	uint			m_rsumsize;	/* size of rt summary, bytes */
	/*
	 * Optional cache of rt summary level per bitmap block with the
	 * invariant that m_rsum_cache[bbno] <= the minimum i for which
	 * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
	 * inode lock.
	 */
	uint8_t			*m_rsum_cache;
	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	uint			m_blockwmask;	/* blockwsize-1 */
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_ag_maxlevels;	/* XFS_AG_MAXLEVELS */
	uint			m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_refc_maxlevels; /* max refcount btree level */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	struct mutex		m_growlock;	/* growfs mutex */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint64_t		m_flags;	/* global mount flags */
	bool			m_finobt_nores; /* no per-AG finobt resv. */
	uint			m_qflags;	/* quota status flags */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */
	uint64_t		m_resblks;	/* total reserved blocks */
	uint64_t		m_resblks_avail;/* available reserved blocks */
	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	atomic_t		m_active_trans;	/* number trans frozen */
	struct xfs_mru_cache	*m_filestream;  /* per-mount filestream data */
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct delayed_work	m_eofblocks_work; /* background eof blocks
						     trimming */
	struct delayed_work	m_cowblocks_work; /* background cow blocks
						     trimming */
	bool			m_update_sb;	/* sb needs update in mount */
	int64_t			m_low_space[XFS_LOWSP_MAX];
						/* low free space thresholds */
	struct xfs_kobj		m_kobj;
	struct xfs_kobj		m_error_kobj;
	struct xfs_kobj		m_error_meta_kobj;
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */

	/*
	 * Workqueue item so that we can coalesce multiple inode flush attempts
	 * into a single flush.
	 */
	struct work_struct	m_flush_inodes_work;
	struct workqueue_struct *m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_cil_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct *m_eofblocks_workqueue;
	struct workqueue_struct	*m_sync_workqueue;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;

	bool			m_always_cow;
	bool			m_fail_unmount;
#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj;
#endif
} xfs_mount_t;
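
/*
 * Usage sketch (illustrative): the free space and inode counters above are
 * per-cpu and are normally adjusted through the xfs_mod_*() helpers
 * declared near the end of this header rather than written directly.
 * Assuming "alloc_len" is a block count the caller wants to consume:
 *
 *	int error = xfs_mod_fdblocks(mp, -(int64_t)alloc_len, false);
 *
 *	if (error == -ENOSPC)
 *		return error;
 */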

#define M_IGEO(mp)		(&(mp)->m_ino_geo)

/*
 * Flags for m_flags.
 */
#define XFS_MOUNT_WSYNC		(1ULL << 0)	/* for nfs - all metadata ops
						   must be synchronous except
						   for space allocations */
#define XFS_MOUNT_UNMOUNTING	(1ULL << 1)	/* filesystem is unmounting */
#define XFS_MOUNT_WAS_CLEAN	(1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
						   operations, typically for
						   disk errors in metadata */
#define XFS_MOUNT_DISCARD	(1ULL << 5)	/* discard unused blocks */
#define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
						   allocations */
#define XFS_MOUNT_ATTR2		(1ULL << 8)	/* allow use of attr2 format */
#define XFS_MOUNT_GRPID		(1ULL << 9)	/* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY	(1ULL << 10)	/* no recovery - dirty fs */
#define XFS_MOUNT_ALLOCSIZE	(1ULL << 12)	/* specified allocation size */
#define XFS_MOUNT_SMALL_INUMS	(1ULL << 14)	/* user wants 32bit inodes */
#define XFS_MOUNT_32BITINODES	(1ULL << 15)	/* inode32 allocator active */
#define XFS_MOUNT_NOUUID	(1ULL << 16)	/* ignore uuid during mount */
#define XFS_MOUNT_IKEEP		(1ULL << 18)	/* keep empty inode clusters */
#define XFS_MOUNT_SWALLOC	(1ULL << 19)	/* turn on stripe width
						 * allocation */
#define XFS_MOUNT_RDONLY	(1ULL << 20)	/* read-only fs */
#define XFS_MOUNT_DIRSYNC	(1ULL << 21)	/* synchronous directory ops */
#define XFS_MOUNT_LARGEIO	(1ULL << 22)	/* report large preferred
						 * I/O size in stat() */
#define XFS_MOUNT_FILESTREAMS	(1ULL << 24)	/* enable the filestreams
						   allocator */
#define XFS_MOUNT_NOATTR2	(1ULL << 25)	/* disable use of attr2 format */

#define XFS_MOUNT_DAX		(1ULL << 62)	/* TEST ONLY! */
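
/*
 * The flags above are plain bits in m_flags, so testing one is a simple
 * mask, e.g. (illustrative):
 *
 *	if (mp->m_flags & XFS_MOUNT_RDONLY)
 *		return -EROFS;
 */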

/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT
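
/*
 * Worked example: these are log2 values, so the preallocation size in bytes
 * for a given log is just a shift: 1UL << XFS_MAX_IO_LOG is 1 GiB, and
 * 1UL << XFS_MIN_IO_LOG is one page (4 KiB when PAGE_SHIFT is 12).
 */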

#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp)	\
				((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
#define XFS_FORCED_SHUTDOWN(mp)	((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
		int lnnum);
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)

#define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	0x0004	/* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	0x0008	/* corrupt in-memory data structures */
#define SHUTDOWN_REMOTE_REQ	0x0010	/* shutdown came from remote cell */
#define SHUTDOWN_DEVICE_REQ	0x0020	/* failed all paths to the device */
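
/*
 * Typical call pattern (illustrative): a caller that hits a fatal metadata
 * write error shuts the filesystem down, and later code checks for the
 * shutdown state before issuing further work:
 *
 *	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
 *	...
 *	if (XFS_FORCED_SHUTDOWN(mp))
 *		return -EIO;
 */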

/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */

static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	do_div(ld, mp->m_sb.sb_agblocks);
	return (xfs_agnumber_t) ld;
}

static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}
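
/*
 * Worked example (assuming a 4096-byte block size and sb_agblocks == 1000):
 * XFS_BB_TO_FSBT() first converts the 512-byte basic-block daddr into a
 * filesystem block number, so daddr 16384 becomes fsb 2048; dividing by
 * sb_agblocks then yields agno = 2 with agbno = 48 left over, which is
 * exactly the quotient/remainder split the two helpers above return.
 */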

/* per-AG block reservation data structures */
struct xfs_ag_resv {
	/* number of blocks originally reserved here */
	xfs_extlen_t			ar_orig_reserved;
	/* number of blocks reserved here */
	xfs_extlen_t			ar_reserved;
	/* number of blocks originally asked for */
	xfs_extlen_t			ar_asked;
};
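
/*
 * Illustrative reading of the three fields above (an assumption about the
 * accounting, not a normative statement): if a pool asked for 100 blocks
 * but only 80 could be reserved, ar_asked = 100 and ar_orig_reserved = 80;
 * after 30 of those blocks are used, ar_reserved = 50, so the blocks
 * consumed from the reservation are ar_orig_reserved - ar_reserved = 30.
 */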

/*
 * Per-ag incore structure, copies of information in agf and agi, to improve the
 * performance of allocation group selection.
 */
typedef struct xfs_perag {
	struct xfs_mount *pag_mount;	/* owner filesystem */
	xfs_agnumber_t	pag_agno;	/* AG this structure belongs to */
	atomic_t	pag_ref;	/* perag reference count */
	char		pagf_init;	/* this agf's entry is initialized */
	char		pagi_init;	/* this agi's entry is initialized */
	char		pagf_metadata;	/* the agf is preferred to be metadata */
	char		pagi_inodeok;	/* The agi is ok for inodes */
	uint8_t		pagf_levels[XFS_BTNUM_AGF];
					/* # of levels in bno & cnt btree */
	bool		pagf_agflreset; /* agfl requires reset before use */
	uint32_t	pagf_flcount;	/* count of blocks in freelist */
	xfs_extlen_t	pagf_freeblks;	/* total free blocks */
	xfs_extlen_t	pagf_longest;	/* longest free space */
	uint32_t	pagf_btreeblks;	/* # of blocks held in AGF btrees */
	xfs_agino_t	pagi_freecount;	/* number of free inodes */
	xfs_agino_t	pagi_count;	/* number of allocated inodes */

	/*
	 * Inode allocation search lookup optimisation.
	 * If the pagino matches, the search for new inodes
	 * doesn't need to search the near ones again straight away.
	 */
	xfs_agino_t	pagl_pagino;
	xfs_agino_t	pagl_leftrec;
	xfs_agino_t	pagl_rightrec;

	/*
	 * Bitsets of per-ag metadata that have been checked and/or are sick.
	 * Callers should hold pag_state_lock before accessing these two fields.
	 */
	uint16_t	pag_checked;
	uint16_t	pag_sick;
	spinlock_t	pag_state_lock;

	spinlock_t	pagb_lock;	/* lock for pagb_tree */
	struct rb_root	pagb_tree;	/* ordered tree of busy extents */
	unsigned int	pagb_gen;	/* generation count for pagb_tree */
	wait_queue_head_t pagb_wait;	/* woken when pagb_gen changes */

	atomic_t        pagf_fstrms;    /* # of filestreams active in this AG */

	spinlock_t	pag_ici_lock;	/* incore inode cache lock */
	struct radix_tree_root pag_ici_root;	/* incore inode cache root */
	int		pag_ici_reclaimable;	/* reclaimable inodes */
	struct mutex	pag_ici_reclaim_lock;	/* serialisation point */
	unsigned long	pag_ici_reclaim_cursor;	/* reclaim restart point */

	/* buffer cache index */
	spinlock_t	pag_buf_lock;	/* lock for pag_buf_hash */
	struct rhashtable pag_buf_hash;

	/* for rcu-safe freeing */
	struct rcu_head	rcu_head;
	int		pagb_count;	/* pagb slots in use */

	/* Blocks reserved for all kinds of metadata. */
	struct xfs_ag_resv	pag_meta_resv;
	/* Blocks reserved for the reverse mapping btree. */
	struct xfs_ag_resv	pag_rmapbt_resv;

	/* # of levels in the refcount btree */
	uint8_t			pagf_refcount_level;

	/*
	 * Unlinked inode information.  This incore information reflects
	 * data stored in the AGI, so callers must hold the AGI buffer lock
	 * or have some other means to control concurrency.
	 */
	struct rhashtable	pagi_unlinked_hash;
} xfs_perag_t;
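
/*
 * Access sketch (illustrative; xfs_perag_get() and xfs_perag_put() are
 * declared elsewhere in XFS): perag structures are reference counted via
 * pag_ref and looked up by AG number, e.g.:
 *
 *	struct xfs_perag *pag = xfs_perag_get(mp, agno);
 *	xfs_extlen_t freeblks = pag->pagf_freeblks;
 *
 *	xfs_perag_put(pag);
 */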

static inline struct xfs_ag_resv *
xfs_perag_resv(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	switch (type) {
	case XFS_AG_RESV_METADATA:
		return &pag->pag_meta_resv;
	case XFS_AG_RESV_RMAPBT:
		return &pag->pag_rmapbt_resv;
	default:
		return NULL;
	}
}
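
/*
 * Example (illustrative): callers pick the reservation pool by type and
 * must be prepared for a NULL return for types without a per-AG pool:
 *
 *	struct xfs_ag_resv *resv = xfs_perag_resv(pag, XFS_AG_RESV_METADATA);
 *	xfs_extlen_t left = 0;
 *
 *	if (resv)
 *		left = resv->ar_reserved;
 */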

int xfs_buf_hash_init(xfs_perag_t *pag);
void xfs_buf_hash_destroy(xfs_perag_t *pag);

extern void	xfs_uuid_table_free(void);
extern int	xfs_log_sbcount(xfs_mount_t *);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int	xfs_mountfs(xfs_mount_t *mp);
extern int	xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
				     xfs_agnumber_t *maxagi);
extern void	xfs_unmountfs(xfs_mount_t *);

extern int	xfs_mod_icount(struct xfs_mount *mp, int64_t delta);
extern int	xfs_mod_ifree(struct xfs_mount *mp, int64_t delta);
extern int	xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
				 bool reserved);
extern int	xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);

extern struct xfs_buf *xfs_getsb(xfs_mount_t *);
extern int	xfs_readsb(xfs_mount_t *, int);
extern void	xfs_freesb(xfs_mount_t *);
extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);

extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);

extern void	xfs_set_low_space_thresholds(struct xfs_mount *);

int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
			xfs_off_t count_fsb);

struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
		int error_class, int error);
void xfs_force_summary_recalc(struct xfs_mount *mp);
void xfs_mod_delalloc(struct xfs_mount *mp, int64_t delta);

#endif	/* __XFS_MOUNT_H__ */