// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__

struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;

/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
	XFS_LOWSP_1_PCNT = 0,
	XFS_LOWSP_2_PCNT,
	XFS_LOWSP_3_PCNT,
	XFS_LOWSP_4_PCNT,
	XFS_LOWSP_5_PCNT,
	XFS_LOWSP_MAX,
};

/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
	XFS_ERR_METADATA,
	XFS_ERR_CLASS_MAX,
};
enum {
	XFS_ERR_DEFAULT,
	XFS_ERR_EIO,
	XFS_ERR_ENOSPC,
	XFS_ERR_ENODEV,
	XFS_ERR_ERRNO_MAX,
};

#define XFS_ERR_RETRY_FOREVER	-1

/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj	kobj;
	int		max_retries;
	long		retry_timeout;	/* in jiffies, -1 = infinite */
};
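/*
 * Illustrative sketch only, not an existing kernel helper: it shows how the
 * max_retries and retry_timeout knobs above are meant to be interpreted.
 * The real retry policy lives in the buffer error handling code; the name
 * and parameters below are hypothetical.
 */
static inline bool
xfs_example_error_should_retry(const struct xfs_error_cfg *cfg,
		int retry_count, unsigned long first_error_jiffies)
{
	/* -1 (XFS_ERR_RETRY_FOREVER) means "never give up" for either knob */
	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    retry_count > cfg->max_retries)
		return false;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, first_error_jiffies + cfg->retry_timeout))
		return false;
	return true;
}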
/*
 * The struct xfs_mount layout is optimised to separate read-mostly variables
 * from variables that are frequently modified.  We put the read-mostly
 * variables first, then place all the other variables at the end.
 *
 * Typically, read-mostly variables are those that are set at mount time and
 * never changed again, or only change rarely as a result of things like sysfs
 * knobs being tweaked.
 */
typedef struct xfs_mount {
	struct xfs_sb		m_sb;		/* copy of fs superblock */
	struct super_block	*m_super;
	struct xfs_ail		*m_ail;		/* fs active log item list */
	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
	/*
	 * Optional cache of rt summary level per bitmap block with the
	 * invariant that m_rsum_cache[bbno] <= the minimum i for which
	 * rsum[i][bbno] != 0.  Reads and writes are serialized by the rsumip
	 * inode lock.
	 */
	uint8_t			*m_rsum_cache;
	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
	struct workqueue_struct	*m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_cil_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct	*m_blockgc_workqueue;
	struct workqueue_struct	*m_sync_workqueue;

	int			m_bsize;	/* fs logical block size */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	uint			m_blockwmask;	/* blockwsize-1 */
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_ag_maxlevels;	/* XFS_AG_MAXLEVELS */
	uint			m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_refc_maxlevels; /* max refcount btree level */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_allocsize_log; /* min write size log bytes */
	uint			m_allocsize_blocks; /* min write size blocks */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
	uint			m_rsumsize;	/* size of rt summary, bytes */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint			m_qflags;	/* quota status flags */
	uint64_t		m_flags;	/* global mount flags */
	int64_t			m_low_space[XFS_LOWSP_MAX];
						/* low free space thresholds */
	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */

	bool			m_always_cow;
	bool			m_fail_unmount;
	bool			m_finobt_nores;	/* no per-AG finobt resv. */
	bool			m_update_sb;	/* sb needs update in mount */

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	/*
	 * End of read-mostly variables.  Frequently written variables and
	 * locks should be placed below this comment from now on.  The first
	 * variable here is marked as cacheline aligned so that it is
	 * separated from the read-mostly variables.
	 */

	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
	struct percpu_counter	m_fdblocks;	/* free block counter */
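	/*
	 * Note: the three percpu counters above are only approximate when
	 * read without summing.  m_fdblocks in particular is normally
	 * adjusted through xfs_mod_fdblocks() (declared later in this
	 * header) so that reserved block pool accounting stays consistent.
	 */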
	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;

	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	uint64_t		m_resblks;	/* total reserved blocks */
	uint64_t		m_resblks_avail;/* available reserved blocks */
	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct xfs_kobj		m_kobj;
	struct xfs_kobj		m_error_kobj;
	struct xfs_kobj		m_error_meta_kobj;
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
	spinlock_t		m_agirotor_lock;/* .. and lock protecting it */

	/*
	 * Workqueue item so that we can coalesce multiple inode flush
	 * attempts into a single flush.
	 */
	struct work_struct	m_flush_inodes_work;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;
	struct mutex		m_growlock;	/* growfs mutex */

#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj;
#endif
} xfs_mount_t;

#define M_IGEO(mp)		(&(mp)->m_ino_geo)
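/*
 * Illustrative sketch only, not an existing helper: the read-mostly geometry
 * fields are laid out so hot paths can use mask/shift arithmetic instead of
 * divisions.  The hypothetical function below rounds a byte offset up to a
 * filesystem block boundary using m_blockmask (sb_blocksize - 1); M_IGEO()
 * above is the usual way to reach the inode geometry without spelling out
 * mp->m_ino_geo at every call site.
 */
static inline xfs_off_t
xfs_example_roundup_to_fsb(struct xfs_mount *mp, xfs_off_t offset)
{
	return (offset + mp->m_blockmask) & ~(xfs_off_t)mp->m_blockmask;
}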
/*
 * Flags for m_flags.
 */
#define XFS_MOUNT_WSYNC		(1ULL << 0)	/* for nfs - all metadata ops
						   must be synchronous except
						   for space allocations */
#define XFS_MOUNT_UNMOUNTING	(1ULL << 1)	/* filesystem is unmounting */
#define XFS_MOUNT_WAS_CLEAN	(1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
						   operations, typically for
						   disk errors in metadata */
#define XFS_MOUNT_DISCARD	(1ULL << 5)	/* discard unused blocks */
#define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
						   allocations */
#define XFS_MOUNT_ATTR2		(1ULL << 8)	/* allow use of attr2 format */
#define XFS_MOUNT_GRPID		(1ULL << 9)	/* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY	(1ULL << 10)	/* no recovery - dirty fs */
#define XFS_MOUNT_ALLOCSIZE	(1ULL << 12)	/* specified allocation size */
#define XFS_MOUNT_SMALL_INUMS	(1ULL << 14)	/* user wants 32bit inodes */
#define XFS_MOUNT_32BITINODES	(1ULL << 15)	/* inode32 allocator active */
#define XFS_MOUNT_NOUUID	(1ULL << 16)	/* ignore uuid during mount */
#define XFS_MOUNT_IKEEP		(1ULL << 18)	/* keep empty inode clusters */
#define XFS_MOUNT_SWALLOC	(1ULL << 19)	/* turn on stripe width
						 * allocation */
#define XFS_MOUNT_RDONLY	(1ULL << 20)	/* read-only fs */
#define XFS_MOUNT_DIRSYNC	(1ULL << 21)	/* synchronous directory ops */
#define XFS_MOUNT_LARGEIO	(1ULL << 22)	/* report large preferred
						 * I/O size in stat() */
#define XFS_MOUNT_FILESTREAMS	(1ULL << 24)	/* enable the filestreams
						   allocator */
#define XFS_MOUNT_NOATTR2	(1ULL << 25)	/* disable use of attr2 format */
#define XFS_MOUNT_DAX_ALWAYS	(1ULL << 26)
#define XFS_MOUNT_DAX_NEVER	(1ULL << 27)
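/*
 * Example only, not an existing helper: m_flags is a plain 64-bit bitmask,
 * so mount state is queried with simple bit tests.  The hypothetical check
 * below mirrors the style used at call sites, e.g. deciding whether online
 * discard is in effect on a writable mount.
 */
static inline bool
xfs_example_discard_enabled(struct xfs_mount *mp)
{
	return (mp->m_flags & XFS_MOUNT_DISCARD) &&
	       !(mp->m_flags & XFS_MOUNT_RDONLY);
}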
/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT

#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
				((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
		int lnnum);
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)

#define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	0x0004	/* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	0x0008	/* corrupt in-memory data structures */

/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */

static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	do_div(ld, mp->m_sb.sb_agblocks);
	return (xfs_agnumber_t) ld;
}

static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}

/* per-AG block reservation data structures */
struct xfs_ag_resv {
	/* number of blocks originally reserved here */
	xfs_extlen_t			ar_orig_reserved;
	/* number of blocks reserved here */
	xfs_extlen_t			ar_reserved;
	/* number of blocks originally asked for */
	xfs_extlen_t			ar_asked;
};
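/*
 * Illustrative sketch only, not an existing helper: ar_orig_reserved is the
 * size of the reservation when it was made and ar_reserved is what is still
 * set aside, so the number of reserved blocks already consumed can be
 * derived as below.  The helper name is hypothetical.
 */
static inline xfs_extlen_t
xfs_example_ag_resv_used(const struct xfs_ag_resv *resv)
{
	return resv->ar_orig_reserved - resv->ar_reserved;
}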
/*
 * Per-ag incore structure, copies of information in agf and agi, to improve
 * the performance of allocation group selection.
 */
typedef struct xfs_perag {
	struct xfs_mount *pag_mount;	/* owner filesystem */
	xfs_agnumber_t	pag_agno;	/* AG this structure belongs to */
	atomic_t	pag_ref;	/* perag reference count */
	char		pagf_init;	/* this agf's entry is initialized */
	char		pagi_init;	/* this agi's entry is initialized */
	char		pagf_metadata;	/* the agf is preferred to be metadata */
	char		pagi_inodeok;	/* The agi is ok for inodes */
	uint8_t		pagf_levels[XFS_BTNUM_AGF];
					/* # of levels in bno & cnt btree */
	bool		pagf_agflreset;	/* agfl requires reset before use */
	uint32_t	pagf_flcount;	/* count of blocks in freelist */
	xfs_extlen_t	pagf_freeblks;	/* total free blocks */
	xfs_extlen_t	pagf_longest;	/* longest free space */
	uint32_t	pagf_btreeblks;	/* # of blocks held in AGF btrees */
	xfs_agino_t	pagi_freecount;	/* number of free inodes */
	xfs_agino_t	pagi_count;	/* number of allocated inodes */

	/*
	 * Inode allocation search lookup optimisation.
	 * If the pagino matches, the search for new inodes
	 * doesn't need to search the near ones again straight away
	 */
	xfs_agino_t	pagl_pagino;
	xfs_agino_t	pagl_leftrec;
	xfs_agino_t	pagl_rightrec;

	/*
	 * Bitsets of per-ag metadata that have been checked and/or are sick.
	 * Callers should hold pag_state_lock before accessing these fields.
	 */
	uint16_t	pag_checked;
	uint16_t	pag_sick;
	spinlock_t	pag_state_lock;

	spinlock_t	pagb_lock;	/* lock for pagb_tree */
	struct rb_root	pagb_tree;	/* ordered tree of busy extents */
	unsigned int	pagb_gen;	/* generation count for pagb_tree */
	wait_queue_head_t pagb_wait;	/* woken when pagb_gen changes */

	atomic_t	pagf_fstrms;	/* # of filestreams active in this AG */

	spinlock_t	pag_ici_lock;	/* incore inode cache lock */
	struct radix_tree_root pag_ici_root;	/* incore inode cache root */
	int		pag_ici_reclaimable;	/* reclaimable inodes */
	unsigned long	pag_ici_reclaim_cursor;	/* reclaim restart point */

	/* buffer cache index */
	spinlock_t	pag_buf_lock;	/* lock for pag_buf_hash */
	struct rhashtable pag_buf_hash;

	/* for rcu-safe freeing */
	struct rcu_head	rcu_head;
	int		pagb_count;	/* pagb slots in use */

	/* Blocks reserved for all kinds of metadata. */
	struct xfs_ag_resv	pag_meta_resv;
	/* Blocks reserved for the reverse mapping btree. */
	struct xfs_ag_resv	pag_rmapbt_resv;

	/* background prealloc block trimming */
	struct delayed_work	pag_blockgc_work;

	/* reference count */
	uint8_t			pagf_refcount_level;

	/*
	 * Unlinked inode information.  This incore information reflects
	 * data stored in the AGI, so callers must hold the AGI buffer lock
	 * or have some other means to control concurrency.
	 */
	struct rhashtable	pagi_unlinked_hash;
} xfs_perag_t;

static inline struct xfs_ag_resv *
xfs_perag_resv(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	switch (type) {
	case XFS_AG_RESV_METADATA:
		return &pag->pag_meta_resv;
	case XFS_AG_RESV_RMAPBT:
		return &pag->pag_rmapbt_resv;
	default:
		return NULL;
	}
}

int xfs_buf_hash_init(xfs_perag_t *pag);
void xfs_buf_hash_destroy(xfs_perag_t *pag);

extern void	xfs_uuid_table_free(void);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int	xfs_mountfs(xfs_mount_t *mp);
extern int	xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
				     xfs_agnumber_t *maxagi);
extern void	xfs_unmountfs(xfs_mount_t *);

extern int	xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
				 bool reserved);
extern int	xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);

extern int	xfs_readsb(xfs_mount_t *, int);
extern void	xfs_freesb(xfs_mount_t *);
extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);

extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);

extern void	xfs_set_low_space_thresholds(struct xfs_mount *);

int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
			xfs_off_t count_fsb);

struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
		int error_class, int error);
void xfs_force_summary_recalc(struct xfs_mount *mp);
void xfs_mod_delalloc(struct xfs_mount *mp, int64_t delta);

#endif	/* __XFS_MOUNT_H__ */