1 /* 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_shared.h" 21 #include "xfs_format.h" 22 #include "xfs_log_format.h" 23 #include "xfs_trans_resv.h" 24 #include "xfs_bit.h" 25 #include "xfs_sb.h" 26 #include "xfs_mount.h" 27 #include "xfs_inode.h" 28 #include "xfs_ialloc.h" 29 #include "xfs_alloc.h" 30 #include "xfs_error.h" 31 #include "xfs_trace.h" 32 #include "xfs_cksum.h" 33 #include "xfs_trans.h" 34 #include "xfs_buf_item.h" 35 #include "xfs_bmap_btree.h" 36 #include "xfs_alloc_btree.h" 37 #include "xfs_ialloc_btree.h" 38 39 /* 40 * Physical superblock buffer manipulations. Shared with libxfs in userspace. 41 */ 42 43 /* 44 * Reference counting access wrappers to the perag structures. 45 * Because we never free per-ag structures, the only thing we 46 * have to protect against changes is the tree structure itself. 47 */ 48 struct xfs_perag * 49 xfs_perag_get( 50 struct xfs_mount *mp, 51 xfs_agnumber_t agno) 52 { 53 struct xfs_perag *pag; 54 int ref = 0; 55 56 rcu_read_lock(); 57 pag = radix_tree_lookup(&mp->m_perag_tree, agno); 58 if (pag) { 59 ASSERT(atomic_read(&pag->pag_ref) >= 0); 60 ref = atomic_inc_return(&pag->pag_ref); 61 } 62 rcu_read_unlock(); 63 trace_xfs_perag_get(mp, agno, ref, _RET_IP_); 64 return pag; 65 } 66 67 /* 68 * search from @first to find the next perag with the given tag set. 69 */ 70 struct xfs_perag * 71 xfs_perag_get_tag( 72 struct xfs_mount *mp, 73 xfs_agnumber_t first, 74 int tag) 75 { 76 struct xfs_perag *pag; 77 int found; 78 int ref; 79 80 rcu_read_lock(); 81 found = radix_tree_gang_lookup_tag(&mp->m_perag_tree, 82 (void **)&pag, first, 1, tag); 83 if (found <= 0) { 84 rcu_read_unlock(); 85 return NULL; 86 } 87 ref = atomic_inc_return(&pag->pag_ref); 88 rcu_read_unlock(); 89 trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_); 90 return pag; 91 } 92 93 void 94 xfs_perag_put( 95 struct xfs_perag *pag) 96 { 97 int ref; 98 99 ASSERT(atomic_read(&pag->pag_ref) > 0); 100 ref = atomic_dec_return(&pag->pag_ref); 101 trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_); 102 } 103 104 /* 105 * Check the validity of the SB found. 106 */ 107 STATIC int 108 xfs_mount_validate_sb( 109 xfs_mount_t *mp, 110 xfs_sb_t *sbp, 111 bool check_inprogress, 112 bool check_version) 113 { 114 if (sbp->sb_magicnum != XFS_SB_MAGIC) { 115 xfs_warn(mp, "bad magic number"); 116 return -EWRONGFS; 117 } 118 119 120 if (!xfs_sb_good_version(sbp)) { 121 xfs_warn(mp, "bad version"); 122 return -EWRONGFS; 123 } 124 125 /* 126 * Version 5 superblock feature mask validation. Reject combinations the 127 * kernel cannot support up front before checking anything else. For 128 * write validation, we don't need to check feature masks. 129 */ 130 if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) { 131 if (xfs_sb_has_compat_feature(sbp, 132 XFS_SB_FEAT_COMPAT_UNKNOWN)) { 133 xfs_warn(mp, 134 "Superblock has unknown compatible features (0x%x) enabled.", 135 (sbp->sb_features_compat & 136 XFS_SB_FEAT_COMPAT_UNKNOWN)); 137 xfs_warn(mp, 138 "Using a more recent kernel is recommended."); 139 } 140 141 if (xfs_sb_has_ro_compat_feature(sbp, 142 XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) { 143 xfs_alert(mp, 144 "Superblock has unknown read-only compatible features (0x%x) enabled.", 145 (sbp->sb_features_ro_compat & 146 XFS_SB_FEAT_RO_COMPAT_UNKNOWN)); 147 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { 148 xfs_warn(mp, 149 "Attempted to mount read-only compatible filesystem read-write."); 150 xfs_warn(mp, 151 "Filesystem can only be safely mounted read only."); 152 153 return -EINVAL; 154 } 155 } 156 if (xfs_sb_has_incompat_feature(sbp, 157 XFS_SB_FEAT_INCOMPAT_UNKNOWN)) { 158 xfs_warn(mp, 159 "Superblock has unknown incompatible features (0x%x) enabled.", 160 (sbp->sb_features_incompat & 161 XFS_SB_FEAT_INCOMPAT_UNKNOWN)); 162 xfs_warn(mp, 163 "Filesystem can not be safely mounted by this kernel."); 164 return -EINVAL; 165 } 166 } 167 168 if (xfs_sb_version_has_pquotino(sbp)) { 169 if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) { 170 xfs_notice(mp, 171 "Version 5 of Super block has XFS_OQUOTA bits."); 172 return -EFSCORRUPTED; 173 } 174 } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD | 175 XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) { 176 xfs_notice(mp, 177 "Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits."); 178 return -EFSCORRUPTED; 179 } 180 181 /* 182 * Full inode chunks must be aligned to inode chunk size when 183 * sparse inodes are enabled to support the sparse chunk 184 * allocation algorithm and prevent overlapping inode records. 185 */ 186 if (xfs_sb_version_hassparseinodes(sbp)) { 187 uint32_t align; 188 189 align = XFS_INODES_PER_CHUNK * sbp->sb_inodesize 190 >> sbp->sb_blocklog; 191 if (sbp->sb_inoalignmt != align) { 192 xfs_warn(mp, 193 "Inode block alignment (%u) must match chunk size (%u) for sparse inodes.", 194 sbp->sb_inoalignmt, align); 195 return -EINVAL; 196 } 197 } 198 199 if (unlikely( 200 sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) { 201 xfs_warn(mp, 202 "filesystem is marked as having an external log; " 203 "specify logdev on the mount command line."); 204 return -EINVAL; 205 } 206 207 if (unlikely( 208 sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) { 209 xfs_warn(mp, 210 "filesystem is marked as having an internal log; " 211 "do not specify logdev on the mount command line."); 212 return -EINVAL; 213 } 214 215 /* 216 * More sanity checking. Most of these were stolen directly from 217 * xfs_repair. 218 */ 219 if (unlikely( 220 sbp->sb_agcount <= 0 || 221 sbp->sb_sectsize < XFS_MIN_SECTORSIZE || 222 sbp->sb_sectsize > XFS_MAX_SECTORSIZE || 223 sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG || 224 sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG || 225 sbp->sb_sectsize != (1 << sbp->sb_sectlog) || 226 sbp->sb_blocksize < XFS_MIN_BLOCKSIZE || 227 sbp->sb_blocksize > XFS_MAX_BLOCKSIZE || 228 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || 229 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || 230 sbp->sb_blocksize != (1 << sbp->sb_blocklog) || 231 sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG || 232 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || 233 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || 234 sbp->sb_inodelog < XFS_DINODE_MIN_LOG || 235 sbp->sb_inodelog > XFS_DINODE_MAX_LOG || 236 sbp->sb_inodesize != (1 << sbp->sb_inodelog) || 237 sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE || 238 sbp->sb_inopblock != howmany(sbp->sb_blocksize,sbp->sb_inodesize) || 239 (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) || 240 (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || 241 (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || 242 (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */) || 243 sbp->sb_dblocks == 0 || 244 sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) || 245 sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp) || 246 sbp->sb_shared_vn != 0)) { 247 xfs_notice(mp, "SB sanity check failed"); 248 return -EFSCORRUPTED; 249 } 250 251 /* 252 * Until this is fixed only page-sized or smaller data blocks work. 253 */ 254 if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) { 255 xfs_warn(mp, 256 "File system with blocksize %d bytes. " 257 "Only pagesize (%ld) or less will currently work.", 258 sbp->sb_blocksize, PAGE_SIZE); 259 return -ENOSYS; 260 } 261 262 /* 263 * Currently only very few inode sizes are supported. 264 */ 265 switch (sbp->sb_inodesize) { 266 case 256: 267 case 512: 268 case 1024: 269 case 2048: 270 break; 271 default: 272 xfs_warn(mp, "inode size of %d bytes not supported", 273 sbp->sb_inodesize); 274 return -ENOSYS; 275 } 276 277 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || 278 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { 279 xfs_warn(mp, 280 "file system too large to be mounted on this system."); 281 return -EFBIG; 282 } 283 284 if (check_inprogress && sbp->sb_inprogress) { 285 xfs_warn(mp, "Offline file system operation in progress!"); 286 return -EFSCORRUPTED; 287 } 288 return 0; 289 } 290 291 void 292 xfs_sb_quota_from_disk(struct xfs_sb *sbp) 293 { 294 /* 295 * older mkfs doesn't initialize quota inodes to NULLFSINO. This 296 * leads to in-core values having two different values for a quota 297 * inode to be invalid: 0 and NULLFSINO. Change it to a single value 298 * NULLFSINO. 299 * 300 * Note that this change affect only the in-core values. These 301 * values are not written back to disk unless any quota information 302 * is written to the disk. Even in that case, sb_pquotino field is 303 * not written to disk unless the superblock supports pquotino. 304 */ 305 if (sbp->sb_uquotino == 0) 306 sbp->sb_uquotino = NULLFSINO; 307 if (sbp->sb_gquotino == 0) 308 sbp->sb_gquotino = NULLFSINO; 309 if (sbp->sb_pquotino == 0) 310 sbp->sb_pquotino = NULLFSINO; 311 312 /* 313 * We need to do these manipilations only if we are working 314 * with an older version of on-disk superblock. 315 */ 316 if (xfs_sb_version_has_pquotino(sbp)) 317 return; 318 319 if (sbp->sb_qflags & XFS_OQUOTA_ENFD) 320 sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ? 321 XFS_PQUOTA_ENFD : XFS_GQUOTA_ENFD; 322 if (sbp->sb_qflags & XFS_OQUOTA_CHKD) 323 sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ? 324 XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD; 325 sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD); 326 327 if (sbp->sb_qflags & XFS_PQUOTA_ACCT) { 328 /* 329 * In older version of superblock, on-disk superblock only 330 * has sb_gquotino, and in-core superblock has both sb_gquotino 331 * and sb_pquotino. But, only one of them is supported at any 332 * point of time. So, if PQUOTA is set in disk superblock, 333 * copy over sb_gquotino to sb_pquotino. 334 */ 335 sbp->sb_pquotino = sbp->sb_gquotino; 336 sbp->sb_gquotino = NULLFSINO; 337 } 338 } 339 340 static void 341 __xfs_sb_from_disk( 342 struct xfs_sb *to, 343 xfs_dsb_t *from, 344 bool convert_xquota) 345 { 346 to->sb_magicnum = be32_to_cpu(from->sb_magicnum); 347 to->sb_blocksize = be32_to_cpu(from->sb_blocksize); 348 to->sb_dblocks = be64_to_cpu(from->sb_dblocks); 349 to->sb_rblocks = be64_to_cpu(from->sb_rblocks); 350 to->sb_rextents = be64_to_cpu(from->sb_rextents); 351 memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid)); 352 to->sb_logstart = be64_to_cpu(from->sb_logstart); 353 to->sb_rootino = be64_to_cpu(from->sb_rootino); 354 to->sb_rbmino = be64_to_cpu(from->sb_rbmino); 355 to->sb_rsumino = be64_to_cpu(from->sb_rsumino); 356 to->sb_rextsize = be32_to_cpu(from->sb_rextsize); 357 to->sb_agblocks = be32_to_cpu(from->sb_agblocks); 358 to->sb_agcount = be32_to_cpu(from->sb_agcount); 359 to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks); 360 to->sb_logblocks = be32_to_cpu(from->sb_logblocks); 361 to->sb_versionnum = be16_to_cpu(from->sb_versionnum); 362 to->sb_sectsize = be16_to_cpu(from->sb_sectsize); 363 to->sb_inodesize = be16_to_cpu(from->sb_inodesize); 364 to->sb_inopblock = be16_to_cpu(from->sb_inopblock); 365 memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname)); 366 to->sb_blocklog = from->sb_blocklog; 367 to->sb_sectlog = from->sb_sectlog; 368 to->sb_inodelog = from->sb_inodelog; 369 to->sb_inopblog = from->sb_inopblog; 370 to->sb_agblklog = from->sb_agblklog; 371 to->sb_rextslog = from->sb_rextslog; 372 to->sb_inprogress = from->sb_inprogress; 373 to->sb_imax_pct = from->sb_imax_pct; 374 to->sb_icount = be64_to_cpu(from->sb_icount); 375 to->sb_ifree = be64_to_cpu(from->sb_ifree); 376 to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks); 377 to->sb_frextents = be64_to_cpu(from->sb_frextents); 378 to->sb_uquotino = be64_to_cpu(from->sb_uquotino); 379 to->sb_gquotino = be64_to_cpu(from->sb_gquotino); 380 to->sb_qflags = be16_to_cpu(from->sb_qflags); 381 to->sb_flags = from->sb_flags; 382 to->sb_shared_vn = from->sb_shared_vn; 383 to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt); 384 to->sb_unit = be32_to_cpu(from->sb_unit); 385 to->sb_width = be32_to_cpu(from->sb_width); 386 to->sb_dirblklog = from->sb_dirblklog; 387 to->sb_logsectlog = from->sb_logsectlog; 388 to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize); 389 to->sb_logsunit = be32_to_cpu(from->sb_logsunit); 390 to->sb_features2 = be32_to_cpu(from->sb_features2); 391 to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2); 392 to->sb_features_compat = be32_to_cpu(from->sb_features_compat); 393 to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat); 394 to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat); 395 to->sb_features_log_incompat = 396 be32_to_cpu(from->sb_features_log_incompat); 397 /* crc is only used on disk, not in memory; just init to 0 here. */ 398 to->sb_crc = 0; 399 to->sb_spino_align = be32_to_cpu(from->sb_spino_align); 400 to->sb_pquotino = be64_to_cpu(from->sb_pquotino); 401 to->sb_lsn = be64_to_cpu(from->sb_lsn); 402 /* 403 * sb_meta_uuid is only on disk if it differs from sb_uuid and the 404 * feature flag is set; if not set we keep it only in memory. 405 */ 406 if (xfs_sb_version_hasmetauuid(to)) 407 uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid); 408 else 409 uuid_copy(&to->sb_meta_uuid, &from->sb_uuid); 410 /* Convert on-disk flags to in-memory flags? */ 411 if (convert_xquota) 412 xfs_sb_quota_from_disk(to); 413 } 414 415 void 416 xfs_sb_from_disk( 417 struct xfs_sb *to, 418 xfs_dsb_t *from) 419 { 420 __xfs_sb_from_disk(to, from, true); 421 } 422 423 static void 424 xfs_sb_quota_to_disk( 425 struct xfs_dsb *to, 426 struct xfs_sb *from) 427 { 428 __uint16_t qflags = from->sb_qflags; 429 430 to->sb_uquotino = cpu_to_be64(from->sb_uquotino); 431 if (xfs_sb_version_has_pquotino(from)) { 432 to->sb_qflags = cpu_to_be16(from->sb_qflags); 433 to->sb_gquotino = cpu_to_be64(from->sb_gquotino); 434 to->sb_pquotino = cpu_to_be64(from->sb_pquotino); 435 return; 436 } 437 438 /* 439 * The in-core version of sb_qflags do not have XFS_OQUOTA_* 440 * flags, whereas the on-disk version does. So, convert incore 441 * XFS_{PG}QUOTA_* flags to on-disk XFS_OQUOTA_* flags. 442 */ 443 qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD | 444 XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD); 445 446 if (from->sb_qflags & 447 (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD)) 448 qflags |= XFS_OQUOTA_ENFD; 449 if (from->sb_qflags & 450 (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) 451 qflags |= XFS_OQUOTA_CHKD; 452 to->sb_qflags = cpu_to_be16(qflags); 453 454 /* 455 * GQUOTINO and PQUOTINO cannot be used together in versions 456 * of superblock that do not have pquotino. from->sb_flags 457 * tells us which quota is active and should be copied to 458 * disk. If neither are active, we should NULL the inode. 459 * 460 * In all cases, the separate pquotino must remain 0 because it 461 * it beyond the "end" of the valid non-pquotino superblock. 462 */ 463 if (from->sb_qflags & XFS_GQUOTA_ACCT) 464 to->sb_gquotino = cpu_to_be64(from->sb_gquotino); 465 else if (from->sb_qflags & XFS_PQUOTA_ACCT) 466 to->sb_gquotino = cpu_to_be64(from->sb_pquotino); 467 else { 468 /* 469 * We can't rely on just the fields being logged to tell us 470 * that it is safe to write NULLFSINO - we should only do that 471 * if quotas are not actually enabled. Hence only write 472 * NULLFSINO if both in-core quota inodes are NULL. 473 */ 474 if (from->sb_gquotino == NULLFSINO && 475 from->sb_pquotino == NULLFSINO) 476 to->sb_gquotino = cpu_to_be64(NULLFSINO); 477 } 478 479 to->sb_pquotino = 0; 480 } 481 482 void 483 xfs_sb_to_disk( 484 struct xfs_dsb *to, 485 struct xfs_sb *from) 486 { 487 xfs_sb_quota_to_disk(to, from); 488 489 to->sb_magicnum = cpu_to_be32(from->sb_magicnum); 490 to->sb_blocksize = cpu_to_be32(from->sb_blocksize); 491 to->sb_dblocks = cpu_to_be64(from->sb_dblocks); 492 to->sb_rblocks = cpu_to_be64(from->sb_rblocks); 493 to->sb_rextents = cpu_to_be64(from->sb_rextents); 494 memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid)); 495 to->sb_logstart = cpu_to_be64(from->sb_logstart); 496 to->sb_rootino = cpu_to_be64(from->sb_rootino); 497 to->sb_rbmino = cpu_to_be64(from->sb_rbmino); 498 to->sb_rsumino = cpu_to_be64(from->sb_rsumino); 499 to->sb_rextsize = cpu_to_be32(from->sb_rextsize); 500 to->sb_agblocks = cpu_to_be32(from->sb_agblocks); 501 to->sb_agcount = cpu_to_be32(from->sb_agcount); 502 to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks); 503 to->sb_logblocks = cpu_to_be32(from->sb_logblocks); 504 to->sb_versionnum = cpu_to_be16(from->sb_versionnum); 505 to->sb_sectsize = cpu_to_be16(from->sb_sectsize); 506 to->sb_inodesize = cpu_to_be16(from->sb_inodesize); 507 to->sb_inopblock = cpu_to_be16(from->sb_inopblock); 508 memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname)); 509 to->sb_blocklog = from->sb_blocklog; 510 to->sb_sectlog = from->sb_sectlog; 511 to->sb_inodelog = from->sb_inodelog; 512 to->sb_inopblog = from->sb_inopblog; 513 to->sb_agblklog = from->sb_agblklog; 514 to->sb_rextslog = from->sb_rextslog; 515 to->sb_inprogress = from->sb_inprogress; 516 to->sb_imax_pct = from->sb_imax_pct; 517 to->sb_icount = cpu_to_be64(from->sb_icount); 518 to->sb_ifree = cpu_to_be64(from->sb_ifree); 519 to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks); 520 to->sb_frextents = cpu_to_be64(from->sb_frextents); 521 522 to->sb_flags = from->sb_flags; 523 to->sb_shared_vn = from->sb_shared_vn; 524 to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt); 525 to->sb_unit = cpu_to_be32(from->sb_unit); 526 to->sb_width = cpu_to_be32(from->sb_width); 527 to->sb_dirblklog = from->sb_dirblklog; 528 to->sb_logsectlog = from->sb_logsectlog; 529 to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize); 530 to->sb_logsunit = cpu_to_be32(from->sb_logsunit); 531 532 /* 533 * We need to ensure that bad_features2 always matches features2. 534 * Hence we enforce that here rather than having to remember to do it 535 * everywhere else that updates features2. 536 */ 537 from->sb_bad_features2 = from->sb_features2; 538 to->sb_features2 = cpu_to_be32(from->sb_features2); 539 to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2); 540 541 if (xfs_sb_version_hascrc(from)) { 542 to->sb_features_compat = cpu_to_be32(from->sb_features_compat); 543 to->sb_features_ro_compat = 544 cpu_to_be32(from->sb_features_ro_compat); 545 to->sb_features_incompat = 546 cpu_to_be32(from->sb_features_incompat); 547 to->sb_features_log_incompat = 548 cpu_to_be32(from->sb_features_log_incompat); 549 to->sb_spino_align = cpu_to_be32(from->sb_spino_align); 550 to->sb_lsn = cpu_to_be64(from->sb_lsn); 551 if (xfs_sb_version_hasmetauuid(from)) 552 uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid); 553 } 554 } 555 556 static int 557 xfs_sb_verify( 558 struct xfs_buf *bp, 559 bool check_version) 560 { 561 struct xfs_mount *mp = bp->b_target->bt_mount; 562 struct xfs_sb sb; 563 564 /* 565 * Use call variant which doesn't convert quota flags from disk 566 * format, because xfs_mount_validate_sb checks the on-disk flags. 567 */ 568 __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false); 569 570 /* 571 * Only check the in progress field for the primary superblock as 572 * mkfs.xfs doesn't clear it from secondary superblocks. 573 */ 574 return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR, 575 check_version); 576 } 577 578 /* 579 * If the superblock has the CRC feature bit set or the CRC field is non-null, 580 * check that the CRC is valid. We check the CRC field is non-null because a 581 * single bit error could clear the feature bit and unused parts of the 582 * superblock are supposed to be zero. Hence a non-null crc field indicates that 583 * we've potentially lost a feature bit and we should check it anyway. 584 * 585 * However, past bugs (i.e. in growfs) left non-zeroed regions beyond the 586 * last field in V4 secondary superblocks. So for secondary superblocks, 587 * we are more forgiving, and ignore CRC failures if the primary doesn't 588 * indicate that the fs version is V5. 589 */ 590 static void 591 xfs_sb_read_verify( 592 struct xfs_buf *bp) 593 { 594 struct xfs_mount *mp = bp->b_target->bt_mount; 595 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp); 596 int error; 597 598 /* 599 * open code the version check to avoid needing to convert the entire 600 * superblock from disk order just to check the version number 601 */ 602 if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC) && 603 (((be16_to_cpu(dsb->sb_versionnum) & XFS_SB_VERSION_NUMBITS) == 604 XFS_SB_VERSION_5) || 605 dsb->sb_crc != 0)) { 606 607 if (!xfs_buf_verify_cksum(bp, XFS_SB_CRC_OFF)) { 608 /* Only fail bad secondaries on a known V5 filesystem */ 609 if (bp->b_bn == XFS_SB_DADDR || 610 xfs_sb_version_hascrc(&mp->m_sb)) { 611 error = -EFSBADCRC; 612 goto out_error; 613 } 614 } 615 } 616 error = xfs_sb_verify(bp, true); 617 618 out_error: 619 if (error) { 620 xfs_buf_ioerror(bp, error); 621 if (error == -EFSCORRUPTED || error == -EFSBADCRC) 622 xfs_verifier_error(bp); 623 } 624 } 625 626 /* 627 * We may be probed for a filesystem match, so we may not want to emit 628 * messages when the superblock buffer is not actually an XFS superblock. 629 * If we find an XFS superblock, then run a normal, noisy mount because we are 630 * really going to mount it and want to know about errors. 631 */ 632 static void 633 xfs_sb_quiet_read_verify( 634 struct xfs_buf *bp) 635 { 636 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp); 637 638 if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) { 639 /* XFS filesystem, verify noisily! */ 640 xfs_sb_read_verify(bp); 641 return; 642 } 643 /* quietly fail */ 644 xfs_buf_ioerror(bp, -EWRONGFS); 645 } 646 647 static void 648 xfs_sb_write_verify( 649 struct xfs_buf *bp) 650 { 651 struct xfs_mount *mp = bp->b_target->bt_mount; 652 struct xfs_buf_log_item *bip = bp->b_fspriv; 653 int error; 654 655 error = xfs_sb_verify(bp, false); 656 if (error) { 657 xfs_buf_ioerror(bp, error); 658 xfs_verifier_error(bp); 659 return; 660 } 661 662 if (!xfs_sb_version_hascrc(&mp->m_sb)) 663 return; 664 665 if (bip) 666 XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn); 667 668 xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF); 669 } 670 671 const struct xfs_buf_ops xfs_sb_buf_ops = { 672 .verify_read = xfs_sb_read_verify, 673 .verify_write = xfs_sb_write_verify, 674 }; 675 676 const struct xfs_buf_ops xfs_sb_quiet_buf_ops = { 677 .verify_read = xfs_sb_quiet_read_verify, 678 .verify_write = xfs_sb_write_verify, 679 }; 680 681 /* 682 * xfs_mount_common 683 * 684 * Mount initialization code establishing various mount 685 * fields from the superblock associated with the given 686 * mount structure 687 */ 688 void 689 xfs_sb_mount_common( 690 struct xfs_mount *mp, 691 struct xfs_sb *sbp) 692 { 693 mp->m_agfrotor = mp->m_agirotor = 0; 694 spin_lock_init(&mp->m_agirotor_lock); 695 mp->m_maxagi = mp->m_sb.sb_agcount; 696 mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; 697 mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; 698 mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT; 699 mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1; 700 mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog; 701 mp->m_blockmask = sbp->sb_blocksize - 1; 702 mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG; 703 mp->m_blockwmask = mp->m_blockwsize - 1; 704 705 mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1); 706 mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0); 707 mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2; 708 mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2; 709 710 mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1); 711 mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0); 712 mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2; 713 mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2; 714 715 mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1); 716 mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0); 717 mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2; 718 mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2; 719 720 mp->m_bsize = XFS_FSB_TO_BB(mp, 1); 721 mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK, 722 sbp->sb_inopblock); 723 mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog; 724 725 if (sbp->sb_spino_align) 726 mp->m_ialloc_min_blks = sbp->sb_spino_align; 727 else 728 mp->m_ialloc_min_blks = mp->m_ialloc_blks; 729 } 730 731 /* 732 * xfs_initialize_perag_data 733 * 734 * Read in each per-ag structure so we can count up the number of 735 * allocated inodes, free inodes and used filesystem blocks as this 736 * information is no longer persistent in the superblock. Once we have 737 * this information, write it into the in-core superblock structure. 738 */ 739 int 740 xfs_initialize_perag_data( 741 struct xfs_mount *mp, 742 xfs_agnumber_t agcount) 743 { 744 xfs_agnumber_t index; 745 xfs_perag_t *pag; 746 xfs_sb_t *sbp = &mp->m_sb; 747 uint64_t ifree = 0; 748 uint64_t ialloc = 0; 749 uint64_t bfree = 0; 750 uint64_t bfreelst = 0; 751 uint64_t btree = 0; 752 int error; 753 754 for (index = 0; index < agcount; index++) { 755 /* 756 * read the agf, then the agi. This gets us 757 * all the information we need and populates the 758 * per-ag structures for us. 759 */ 760 error = xfs_alloc_pagf_init(mp, NULL, index, 0); 761 if (error) 762 return error; 763 764 error = xfs_ialloc_pagi_init(mp, NULL, index); 765 if (error) 766 return error; 767 pag = xfs_perag_get(mp, index); 768 ifree += pag->pagi_freecount; 769 ialloc += pag->pagi_count; 770 bfree += pag->pagf_freeblks; 771 bfreelst += pag->pagf_flcount; 772 btree += pag->pagf_btreeblks; 773 xfs_perag_put(pag); 774 } 775 776 /* Overwrite incore superblock counters with just-read data */ 777 spin_lock(&mp->m_sb_lock); 778 sbp->sb_ifree = ifree; 779 sbp->sb_icount = ialloc; 780 sbp->sb_fdblocks = bfree + bfreelst + btree; 781 spin_unlock(&mp->m_sb_lock); 782 783 xfs_reinit_percpu_counters(mp); 784 785 return 0; 786 } 787 788 /* 789 * xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock 790 * into the superblock buffer to be logged. It does not provide the higher 791 * level of locking that is needed to protect the in-core superblock from 792 * concurrent access. 793 */ 794 void 795 xfs_log_sb( 796 struct xfs_trans *tp) 797 { 798 struct xfs_mount *mp = tp->t_mountp; 799 struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0); 800 801 mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); 802 mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree); 803 mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks); 804 805 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); 806 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); 807 xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb)); 808 } 809 810 /* 811 * xfs_sync_sb 812 * 813 * Sync the superblock to disk. 814 * 815 * Note that the caller is responsible for checking the frozen state of the 816 * filesystem. This procedure uses the non-blocking transaction allocator and 817 * thus will allow modifications to a frozen fs. This is required because this 818 * code can be called during the process of freezing where use of the high-level 819 * allocator would deadlock. 820 */ 821 int 822 xfs_sync_sb( 823 struct xfs_mount *mp, 824 bool wait) 825 { 826 struct xfs_trans *tp; 827 int error; 828 829 tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP); 830 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 831 if (error) { 832 xfs_trans_cancel(tp); 833 return error; 834 } 835 836 xfs_log_sb(tp); 837 if (wait) 838 xfs_trans_set_sync(tp); 839 return xfs_trans_commit(tp); 840 } 841