// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
#include "xfs_iunlink_item.h"
#include "xfs_dahash_test.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};
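/*
 * Illustrative mapping of the dax= mount option values above (see the
 * Opt_dax/Opt_dax_enum handling in xfs_fs_parse_param() below):
 *
 *	dax or dax=always -> XFS_FEAT_DAX_ALWAYS set, XFS_FEAT_DAX_NEVER clear
 *	dax=never         -> XFS_FEAT_DAX_NEVER set,  XFS_FEAT_DAX_ALWAYS clear
 *	dax=inode         -> both feature bits clear (per-inode flag decides)
 */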
/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};
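/*
 * Illustrative example (not exhaustive): a mount option string such as
 *
 *	logbufs=8,logbsize=256k,allocsize=1m,usrquota,discard
 *
 * is parsed one parameter at a time by xfs_fs_parse_param() using the table
 * above; the byte-size values go through suffix_kstrtoint(), and the
 * effective options are echoed back through xfs_fs_show_options() for
 * /proc/mounts.
 */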
struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

static bool
xfs_set_inode_alloc_perag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino,
	xfs_agnumber_t		max_metadata)
{
	if (!xfs_is_inode32(pag->pag_mount)) {
		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	if (ino > XFS_MAXINUMBER_32) {
		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
	if (pag->pag_agno < max_metadata)
		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	else
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	return true;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	xfs_agnumber_t		maxagi = 0;
	xfs_sb_t		*sbp = &mp->m_sb;
	xfs_agnumber_t		max_metadata;
	xfs_agino_t		agino;
	xfs_ino_t		ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage. Used only for inode32.
	 */
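	/*
	 * Worked example (illustrative numbers): with sb_dblocks = 4,000,000,
	 * sb_imax_pct = 25 and sb_agblocks = 250,000, the maximum inode space
	 * is 1,000,000 blocks, so max_metadata = ceil(1,000,000 / 250,000)
	 * = 4, i.e. the first 4 AGs are preferred for metadata when inode32
	 * is in effect.
	 */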
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);
		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
			maxagi++;
		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}

static int
xfs_setup_dax_always(
	struct xfs_mount	*mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp) &&
	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
		xfs_alert(mp,
			"DAX and reflink cannot work with multi-partitions!");
		return -EINVAL;
	}

	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}

static void
xfs_bdev_mark_dead(
	struct block_device	*bdev)
{
	xfs_force_shutdown(bdev->bd_holder, SHUTDOWN_DEVICE_REMOVED);
}

static const struct blk_holder_ops xfs_holder_ops = {
	.mark_dead		= xfs_bdev_mark_dead,
};

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE, mp,
				    &xfs_holder_ops);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct xfs_mount	*mp,
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, mp);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(mp, logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(mp, rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
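/*
 * For example (illustrative device names), configurations (2) and (3)
 * correspond to mounts like:
 *
 *	mount -t xfs -o logdev=/dev/sdc1 /dev/sdb1 /mnt
 *	mount -t xfs -o logdev=/dev/sdc1,rtdev=/dev/sdd1 /dev/sdb1 /mnt
 *
 * where the logdev= and rtdev= strings end up in mp->m_logname and
 * mp->m_rtname and are opened below.
 */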
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			return error;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(mp, rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(mp, logdev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;
	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress. Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flags)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;

	/*
	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
	 * in flags possibly together with I_DIRTY_SYNC.
	 */
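	/*
	 * In other words (illustrative cases): flags == I_DIRTY_TIME alone or
	 * flags == I_DIRTY_SYNC alone returns early below; only flags ==
	 * (I_DIRTY_SYNC | I_DIRTY_TIME) falls through and logs the timestamps.
	 */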
	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		    "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
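	/*
	 * For reference (VFS freeze levels, assumed from include/linux/fs.h):
	 * the writer state progresses SB_FREEZE_WRITE -> SB_FREEZE_PAGEFAULT
	 * -> SB_FREEZE_FS -> SB_FREEZE_COMPLETE, and during a freeze
	 * ->sync_fs() is invoked between the PAGEFAULT and FS stages.
	 */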
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/*
	 * Expedite background inodegc but don't wait. We do not want to block
	 * here waiting hours for a billion extent file to be truncated.
	 */
	xfs_inodegc_push(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, 0,
				fdblocks - xfs_fdblocks_unavailable(mp));
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		s64	freertx;

		statp->f_blocks = sbp->sb_rblocks;
		freertx = percpu_counter_sum_positive(&mp->m_frextents);
		statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
	}

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim. Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
			"Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
	if (error)
		goto free_delalloc;

	return 0;

free_delalloc:
	percpu_counter_destroy(&mp->m_delalloc_blks);
free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
	percpu_counter_destroy(&mp->m_frextents);
}

static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
#if defined(DEBUG) || defined(XFS_WARN)
		gc->cpu = cpu;
#endif
		init_llist_head(&gc->list);
		gc->items = 0;
		gc->error = 0;
		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static void
xfs_fs_shutdown(
	struct super_block	*sb)
{
	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
	.shutdown		= xfs_fs_shutdown,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
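/*
 * For example (illustrative values): "32k" -> 32 << 10 = 32768,
 * "1m" -> 1 << 20 = 1048576, "8" -> 8.  Only a single trailing k/m/g
 * suffix is honoured, and the numeric part is parsed in the caller's
 * base (10 for the mount options below).
 */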
static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_features |= XFS_FEAT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
	 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;
	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/*
	 * All percpu data structures requiring cleanup when a cpu goes offline
	 * must be allocated before adding this @mp to the cpu-dead handler's
	 * mount list.
	 */
	xfs_mount_list_add(mp);

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* ASCII case insensitivity is undergoing deprecation. */
	if (xfs_has_asciici(mp)) {
#ifdef CONFIG_XFS_SUPPORT_ASCII_CI
		xfs_warn_once(mp,
	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_has_needsrepair(mp)) {
		xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
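	/*
	 * For example (illustrative): a filesystem created with a 64k block
	 * size cannot currently be mounted on a kernel using 4k pages; the
	 * check below rejects it with -ENOSYS.
	 */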
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_has_bigtime(mp)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
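	/*
	 * Roughly (assumed from the on-disk format documentation): the legacy
	 * range is the signed 32-bit time_t window (December 1901 to January
	 * 2038), while bigtime extends the limit out to the year 2486.
	 */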
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (xfs_has_crc(mp))
		sb->s_flags |= SB_I_VERSION;

	if (xfs_has_dax_always(mp)) {
		error = xfs_setup_dax_always(mp);
		if (error)
			goto out_filestream_unmount;
	}

	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
		xfs_warn(mp,
	"mounting with \"discard\" option, but the device does not support discard");
		mp->m_features &= ~XFS_FEAT_DISCARD;
	}

	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_inodegc:
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}

static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	if (xfs_has_norecovery(mp)) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (xfs_sb_is_v5(sbp) &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
	};
	int			error;

	/* Flush all the dirty data to disk. */
	error = sync_filesystem(mp->m_super);
	if (error)
		return error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/*
	 * Clear out all remaining COW staging extents and speculative post-EOF
	 * preallocations so that we don't leave inodes requiring inactivation
	 * cleanups during reclaim on a read-only mount.  We must process every
	 * cached inode, so this requires a synchronous cache scan.
	 */
	error = xfs_blockgc_free_space(mp, &icw);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker. xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation. In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (xfs_has_crc(mp))
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	/* inode32 -> inode64 */
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* ro -> rw */
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */

static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free	     = xfs_fs_free,
};

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_features |= XFS_FEAT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_features |= XFS_FEAT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");
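
/*
 * Illustrative note (not part of the original file): a fresh mount through
 * the new mount API exercises the hooks registered above.  fsopen("xfs")
 * allocates an fs_context and calls xfs_init_fs_context(); option strings
 * such as "logbufs" are parsed by xfs_fs_parse_param() against
 * xfs_fs_parameters (while "source" is consumed by the VFS itself); and
 * FSCONFIG_CMD_CREATE ends up in xfs_fs_get_tree().  A hedged userspace
 * sketch (the device and mount point below are made-up examples):
 *
 *	int fsfd = syscall(SYS_fsopen, "xfs", FSOPEN_CLOEXEC);
 *
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdb1", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "logbufs", "8", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *
 *	int mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
 *	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt/scratch",
 *			MOVE_MOUNT_F_EMPTY_PATH);
 */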

STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					 SLAB_HWCACHE_ALIGN |
					 SLAB_RECLAIM_ACCOUNT |
					 SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_buf_cache)
		goto out;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out_destroy_buf_cache;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					       sizeof(struct xfs_da_state),
					       0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;

	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efd_cache)
		goto out_destroy_buf_item_cache;

	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efi_cache)
		goto out_destroy_efd_cache;

	xfs_inode_cache = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_cache)
		goto out_destroy_efi_cache;

	xfs_ili_cache = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_cache)
		goto out_destroy_inode_cache;

	xfs_icreate_cache = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_cache)
		goto out_destroy_ili_cache;

	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_cache)
		goto out_destroy_icreate_cache;

	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_cache)
		goto out_destroy_rud_cache;

	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_cache)
		goto out_destroy_rui_cache;

	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_cache)
		goto out_destroy_cud_cache;

	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_cache)
		goto out_destroy_cui_cache;

	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_cache)
		goto out_destroy_bud_cache;

	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
					   sizeof(struct xfs_attrd_log_item),
					   0, 0, NULL);
	if (!xfs_attrd_cache)
		goto out_destroy_bui_cache;

	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
					   sizeof(struct xfs_attri_log_item),
					   0, 0, NULL);
	if (!xfs_attri_cache)
		goto out_destroy_attrd_cache;

	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
					     sizeof(struct xfs_iunlink_item),
					     0, 0, NULL);
	if (!xfs_iunlink_cache)
		goto out_destroy_attri_cache;

	return 0;
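
	/*
	 * Explanatory note: each label below tears down only the caches
	 * created before the corresponding failure point and then falls
	 * through to the older ones, so destruction order is an exact
	 * mirror of creation order and a partial failure leaves nothing
	 * allocated behind.
	 */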
 out_destroy_attri_cache:
	kmem_cache_destroy(xfs_attri_cache);
 out_destroy_attrd_cache:
	kmem_cache_destroy(xfs_attrd_cache);
 out_destroy_bui_cache:
	kmem_cache_destroy(xfs_bui_cache);
 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out_destroy_buf_cache:
	kmem_cache_destroy(xfs_buf_cache);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_iunlink_cache);
	kmem_cache_destroy(xfs_attri_cache);
	kmem_cache_destroy(xfs_attrd_cache);
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
	kmem_cache_destroy(xfs_buf_cache);
}
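
/*
 * Illustrative note (not part of the original file): the rcu_barrier() in
 * xfs_destroy_caches() exists because some objects (inodes in particular)
 * are returned to their cache from an RCU callback, and kmem_cache_destroy()
 * must not run while such callbacks are still pending.  The generic shape of
 * that pattern, sketched with a hypothetical "foo" cache:
 *
 *	struct foo {
 *		struct rcu_head	rcu;
 *		...
 *	};
 *
 *	static void foo_free_rcu(struct rcu_head *head)
 *	{
 *		kmem_cache_free(foo_cache, container_of(head, struct foo, rcu));
 *	}
 *
 *	// retire an object; readers under rcu_read_lock() may still see it
 *	call_rcu(&foo->rcu, foo_free_rcu);
 *
 *	// module teardown: flush every pending callback, then kill the cache
 *	rcu_barrier();
 *	kmem_cache_destroy(foo_cache);
 */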

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

#ifdef CONFIG_HOTPLUG_CPU
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		xlog_cil_pcp_dead(mp->m_log, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	error = xfs_dahash_test();
	if (error)
		return error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_cpu_hotplug_init();
	if (error)
		goto out;

	error = xfs_init_caches();
	if (error)
		goto out_destroy_hp;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_caches;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_init_procfs();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_caches:
	xfs_destroy_caches();
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_caches();
	xfs_uuid_table_free();
	xfs_cpu_hotplug_destroy();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");