1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 7 #include "xfs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_sb.h" 13 #include "xfs_mount.h" 14 #include "xfs_inode.h" 15 #include "xfs_btree.h" 16 #include "xfs_bmap.h" 17 #include "xfs_alloc.h" 18 #include "xfs_fsops.h" 19 #include "xfs_trans.h" 20 #include "xfs_buf_item.h" 21 #include "xfs_log.h" 22 #include "xfs_log_priv.h" 23 #include "xfs_dir2.h" 24 #include "xfs_extfree_item.h" 25 #include "xfs_mru_cache.h" 26 #include "xfs_inode_item.h" 27 #include "xfs_icache.h" 28 #include "xfs_trace.h" 29 #include "xfs_icreate_item.h" 30 #include "xfs_filestream.h" 31 #include "xfs_quota.h" 32 #include "xfs_sysfs.h" 33 #include "xfs_ondisk.h" 34 #include "xfs_rmap_item.h" 35 #include "xfs_refcount_item.h" 36 #include "xfs_bmap_item.h" 37 #include "xfs_reflink.h" 38 #include "xfs_pwork.h" 39 #include "xfs_ag.h" 40 #include "xfs_defer.h" 41 #include "xfs_attr_item.h" 42 #include "xfs_xattr.h" 43 #include "xfs_iunlink_item.h" 44 #include "xfs_dahash_test.h" 45 46 #include <linux/magic.h> 47 #include <linux/fs_context.h> 48 #include <linux/fs_parser.h> 49 50 static const struct super_operations xfs_super_operations; 51 52 static struct kset *xfs_kset; /* top-level xfs sysfs dir */ 53 #ifdef DEBUG 54 static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */ 55 #endif 56 57 #ifdef CONFIG_HOTPLUG_CPU 58 static LIST_HEAD(xfs_mount_list); 59 static DEFINE_SPINLOCK(xfs_mount_list_lock); 60 61 static inline void xfs_mount_list_add(struct xfs_mount *mp) 62 { 63 spin_lock(&xfs_mount_list_lock); 64 list_add(&mp->m_mount_list, &xfs_mount_list); 65 spin_unlock(&xfs_mount_list_lock); 66 } 67 68 static inline void xfs_mount_list_del(struct xfs_mount *mp) 69 { 70 spin_lock(&xfs_mount_list_lock); 71 list_del(&mp->m_mount_list); 72 spin_unlock(&xfs_mount_list_lock); 73 } 74 #else /* !CONFIG_HOTPLUG_CPU */ 75 static inline void xfs_mount_list_add(struct xfs_mount *mp) {} 76 static inline void xfs_mount_list_del(struct xfs_mount *mp) {} 77 #endif 78 79 enum xfs_dax_mode { 80 XFS_DAX_INODE = 0, 81 XFS_DAX_ALWAYS = 1, 82 XFS_DAX_NEVER = 2, 83 }; 84 85 static void 86 xfs_mount_set_dax_mode( 87 struct xfs_mount *mp, 88 enum xfs_dax_mode mode) 89 { 90 switch (mode) { 91 case XFS_DAX_INODE: 92 mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER); 93 break; 94 case XFS_DAX_ALWAYS: 95 mp->m_features |= XFS_FEAT_DAX_ALWAYS; 96 mp->m_features &= ~XFS_FEAT_DAX_NEVER; 97 break; 98 case XFS_DAX_NEVER: 99 mp->m_features |= XFS_FEAT_DAX_NEVER; 100 mp->m_features &= ~XFS_FEAT_DAX_ALWAYS; 101 break; 102 } 103 } 104 105 static const struct constant_table dax_param_enums[] = { 106 {"inode", XFS_DAX_INODE }, 107 {"always", XFS_DAX_ALWAYS }, 108 {"never", XFS_DAX_NEVER }, 109 {} 110 }; 111 112 /* 113 * Table driven mount option parser. 
114 */ 115 enum { 116 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, 117 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid, 118 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups, 119 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep, 120 Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2, 121 Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, 122 Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota, 123 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce, 124 Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum, 125 }; 126 127 static const struct fs_parameter_spec xfs_fs_parameters[] = { 128 fsparam_u32("logbufs", Opt_logbufs), 129 fsparam_string("logbsize", Opt_logbsize), 130 fsparam_string("logdev", Opt_logdev), 131 fsparam_string("rtdev", Opt_rtdev), 132 fsparam_flag("wsync", Opt_wsync), 133 fsparam_flag("noalign", Opt_noalign), 134 fsparam_flag("swalloc", Opt_swalloc), 135 fsparam_u32("sunit", Opt_sunit), 136 fsparam_u32("swidth", Opt_swidth), 137 fsparam_flag("nouuid", Opt_nouuid), 138 fsparam_flag("grpid", Opt_grpid), 139 fsparam_flag("nogrpid", Opt_nogrpid), 140 fsparam_flag("bsdgroups", Opt_bsdgroups), 141 fsparam_flag("sysvgroups", Opt_sysvgroups), 142 fsparam_string("allocsize", Opt_allocsize), 143 fsparam_flag("norecovery", Opt_norecovery), 144 fsparam_flag("inode64", Opt_inode64), 145 fsparam_flag("inode32", Opt_inode32), 146 fsparam_flag("ikeep", Opt_ikeep), 147 fsparam_flag("noikeep", Opt_noikeep), 148 fsparam_flag("largeio", Opt_largeio), 149 fsparam_flag("nolargeio", Opt_nolargeio), 150 fsparam_flag("attr2", Opt_attr2), 151 fsparam_flag("noattr2", Opt_noattr2), 152 fsparam_flag("filestreams", Opt_filestreams), 153 fsparam_flag("quota", Opt_quota), 154 fsparam_flag("noquota", Opt_noquota), 155 fsparam_flag("usrquota", Opt_usrquota), 156 fsparam_flag("grpquota", Opt_grpquota), 157 fsparam_flag("prjquota", Opt_prjquota), 158 fsparam_flag("uquota", Opt_uquota), 159 fsparam_flag("gquota", Opt_gquota), 160 fsparam_flag("pquota", Opt_pquota), 161 fsparam_flag("uqnoenforce", Opt_uqnoenforce), 162 fsparam_flag("gqnoenforce", Opt_gqnoenforce), 163 fsparam_flag("pqnoenforce", Opt_pqnoenforce), 164 fsparam_flag("qnoenforce", Opt_qnoenforce), 165 fsparam_flag("discard", Opt_discard), 166 fsparam_flag("nodiscard", Opt_nodiscard), 167 fsparam_flag("dax", Opt_dax), 168 fsparam_enum("dax", Opt_dax_enum, dax_param_enums), 169 {} 170 }; 171 172 struct proc_xfs_info { 173 uint64_t flag; 174 char *str; 175 }; 176 177 static int 178 xfs_fs_show_options( 179 struct seq_file *m, 180 struct dentry *root) 181 { 182 static struct proc_xfs_info xfs_info_set[] = { 183 /* the few simple ones we can get from the mount struct */ 184 { XFS_FEAT_IKEEP, ",ikeep" }, 185 { XFS_FEAT_WSYNC, ",wsync" }, 186 { XFS_FEAT_NOALIGN, ",noalign" }, 187 { XFS_FEAT_SWALLOC, ",swalloc" }, 188 { XFS_FEAT_NOUUID, ",nouuid" }, 189 { XFS_FEAT_NORECOVERY, ",norecovery" }, 190 { XFS_FEAT_ATTR2, ",attr2" }, 191 { XFS_FEAT_FILESTREAMS, ",filestreams" }, 192 { XFS_FEAT_GRPID, ",grpid" }, 193 { XFS_FEAT_DISCARD, ",discard" }, 194 { XFS_FEAT_LARGE_IOSIZE, ",largeio" }, 195 { XFS_FEAT_DAX_ALWAYS, ",dax=always" }, 196 { XFS_FEAT_DAX_NEVER, ",dax=never" }, 197 { 0, NULL } 198 }; 199 struct xfs_mount *mp = XFS_M(root->d_sb); 200 struct proc_xfs_info *xfs_infop; 201 202 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) { 203 if (mp->m_features & xfs_infop->flag) 204 seq_puts(m, xfs_infop->str); 205 } 206 207 seq_printf(m, ",inode%d", 
xfs_has_small_inums(mp) ? 32 : 64); 208 209 if (xfs_has_allocsize(mp)) 210 seq_printf(m, ",allocsize=%dk", 211 (1 << mp->m_allocsize_log) >> 10); 212 213 if (mp->m_logbufs > 0) 214 seq_printf(m, ",logbufs=%d", mp->m_logbufs); 215 if (mp->m_logbsize > 0) 216 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10); 217 218 if (mp->m_logname) 219 seq_show_option(m, "logdev", mp->m_logname); 220 if (mp->m_rtname) 221 seq_show_option(m, "rtdev", mp->m_rtname); 222 223 if (mp->m_dalign > 0) 224 seq_printf(m, ",sunit=%d", 225 (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); 226 if (mp->m_swidth > 0) 227 seq_printf(m, ",swidth=%d", 228 (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); 229 230 if (mp->m_qflags & XFS_UQUOTA_ENFD) 231 seq_puts(m, ",usrquota"); 232 else if (mp->m_qflags & XFS_UQUOTA_ACCT) 233 seq_puts(m, ",uqnoenforce"); 234 235 if (mp->m_qflags & XFS_PQUOTA_ENFD) 236 seq_puts(m, ",prjquota"); 237 else if (mp->m_qflags & XFS_PQUOTA_ACCT) 238 seq_puts(m, ",pqnoenforce"); 239 240 if (mp->m_qflags & XFS_GQUOTA_ENFD) 241 seq_puts(m, ",grpquota"); 242 else if (mp->m_qflags & XFS_GQUOTA_ACCT) 243 seq_puts(m, ",gqnoenforce"); 244 245 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) 246 seq_puts(m, ",noquota"); 247 248 return 0; 249 } 250 251 static bool 252 xfs_set_inode_alloc_perag( 253 struct xfs_perag *pag, 254 xfs_ino_t ino, 255 xfs_agnumber_t max_metadata) 256 { 257 if (!xfs_is_inode32(pag->pag_mount)) { 258 set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); 259 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); 260 return false; 261 } 262 263 if (ino > XFS_MAXINUMBER_32) { 264 clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); 265 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); 266 return false; 267 } 268 269 set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); 270 if (pag->pag_agno < max_metadata) 271 set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); 272 else 273 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); 274 return true; 275 } 276 277 /* 278 * Set parameters for inode allocation heuristics, taking into account 279 * filesystem size and inode32/inode64 mount options; i.e. specifically 280 * whether or not XFS_FEAT_SMALL_INUMS is set. 281 * 282 * Inode allocation patterns are altered only if inode32 is requested 283 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large. 284 * If altered, XFS_OPSTATE_INODE32 is set as well. 285 * 286 * An agcount independent of that in the mount structure is provided 287 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated 288 * to the potentially higher ag count. 289 * 290 * Returns the maximum AG index which may contain inodes. 291 */ 292 xfs_agnumber_t 293 xfs_set_inode_alloc( 294 struct xfs_mount *mp, 295 xfs_agnumber_t agcount) 296 { 297 xfs_agnumber_t index; 298 xfs_agnumber_t maxagi = 0; 299 xfs_sb_t *sbp = &mp->m_sb; 300 xfs_agnumber_t max_metadata; 301 xfs_agino_t agino; 302 xfs_ino_t ino; 303 304 /* 305 * Calculate how much should be reserved for inodes to meet 306 * the max inode percentage. Used only for inode32. 
307 */ 308 if (M_IGEO(mp)->maxicount) { 309 uint64_t icount; 310 311 icount = sbp->sb_dblocks * sbp->sb_imax_pct; 312 do_div(icount, 100); 313 icount += sbp->sb_agblocks - 1; 314 do_div(icount, sbp->sb_agblocks); 315 max_metadata = icount; 316 } else { 317 max_metadata = agcount; 318 } 319 320 /* Get the last possible inode in the filesystem */ 321 agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1); 322 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); 323 324 /* 325 * If user asked for no more than 32-bit inodes, and the fs is 326 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter 327 * the allocator to accommodate the request. 328 */ 329 if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32) 330 set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate); 331 else 332 clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate); 333 334 for (index = 0; index < agcount; index++) { 335 struct xfs_perag *pag; 336 337 ino = XFS_AGINO_TO_INO(mp, index, agino); 338 339 pag = xfs_perag_get(mp, index); 340 if (xfs_set_inode_alloc_perag(pag, ino, max_metadata)) 341 maxagi++; 342 xfs_perag_put(pag); 343 } 344 345 return xfs_is_inode32(mp) ? maxagi : agcount; 346 } 347 348 static int 349 xfs_setup_dax_always( 350 struct xfs_mount *mp) 351 { 352 if (!mp->m_ddev_targp->bt_daxdev && 353 (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) { 354 xfs_alert(mp, 355 "DAX unsupported by block device. Turning off DAX."); 356 goto disable_dax; 357 } 358 359 if (mp->m_super->s_blocksize != PAGE_SIZE) { 360 xfs_alert(mp, 361 "DAX not supported for blocksize. Turning off DAX."); 362 goto disable_dax; 363 } 364 365 if (xfs_has_reflink(mp) && 366 bdev_is_partition(mp->m_ddev_targp->bt_bdev)) { 367 xfs_alert(mp, 368 "DAX and reflink cannot work with multi-partitions!"); 369 return -EINVAL; 370 } 371 372 xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); 373 return 0; 374 375 disable_dax: 376 xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER); 377 return 0; 378 } 379 380 STATIC int 381 xfs_blkdev_get( 382 xfs_mount_t *mp, 383 const char *name, 384 struct block_device **bdevp) 385 { 386 int error = 0; 387 388 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 389 mp); 390 if (IS_ERR(*bdevp)) { 391 error = PTR_ERR(*bdevp); 392 xfs_warn(mp, "Invalid device [%s], error=%d", name, error); 393 } 394 395 return error; 396 } 397 398 STATIC void 399 xfs_blkdev_put( 400 struct block_device *bdev) 401 { 402 if (bdev) 403 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 404 } 405 406 STATIC void 407 xfs_close_devices( 408 struct xfs_mount *mp) 409 { 410 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { 411 struct block_device *logdev = mp->m_logdev_targp->bt_bdev; 412 413 xfs_free_buftarg(mp->m_logdev_targp); 414 xfs_blkdev_put(logdev); 415 } 416 if (mp->m_rtdev_targp) { 417 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev; 418 419 xfs_free_buftarg(mp->m_rtdev_targp); 420 xfs_blkdev_put(rtdev); 421 } 422 xfs_free_buftarg(mp->m_ddev_targp); 423 } 424 425 /* 426 * The file system configurations are: 427 * (1) device (partition) with data and internal log 428 * (2) logical volume with data and log subvolumes. 429 * (3) logical volume with data, log, and realtime subvolumes. 430 * 431 * We only have to handle opening the log and realtime volumes here if 432 * they are present. The data subvolume has already been opened by 433 * get_sb_bdev() and is stored in sb->s_bdev. 
434 */ 435 STATIC int 436 xfs_open_devices( 437 struct xfs_mount *mp) 438 { 439 struct block_device *ddev = mp->m_super->s_bdev; 440 struct block_device *logdev = NULL, *rtdev = NULL; 441 int error; 442 443 /* 444 * Open real time and log devices - order is important. 445 */ 446 if (mp->m_logname) { 447 error = xfs_blkdev_get(mp, mp->m_logname, &logdev); 448 if (error) 449 return error; 450 } 451 452 if (mp->m_rtname) { 453 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev); 454 if (error) 455 goto out_close_logdev; 456 457 if (rtdev == ddev || rtdev == logdev) { 458 xfs_warn(mp, 459 "Cannot mount filesystem with identical rtdev and ddev/logdev."); 460 error = -EINVAL; 461 goto out_close_rtdev; 462 } 463 } 464 465 /* 466 * Setup xfs_mount buffer target pointers 467 */ 468 error = -ENOMEM; 469 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev); 470 if (!mp->m_ddev_targp) 471 goto out_close_rtdev; 472 473 if (rtdev) { 474 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev); 475 if (!mp->m_rtdev_targp) 476 goto out_free_ddev_targ; 477 } 478 479 if (logdev && logdev != ddev) { 480 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev); 481 if (!mp->m_logdev_targp) 482 goto out_free_rtdev_targ; 483 } else { 484 mp->m_logdev_targp = mp->m_ddev_targp; 485 } 486 487 return 0; 488 489 out_free_rtdev_targ: 490 if (mp->m_rtdev_targp) 491 xfs_free_buftarg(mp->m_rtdev_targp); 492 out_free_ddev_targ: 493 xfs_free_buftarg(mp->m_ddev_targp); 494 out_close_rtdev: 495 xfs_blkdev_put(rtdev); 496 out_close_logdev: 497 if (logdev && logdev != ddev) 498 xfs_blkdev_put(logdev); 499 return error; 500 } 501 502 /* 503 * Setup xfs_mount buffer target pointers based on superblock 504 */ 505 STATIC int 506 xfs_setup_devices( 507 struct xfs_mount *mp) 508 { 509 int error; 510 511 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize); 512 if (error) 513 return error; 514 515 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { 516 unsigned int log_sector_size = BBSIZE; 517 518 if (xfs_has_sector(mp)) 519 log_sector_size = mp->m_sb.sb_logsectsize; 520 error = xfs_setsize_buftarg(mp->m_logdev_targp, 521 log_sector_size); 522 if (error) 523 return error; 524 } 525 if (mp->m_rtdev_targp) { 526 error = xfs_setsize_buftarg(mp->m_rtdev_targp, 527 mp->m_sb.sb_sectsize); 528 if (error) 529 return error; 530 } 531 532 return 0; 533 } 534 535 STATIC int 536 xfs_init_mount_workqueues( 537 struct xfs_mount *mp) 538 { 539 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s", 540 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 541 1, mp->m_super->s_id); 542 if (!mp->m_buf_workqueue) 543 goto out; 544 545 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s", 546 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 547 0, mp->m_super->s_id); 548 if (!mp->m_unwritten_workqueue) 549 goto out_destroy_buf; 550 551 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s", 552 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 553 0, mp->m_super->s_id); 554 if (!mp->m_reclaim_workqueue) 555 goto out_destroy_unwritten; 556 557 mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s", 558 XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM), 559 0, mp->m_super->s_id); 560 if (!mp->m_blockgc_wq) 561 goto out_destroy_reclaim; 562 563 mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s", 564 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 565 1, mp->m_super->s_id); 566 if (!mp->m_inodegc_wq) 567 goto out_destroy_blockgc; 568 569 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", 570 XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id); 571 if 
(!mp->m_sync_workqueue) 572 goto out_destroy_inodegc; 573 574 return 0; 575 576 out_destroy_inodegc: 577 destroy_workqueue(mp->m_inodegc_wq); 578 out_destroy_blockgc: 579 destroy_workqueue(mp->m_blockgc_wq); 580 out_destroy_reclaim: 581 destroy_workqueue(mp->m_reclaim_workqueue); 582 out_destroy_unwritten: 583 destroy_workqueue(mp->m_unwritten_workqueue); 584 out_destroy_buf: 585 destroy_workqueue(mp->m_buf_workqueue); 586 out: 587 return -ENOMEM; 588 } 589 590 STATIC void 591 xfs_destroy_mount_workqueues( 592 struct xfs_mount *mp) 593 { 594 destroy_workqueue(mp->m_sync_workqueue); 595 destroy_workqueue(mp->m_blockgc_wq); 596 destroy_workqueue(mp->m_inodegc_wq); 597 destroy_workqueue(mp->m_reclaim_workqueue); 598 destroy_workqueue(mp->m_unwritten_workqueue); 599 destroy_workqueue(mp->m_buf_workqueue); 600 } 601 602 static void 603 xfs_flush_inodes_worker( 604 struct work_struct *work) 605 { 606 struct xfs_mount *mp = container_of(work, struct xfs_mount, 607 m_flush_inodes_work); 608 struct super_block *sb = mp->m_super; 609 610 if (down_read_trylock(&sb->s_umount)) { 611 sync_inodes_sb(sb); 612 up_read(&sb->s_umount); 613 } 614 } 615 616 /* 617 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK 618 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting 619 * for IO to complete so that we effectively throttle multiple callers to the 620 * rate at which IO is completing. 621 */ 622 void 623 xfs_flush_inodes( 624 struct xfs_mount *mp) 625 { 626 /* 627 * If flush_work() returns true then that means we waited for a flush 628 * which was already in progress. Don't bother running another scan. 629 */ 630 if (flush_work(&mp->m_flush_inodes_work)) 631 return; 632 633 queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work); 634 flush_work(&mp->m_flush_inodes_work); 635 } 636 637 /* Catch misguided souls that try to use this interface on XFS */ 638 STATIC struct inode * 639 xfs_fs_alloc_inode( 640 struct super_block *sb) 641 { 642 BUG(); 643 return NULL; 644 } 645 646 /* 647 * Now that the generic code is guaranteed not to be accessing 648 * the linux inode, we can inactivate and reclaim the inode. 649 */ 650 STATIC void 651 xfs_fs_destroy_inode( 652 struct inode *inode) 653 { 654 struct xfs_inode *ip = XFS_I(inode); 655 656 trace_xfs_destroy_inode(ip); 657 658 ASSERT(!rwsem_is_locked(&inode->i_rwsem)); 659 XFS_STATS_INC(ip->i_mount, vn_rele); 660 XFS_STATS_INC(ip->i_mount, vn_remove); 661 xfs_inode_mark_reclaimable(ip); 662 } 663 664 static void 665 xfs_fs_dirty_inode( 666 struct inode *inode, 667 int flags) 668 { 669 struct xfs_inode *ip = XFS_I(inode); 670 struct xfs_mount *mp = ip->i_mount; 671 struct xfs_trans *tp; 672 673 if (!(inode->i_sb->s_flags & SB_LAZYTIME)) 674 return; 675 676 /* 677 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC) 678 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed 679 * in flags possibly together with I_DIRTY_SYNC. 680 */ 681 if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME)) 682 return; 683 684 if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp)) 685 return; 686 xfs_ilock(ip, XFS_ILOCK_EXCL); 687 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 688 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP); 689 xfs_trans_commit(tp); 690 } 691 692 /* 693 * Slab object creation initialisation for the XFS inode. 694 * This covers only the idempotent fields in the XFS inode; 695 * all other fields need to be initialised on allocation 696 * from the slab. 
This avoids the need to repeatedly initialise 697 * fields in the xfs inode that left in the initialise state 698 * when freeing the inode. 699 */ 700 STATIC void 701 xfs_fs_inode_init_once( 702 void *inode) 703 { 704 struct xfs_inode *ip = inode; 705 706 memset(ip, 0, sizeof(struct xfs_inode)); 707 708 /* vfs inode */ 709 inode_init_once(VFS_I(ip)); 710 711 /* xfs inode */ 712 atomic_set(&ip->i_pincount, 0); 713 spin_lock_init(&ip->i_flags_lock); 714 715 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, 716 "xfsino", ip->i_ino); 717 } 718 719 /* 720 * We do an unlocked check for XFS_IDONTCACHE here because we are already 721 * serialised against cache hits here via the inode->i_lock and igrab() in 722 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be 723 * racing with us, and it avoids needing to grab a spinlock here for every inode 724 * we drop the final reference on. 725 */ 726 STATIC int 727 xfs_fs_drop_inode( 728 struct inode *inode) 729 { 730 struct xfs_inode *ip = XFS_I(inode); 731 732 /* 733 * If this unlinked inode is in the middle of recovery, don't 734 * drop the inode just yet; log recovery will take care of 735 * that. See the comment for this inode flag. 736 */ 737 if (ip->i_flags & XFS_IRECOVERY) { 738 ASSERT(xlog_recovery_needed(ip->i_mount->m_log)); 739 return 0; 740 } 741 742 return generic_drop_inode(inode); 743 } 744 745 static void 746 xfs_mount_free( 747 struct xfs_mount *mp) 748 { 749 kfree(mp->m_rtname); 750 kfree(mp->m_logname); 751 kmem_free(mp); 752 } 753 754 STATIC int 755 xfs_fs_sync_fs( 756 struct super_block *sb, 757 int wait) 758 { 759 struct xfs_mount *mp = XFS_M(sb); 760 int error; 761 762 trace_xfs_fs_sync_fs(mp, __return_address); 763 764 /* 765 * Doing anything during the async pass would be counterproductive. 766 */ 767 if (!wait) 768 return 0; 769 770 error = xfs_log_force(mp, XFS_LOG_SYNC); 771 if (error) 772 return error; 773 774 if (laptop_mode) { 775 /* 776 * The disk must be active because we're syncing. 777 * We schedule log work now (now that the disk is 778 * active) instead of later (when it might not be). 779 */ 780 flush_delayed_work(&mp->m_log->l_work); 781 } 782 783 /* 784 * If we are called with page faults frozen out, it means we are about 785 * to freeze the transaction subsystem. Take the opportunity to shut 786 * down inodegc because once SB_FREEZE_FS is set it's too late to 787 * prevent inactivation races with freeze. The fs doesn't get called 788 * again by the freezing process until after SB_FREEZE_FS has been set, 789 * so it's now or never. Same logic applies to speculative allocation 790 * garbage collection. 791 * 792 * We don't care if this is a normal syncfs call that does this or 793 * freeze that does this - we can run this multiple times without issue 794 * and we won't race with a restart because a restart can only occur 795 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE. 796 */ 797 if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) { 798 xfs_inodegc_stop(mp); 799 xfs_blockgc_stop(mp); 800 } 801 802 return 0; 803 } 804 805 STATIC int 806 xfs_fs_statfs( 807 struct dentry *dentry, 808 struct kstatfs *statp) 809 { 810 struct xfs_mount *mp = XFS_M(dentry->d_sb); 811 xfs_sb_t *sbp = &mp->m_sb; 812 struct xfs_inode *ip = XFS_I(d_inode(dentry)); 813 uint64_t fakeinos, id; 814 uint64_t icount; 815 uint64_t ifree; 816 uint64_t fdblocks; 817 xfs_extlen_t lsize; 818 int64_t ffree; 819 820 /* 821 * Expedite background inodegc but don't wait. 
We do not want to block 822 * here waiting hours for a billion extent file to be truncated. 823 */ 824 xfs_inodegc_push(mp); 825 826 statp->f_type = XFS_SUPER_MAGIC; 827 statp->f_namelen = MAXNAMELEN - 1; 828 829 id = huge_encode_dev(mp->m_ddev_targp->bt_dev); 830 statp->f_fsid = u64_to_fsid(id); 831 832 icount = percpu_counter_sum(&mp->m_icount); 833 ifree = percpu_counter_sum(&mp->m_ifree); 834 fdblocks = percpu_counter_sum(&mp->m_fdblocks); 835 836 spin_lock(&mp->m_sb_lock); 837 statp->f_bsize = sbp->sb_blocksize; 838 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; 839 statp->f_blocks = sbp->sb_dblocks - lsize; 840 spin_unlock(&mp->m_sb_lock); 841 842 /* make sure statp->f_bfree does not underflow */ 843 statp->f_bfree = max_t(int64_t, 0, 844 fdblocks - xfs_fdblocks_unavailable(mp)); 845 statp->f_bavail = statp->f_bfree; 846 847 fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree); 848 statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER); 849 if (M_IGEO(mp)->maxicount) 850 statp->f_files = min_t(typeof(statp->f_files), 851 statp->f_files, 852 M_IGEO(mp)->maxicount); 853 854 /* If sb_icount overshot maxicount, report actual allocation */ 855 statp->f_files = max_t(typeof(statp->f_files), 856 statp->f_files, 857 sbp->sb_icount); 858 859 /* make sure statp->f_ffree does not underflow */ 860 ffree = statp->f_files - (icount - ifree); 861 statp->f_ffree = max_t(int64_t, ffree, 0); 862 863 864 if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) && 865 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == 866 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) 867 xfs_qm_statvfs(ip, statp); 868 869 if (XFS_IS_REALTIME_MOUNT(mp) && 870 (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) { 871 s64 freertx; 872 873 statp->f_blocks = sbp->sb_rblocks; 874 freertx = percpu_counter_sum_positive(&mp->m_frextents); 875 statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize; 876 } 877 878 return 0; 879 } 880 881 STATIC void 882 xfs_save_resvblks(struct xfs_mount *mp) 883 { 884 uint64_t resblks = 0; 885 886 mp->m_resblks_save = mp->m_resblks; 887 xfs_reserve_blocks(mp, &resblks, NULL); 888 } 889 890 STATIC void 891 xfs_restore_resvblks(struct xfs_mount *mp) 892 { 893 uint64_t resblks; 894 895 if (mp->m_resblks_save) { 896 resblks = mp->m_resblks_save; 897 mp->m_resblks_save = 0; 898 } else 899 resblks = xfs_default_resblks(mp); 900 901 xfs_reserve_blocks(mp, &resblks, NULL); 902 } 903 904 /* 905 * Second stage of a freeze. The data is already frozen so we only 906 * need to take care of the metadata. Once that's done sync the superblock 907 * to the log to dirty it in case of a crash while frozen. This ensures that we 908 * will recover the unlinked inode lists on the next mount. 909 */ 910 STATIC int 911 xfs_fs_freeze( 912 struct super_block *sb) 913 { 914 struct xfs_mount *mp = XFS_M(sb); 915 unsigned int flags; 916 int ret; 917 918 /* 919 * The filesystem is now frozen far enough that memory reclaim 920 * cannot safely operate on the filesystem. Hence we need to 921 * set a GFP_NOFS context here to avoid recursion deadlocks. 922 */ 923 flags = memalloc_nofs_save(); 924 xfs_save_resvblks(mp); 925 ret = xfs_log_quiesce(mp); 926 memalloc_nofs_restore(flags); 927 928 /* 929 * For read-write filesystems, we need to restart the inodegc on error 930 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not 931 * going to be run to restart it now. We are at SB_FREEZE_FS level 932 * here, so we can restart safely without racing with a stop in 933 * xfs_fs_sync_fs(). 
934 */ 935 if (ret && !xfs_is_readonly(mp)) { 936 xfs_blockgc_start(mp); 937 xfs_inodegc_start(mp); 938 } 939 940 return ret; 941 } 942 943 STATIC int 944 xfs_fs_unfreeze( 945 struct super_block *sb) 946 { 947 struct xfs_mount *mp = XFS_M(sb); 948 949 xfs_restore_resvblks(mp); 950 xfs_log_work_queue(mp); 951 952 /* 953 * Don't reactivate the inodegc worker on a readonly filesystem because 954 * inodes are sent directly to reclaim. Don't reactivate the blockgc 955 * worker because there are no speculative preallocations on a readonly 956 * filesystem. 957 */ 958 if (!xfs_is_readonly(mp)) { 959 xfs_blockgc_start(mp); 960 xfs_inodegc_start(mp); 961 } 962 963 return 0; 964 } 965 966 /* 967 * This function fills in xfs_mount_t fields based on mount args. 968 * Note: the superblock _has_ now been read in. 969 */ 970 STATIC int 971 xfs_finish_flags( 972 struct xfs_mount *mp) 973 { 974 /* Fail a mount where the logbuf is smaller than the log stripe */ 975 if (xfs_has_logv2(mp)) { 976 if (mp->m_logbsize <= 0 && 977 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { 978 mp->m_logbsize = mp->m_sb.sb_logsunit; 979 } else if (mp->m_logbsize > 0 && 980 mp->m_logbsize < mp->m_sb.sb_logsunit) { 981 xfs_warn(mp, 982 "logbuf size must be greater than or equal to log stripe size"); 983 return -EINVAL; 984 } 985 } else { 986 /* Fail a mount if the logbuf is larger than 32K */ 987 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { 988 xfs_warn(mp, 989 "logbuf size for version 1 logs must be 16K or 32K"); 990 return -EINVAL; 991 } 992 } 993 994 /* 995 * V5 filesystems always use attr2 format for attributes. 996 */ 997 if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) { 998 xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. " 999 "attr2 is always enabled for V5 filesystems."); 1000 return -EINVAL; 1001 } 1002 1003 /* 1004 * prohibit r/w mounts of read-only filesystems 1005 */ 1006 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) { 1007 xfs_warn(mp, 1008 "cannot mount a read-only filesystem as read-write"); 1009 return -EROFS; 1010 } 1011 1012 if ((mp->m_qflags & XFS_GQUOTA_ACCT) && 1013 (mp->m_qflags & XFS_PQUOTA_ACCT) && 1014 !xfs_has_pquotino(mp)) { 1015 xfs_warn(mp, 1016 "Super block does not support project and group quota together"); 1017 return -EINVAL; 1018 } 1019 1020 return 0; 1021 } 1022 1023 static int 1024 xfs_init_percpu_counters( 1025 struct xfs_mount *mp) 1026 { 1027 int error; 1028 1029 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL); 1030 if (error) 1031 return -ENOMEM; 1032 1033 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL); 1034 if (error) 1035 goto free_icount; 1036 1037 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL); 1038 if (error) 1039 goto free_ifree; 1040 1041 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL); 1042 if (error) 1043 goto free_fdblocks; 1044 1045 error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL); 1046 if (error) 1047 goto free_delalloc; 1048 1049 return 0; 1050 1051 free_delalloc: 1052 percpu_counter_destroy(&mp->m_delalloc_blks); 1053 free_fdblocks: 1054 percpu_counter_destroy(&mp->m_fdblocks); 1055 free_ifree: 1056 percpu_counter_destroy(&mp->m_ifree); 1057 free_icount: 1058 percpu_counter_destroy(&mp->m_icount); 1059 return -ENOMEM; 1060 } 1061 1062 void 1063 xfs_reinit_percpu_counters( 1064 struct xfs_mount *mp) 1065 { 1066 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount); 1067 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree); 1068 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks); 1069 
percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents); 1070 } 1071 1072 static void 1073 xfs_destroy_percpu_counters( 1074 struct xfs_mount *mp) 1075 { 1076 percpu_counter_destroy(&mp->m_icount); 1077 percpu_counter_destroy(&mp->m_ifree); 1078 percpu_counter_destroy(&mp->m_fdblocks); 1079 ASSERT(xfs_is_shutdown(mp) || 1080 percpu_counter_sum(&mp->m_delalloc_blks) == 0); 1081 percpu_counter_destroy(&mp->m_delalloc_blks); 1082 percpu_counter_destroy(&mp->m_frextents); 1083 } 1084 1085 static int 1086 xfs_inodegc_init_percpu( 1087 struct xfs_mount *mp) 1088 { 1089 struct xfs_inodegc *gc; 1090 int cpu; 1091 1092 mp->m_inodegc = alloc_percpu(struct xfs_inodegc); 1093 if (!mp->m_inodegc) 1094 return -ENOMEM; 1095 1096 for_each_possible_cpu(cpu) { 1097 gc = per_cpu_ptr(mp->m_inodegc, cpu); 1098 init_llist_head(&gc->list); 1099 gc->items = 0; 1100 INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker); 1101 } 1102 return 0; 1103 } 1104 1105 static void 1106 xfs_inodegc_free_percpu( 1107 struct xfs_mount *mp) 1108 { 1109 if (!mp->m_inodegc) 1110 return; 1111 free_percpu(mp->m_inodegc); 1112 } 1113 1114 static void 1115 xfs_fs_put_super( 1116 struct super_block *sb) 1117 { 1118 struct xfs_mount *mp = XFS_M(sb); 1119 1120 /* if ->fill_super failed, we have no mount to tear down */ 1121 if (!sb->s_fs_info) 1122 return; 1123 1124 xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid); 1125 xfs_filestream_unmount(mp); 1126 xfs_unmountfs(mp); 1127 1128 xfs_freesb(mp); 1129 free_percpu(mp->m_stats.xs_stats); 1130 xfs_mount_list_del(mp); 1131 xfs_inodegc_free_percpu(mp); 1132 xfs_destroy_percpu_counters(mp); 1133 xfs_destroy_mount_workqueues(mp); 1134 xfs_close_devices(mp); 1135 1136 sb->s_fs_info = NULL; 1137 xfs_mount_free(mp); 1138 } 1139 1140 static long 1141 xfs_fs_nr_cached_objects( 1142 struct super_block *sb, 1143 struct shrink_control *sc) 1144 { 1145 /* Paranoia: catch incorrect calls during mount setup or teardown */ 1146 if (WARN_ON_ONCE(!sb->s_fs_info)) 1147 return 0; 1148 return xfs_reclaim_inodes_count(XFS_M(sb)); 1149 } 1150 1151 static long 1152 xfs_fs_free_cached_objects( 1153 struct super_block *sb, 1154 struct shrink_control *sc) 1155 { 1156 return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan); 1157 } 1158 1159 static const struct super_operations xfs_super_operations = { 1160 .alloc_inode = xfs_fs_alloc_inode, 1161 .destroy_inode = xfs_fs_destroy_inode, 1162 .dirty_inode = xfs_fs_dirty_inode, 1163 .drop_inode = xfs_fs_drop_inode, 1164 .put_super = xfs_fs_put_super, 1165 .sync_fs = xfs_fs_sync_fs, 1166 .freeze_fs = xfs_fs_freeze, 1167 .unfreeze_fs = xfs_fs_unfreeze, 1168 .statfs = xfs_fs_statfs, 1169 .show_options = xfs_fs_show_options, 1170 .nr_cached_objects = xfs_fs_nr_cached_objects, 1171 .free_cached_objects = xfs_fs_free_cached_objects, 1172 }; 1173 1174 static int 1175 suffix_kstrtoint( 1176 const char *s, 1177 unsigned int base, 1178 int *res) 1179 { 1180 int last, shift_left_factor = 0, _res; 1181 char *value; 1182 int ret = 0; 1183 1184 value = kstrdup(s, GFP_KERNEL); 1185 if (!value) 1186 return -ENOMEM; 1187 1188 last = strlen(value) - 1; 1189 if (value[last] == 'K' || value[last] == 'k') { 1190 shift_left_factor = 10; 1191 value[last] = '\0'; 1192 } 1193 if (value[last] == 'M' || value[last] == 'm') { 1194 shift_left_factor = 20; 1195 value[last] = '\0'; 1196 } 1197 if (value[last] == 'G' || value[last] == 'g') { 1198 shift_left_factor = 30; 1199 value[last] = '\0'; 1200 } 1201 1202 if (kstrtoint(value, base, &_res)) 1203 ret = -EINVAL; 1204 kfree(value); 1205 
*res = _res << shift_left_factor; 1206 return ret; 1207 } 1208 1209 static inline void 1210 xfs_fs_warn_deprecated( 1211 struct fs_context *fc, 1212 struct fs_parameter *param, 1213 uint64_t flag, 1214 bool value) 1215 { 1216 /* Don't print the warning if reconfiguring and current mount point 1217 * already had the flag set 1218 */ 1219 if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && 1220 !!(XFS_M(fc->root->d_sb)->m_features & flag) == value) 1221 return; 1222 xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); 1223 } 1224 1225 /* 1226 * Set mount state from a mount option. 1227 * 1228 * NOTE: mp->m_super is NULL here! 1229 */ 1230 static int 1231 xfs_fs_parse_param( 1232 struct fs_context *fc, 1233 struct fs_parameter *param) 1234 { 1235 struct xfs_mount *parsing_mp = fc->s_fs_info; 1236 struct fs_parse_result result; 1237 int size = 0; 1238 int opt; 1239 1240 opt = fs_parse(fc, xfs_fs_parameters, param, &result); 1241 if (opt < 0) 1242 return opt; 1243 1244 switch (opt) { 1245 case Opt_logbufs: 1246 parsing_mp->m_logbufs = result.uint_32; 1247 return 0; 1248 case Opt_logbsize: 1249 if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize)) 1250 return -EINVAL; 1251 return 0; 1252 case Opt_logdev: 1253 kfree(parsing_mp->m_logname); 1254 parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL); 1255 if (!parsing_mp->m_logname) 1256 return -ENOMEM; 1257 return 0; 1258 case Opt_rtdev: 1259 kfree(parsing_mp->m_rtname); 1260 parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL); 1261 if (!parsing_mp->m_rtname) 1262 return -ENOMEM; 1263 return 0; 1264 case Opt_allocsize: 1265 if (suffix_kstrtoint(param->string, 10, &size)) 1266 return -EINVAL; 1267 parsing_mp->m_allocsize_log = ffs(size) - 1; 1268 parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE; 1269 return 0; 1270 case Opt_grpid: 1271 case Opt_bsdgroups: 1272 parsing_mp->m_features |= XFS_FEAT_GRPID; 1273 return 0; 1274 case Opt_nogrpid: 1275 case Opt_sysvgroups: 1276 parsing_mp->m_features &= ~XFS_FEAT_GRPID; 1277 return 0; 1278 case Opt_wsync: 1279 parsing_mp->m_features |= XFS_FEAT_WSYNC; 1280 return 0; 1281 case Opt_norecovery: 1282 parsing_mp->m_features |= XFS_FEAT_NORECOVERY; 1283 return 0; 1284 case Opt_noalign: 1285 parsing_mp->m_features |= XFS_FEAT_NOALIGN; 1286 return 0; 1287 case Opt_swalloc: 1288 parsing_mp->m_features |= XFS_FEAT_SWALLOC; 1289 return 0; 1290 case Opt_sunit: 1291 parsing_mp->m_dalign = result.uint_32; 1292 return 0; 1293 case Opt_swidth: 1294 parsing_mp->m_swidth = result.uint_32; 1295 return 0; 1296 case Opt_inode32: 1297 parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS; 1298 return 0; 1299 case Opt_inode64: 1300 parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS; 1301 return 0; 1302 case Opt_nouuid: 1303 parsing_mp->m_features |= XFS_FEAT_NOUUID; 1304 return 0; 1305 case Opt_largeio: 1306 parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE; 1307 return 0; 1308 case Opt_nolargeio: 1309 parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE; 1310 return 0; 1311 case Opt_filestreams: 1312 parsing_mp->m_features |= XFS_FEAT_FILESTREAMS; 1313 return 0; 1314 case Opt_noquota: 1315 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; 1316 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; 1317 return 0; 1318 case Opt_quota: 1319 case Opt_uquota: 1320 case Opt_usrquota: 1321 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD); 1322 return 0; 1323 case Opt_qnoenforce: 1324 case Opt_uqnoenforce: 1325 parsing_mp->m_qflags |= XFS_UQUOTA_ACCT; 1326 parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD; 1327 return 0; 1328 case 
Opt_pquota: 1329 case Opt_prjquota: 1330 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD); 1331 return 0; 1332 case Opt_pqnoenforce: 1333 parsing_mp->m_qflags |= XFS_PQUOTA_ACCT; 1334 parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD; 1335 return 0; 1336 case Opt_gquota: 1337 case Opt_grpquota: 1338 parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD); 1339 return 0; 1340 case Opt_gqnoenforce: 1341 parsing_mp->m_qflags |= XFS_GQUOTA_ACCT; 1342 parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD; 1343 return 0; 1344 case Opt_discard: 1345 parsing_mp->m_features |= XFS_FEAT_DISCARD; 1346 return 0; 1347 case Opt_nodiscard: 1348 parsing_mp->m_features &= ~XFS_FEAT_DISCARD; 1349 return 0; 1350 #ifdef CONFIG_FS_DAX 1351 case Opt_dax: 1352 xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS); 1353 return 0; 1354 case Opt_dax_enum: 1355 xfs_mount_set_dax_mode(parsing_mp, result.uint_32); 1356 return 0; 1357 #endif 1358 /* Following mount options will be removed in September 2025 */ 1359 case Opt_ikeep: 1360 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true); 1361 parsing_mp->m_features |= XFS_FEAT_IKEEP; 1362 return 0; 1363 case Opt_noikeep: 1364 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false); 1365 parsing_mp->m_features &= ~XFS_FEAT_IKEEP; 1366 return 0; 1367 case Opt_attr2: 1368 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true); 1369 parsing_mp->m_features |= XFS_FEAT_ATTR2; 1370 return 0; 1371 case Opt_noattr2: 1372 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true); 1373 parsing_mp->m_features |= XFS_FEAT_NOATTR2; 1374 return 0; 1375 default: 1376 xfs_warn(parsing_mp, "unknown mount option [%s].", param->key); 1377 return -EINVAL; 1378 } 1379 1380 return 0; 1381 } 1382 1383 static int 1384 xfs_fs_validate_params( 1385 struct xfs_mount *mp) 1386 { 1387 /* No recovery flag requires a read-only mount */ 1388 if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) { 1389 xfs_warn(mp, "no-recovery mounts must be read-only."); 1390 return -EINVAL; 1391 } 1392 1393 /* 1394 * We have not read the superblock at this point, so only the attr2 1395 * mount option can set the attr2 feature by this stage. 
1396 */ 1397 if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) { 1398 xfs_warn(mp, "attr2 and noattr2 cannot both be specified."); 1399 return -EINVAL; 1400 } 1401 1402 1403 if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) { 1404 xfs_warn(mp, 1405 "sunit and swidth options incompatible with the noalign option"); 1406 return -EINVAL; 1407 } 1408 1409 if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) { 1410 xfs_warn(mp, "quota support not available in this kernel."); 1411 return -EINVAL; 1412 } 1413 1414 if ((mp->m_dalign && !mp->m_swidth) || 1415 (!mp->m_dalign && mp->m_swidth)) { 1416 xfs_warn(mp, "sunit and swidth must be specified together"); 1417 return -EINVAL; 1418 } 1419 1420 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) { 1421 xfs_warn(mp, 1422 "stripe width (%d) must be a multiple of the stripe unit (%d)", 1423 mp->m_swidth, mp->m_dalign); 1424 return -EINVAL; 1425 } 1426 1427 if (mp->m_logbufs != -1 && 1428 mp->m_logbufs != 0 && 1429 (mp->m_logbufs < XLOG_MIN_ICLOGS || 1430 mp->m_logbufs > XLOG_MAX_ICLOGS)) { 1431 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", 1432 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); 1433 return -EINVAL; 1434 } 1435 1436 if (mp->m_logbsize != -1 && 1437 mp->m_logbsize != 0 && 1438 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || 1439 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || 1440 !is_power_of_2(mp->m_logbsize))) { 1441 xfs_warn(mp, 1442 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", 1443 mp->m_logbsize); 1444 return -EINVAL; 1445 } 1446 1447 if (xfs_has_allocsize(mp) && 1448 (mp->m_allocsize_log > XFS_MAX_IO_LOG || 1449 mp->m_allocsize_log < XFS_MIN_IO_LOG)) { 1450 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", 1451 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG); 1452 return -EINVAL; 1453 } 1454 1455 return 0; 1456 } 1457 1458 static int 1459 xfs_fs_fill_super( 1460 struct super_block *sb, 1461 struct fs_context *fc) 1462 { 1463 struct xfs_mount *mp = sb->s_fs_info; 1464 struct inode *root; 1465 int flags = 0, error; 1466 1467 mp->m_super = sb; 1468 1469 error = xfs_fs_validate_params(mp); 1470 if (error) 1471 goto out_free_names; 1472 1473 sb_min_blocksize(sb, BBSIZE); 1474 sb->s_xattr = xfs_xattr_handlers; 1475 sb->s_export_op = &xfs_export_operations; 1476 #ifdef CONFIG_XFS_QUOTA 1477 sb->s_qcop = &xfs_quotactl_operations; 1478 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; 1479 #endif 1480 sb->s_op = &xfs_super_operations; 1481 1482 /* 1483 * Delay mount work if the debug hook is set. This is debug 1484 * instrumention to coordinate simulation of xfs mount failures with 1485 * VFS superblock operations 1486 */ 1487 if (xfs_globals.mount_delay) { 1488 xfs_notice(mp, "Delaying mount for %d seconds.", 1489 xfs_globals.mount_delay); 1490 msleep(xfs_globals.mount_delay * 1000); 1491 } 1492 1493 if (fc->sb_flags & SB_SILENT) 1494 flags |= XFS_MFSI_QUIET; 1495 1496 error = xfs_open_devices(mp); 1497 if (error) 1498 goto out_free_names; 1499 1500 error = xfs_init_mount_workqueues(mp); 1501 if (error) 1502 goto out_close_devices; 1503 1504 error = xfs_init_percpu_counters(mp); 1505 if (error) 1506 goto out_destroy_workqueues; 1507 1508 error = xfs_inodegc_init_percpu(mp); 1509 if (error) 1510 goto out_destroy_counters; 1511 1512 /* 1513 * All percpu data structures requiring cleanup when a cpu goes offline 1514 * must be allocated before adding this @mp to the cpu-dead handler's 1515 * mount list. 
1516 */ 1517 xfs_mount_list_add(mp); 1518 1519 /* Allocate stats memory before we do operations that might use it */ 1520 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats); 1521 if (!mp->m_stats.xs_stats) { 1522 error = -ENOMEM; 1523 goto out_destroy_inodegc; 1524 } 1525 1526 error = xfs_readsb(mp, flags); 1527 if (error) 1528 goto out_free_stats; 1529 1530 error = xfs_finish_flags(mp); 1531 if (error) 1532 goto out_free_sb; 1533 1534 error = xfs_setup_devices(mp); 1535 if (error) 1536 goto out_free_sb; 1537 1538 /* V4 support is undergoing deprecation. */ 1539 if (!xfs_has_crc(mp)) { 1540 #ifdef CONFIG_XFS_SUPPORT_V4 1541 xfs_warn_once(mp, 1542 "Deprecated V4 format (crc=0) will not be supported after September 2030."); 1543 #else 1544 xfs_warn(mp, 1545 "Deprecated V4 format (crc=0) not supported by kernel."); 1546 error = -EINVAL; 1547 goto out_free_sb; 1548 #endif 1549 } 1550 1551 /* Filesystem claims it needs repair, so refuse the mount. */ 1552 if (xfs_has_needsrepair(mp)) { 1553 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair."); 1554 error = -EFSCORRUPTED; 1555 goto out_free_sb; 1556 } 1557 1558 /* 1559 * Don't touch the filesystem if a user tool thinks it owns the primary 1560 * superblock. mkfs doesn't clear the flag from secondary supers, so 1561 * we don't check them at all. 1562 */ 1563 if (mp->m_sb.sb_inprogress) { 1564 xfs_warn(mp, "Offline file system operation in progress!"); 1565 error = -EFSCORRUPTED; 1566 goto out_free_sb; 1567 } 1568 1569 /* 1570 * Until this is fixed only page-sized or smaller data blocks work. 1571 */ 1572 if (mp->m_sb.sb_blocksize > PAGE_SIZE) { 1573 xfs_warn(mp, 1574 "File system with blocksize %d bytes. " 1575 "Only pagesize (%ld) or less will currently work.", 1576 mp->m_sb.sb_blocksize, PAGE_SIZE); 1577 error = -ENOSYS; 1578 goto out_free_sb; 1579 } 1580 1581 /* Ensure this filesystem fits in the page cache limits */ 1582 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) || 1583 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) { 1584 xfs_warn(mp, 1585 "file system too large to be mounted on this system."); 1586 error = -EFBIG; 1587 goto out_free_sb; 1588 } 1589 1590 /* 1591 * XFS block mappings use 54 bits to store the logical block offset. 1592 * This should suffice to handle the maximum file size that the VFS 1593 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT 1594 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes 1595 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON 1596 * to check this assertion. 1597 * 1598 * Avoid integer overflow by comparing the maximum bmbt offset to the 1599 * maximum pagecache offset in units of fs blocks. 1600 */ 1601 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) { 1602 xfs_warn(mp, 1603 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!", 1604 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE), 1605 XFS_MAX_FILEOFF); 1606 error = -EINVAL; 1607 goto out_free_sb; 1608 } 1609 1610 error = xfs_filestream_mount(mp); 1611 if (error) 1612 goto out_free_sb; 1613 1614 /* 1615 * we must configure the block size in the superblock before we run the 1616 * full mount process as the mount process can lookup and cache inodes. 
1617 */ 1618 sb->s_magic = XFS_SUPER_MAGIC; 1619 sb->s_blocksize = mp->m_sb.sb_blocksize; 1620 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; 1621 sb->s_maxbytes = MAX_LFS_FILESIZE; 1622 sb->s_max_links = XFS_MAXLINK; 1623 sb->s_time_gran = 1; 1624 if (xfs_has_bigtime(mp)) { 1625 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN); 1626 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX); 1627 } else { 1628 sb->s_time_min = XFS_LEGACY_TIME_MIN; 1629 sb->s_time_max = XFS_LEGACY_TIME_MAX; 1630 } 1631 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max); 1632 sb->s_iflags |= SB_I_CGROUPWB; 1633 1634 set_posix_acl_flag(sb); 1635 1636 /* version 5 superblocks support inode version counters. */ 1637 if (xfs_has_crc(mp)) 1638 sb->s_flags |= SB_I_VERSION; 1639 1640 if (xfs_has_dax_always(mp)) { 1641 error = xfs_setup_dax_always(mp); 1642 if (error) 1643 goto out_filestream_unmount; 1644 } 1645 1646 if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) { 1647 xfs_warn(mp, 1648 "mounting with \"discard\" option, but the device does not support discard"); 1649 mp->m_features &= ~XFS_FEAT_DISCARD; 1650 } 1651 1652 if (xfs_has_reflink(mp)) { 1653 if (mp->m_sb.sb_rblocks) { 1654 xfs_alert(mp, 1655 "reflink not compatible with realtime device!"); 1656 error = -EINVAL; 1657 goto out_filestream_unmount; 1658 } 1659 1660 if (xfs_globals.always_cow) { 1661 xfs_info(mp, "using DEBUG-only always_cow mode."); 1662 mp->m_always_cow = true; 1663 } 1664 } 1665 1666 if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) { 1667 xfs_alert(mp, 1668 "reverse mapping btree not compatible with realtime device!"); 1669 error = -EINVAL; 1670 goto out_filestream_unmount; 1671 } 1672 1673 if (xfs_has_large_extent_counts(mp)) 1674 xfs_warn(mp, 1675 "EXPERIMENTAL Large extent counts feature in use. 
Use at your own risk!"); 1676 1677 error = xfs_mountfs(mp); 1678 if (error) 1679 goto out_filestream_unmount; 1680 1681 root = igrab(VFS_I(mp->m_rootip)); 1682 if (!root) { 1683 error = -ENOENT; 1684 goto out_unmount; 1685 } 1686 sb->s_root = d_make_root(root); 1687 if (!sb->s_root) { 1688 error = -ENOMEM; 1689 goto out_unmount; 1690 } 1691 1692 return 0; 1693 1694 out_filestream_unmount: 1695 xfs_filestream_unmount(mp); 1696 out_free_sb: 1697 xfs_freesb(mp); 1698 out_free_stats: 1699 free_percpu(mp->m_stats.xs_stats); 1700 out_destroy_inodegc: 1701 xfs_mount_list_del(mp); 1702 xfs_inodegc_free_percpu(mp); 1703 out_destroy_counters: 1704 xfs_destroy_percpu_counters(mp); 1705 out_destroy_workqueues: 1706 xfs_destroy_mount_workqueues(mp); 1707 out_close_devices: 1708 xfs_close_devices(mp); 1709 out_free_names: 1710 sb->s_fs_info = NULL; 1711 xfs_mount_free(mp); 1712 return error; 1713 1714 out_unmount: 1715 xfs_filestream_unmount(mp); 1716 xfs_unmountfs(mp); 1717 goto out_free_sb; 1718 } 1719 1720 static int 1721 xfs_fs_get_tree( 1722 struct fs_context *fc) 1723 { 1724 return get_tree_bdev(fc, xfs_fs_fill_super); 1725 } 1726 1727 static int 1728 xfs_remount_rw( 1729 struct xfs_mount *mp) 1730 { 1731 struct xfs_sb *sbp = &mp->m_sb; 1732 int error; 1733 1734 if (xfs_has_norecovery(mp)) { 1735 xfs_warn(mp, 1736 "ro->rw transition prohibited on norecovery mount"); 1737 return -EINVAL; 1738 } 1739 1740 if (xfs_sb_is_v5(sbp) && 1741 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) { 1742 xfs_warn(mp, 1743 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem", 1744 (sbp->sb_features_ro_compat & 1745 XFS_SB_FEAT_RO_COMPAT_UNKNOWN)); 1746 return -EINVAL; 1747 } 1748 1749 clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate); 1750 1751 /* 1752 * If this is the first remount to writeable state we might have some 1753 * superblock changes to update. 1754 */ 1755 if (mp->m_update_sb) { 1756 error = xfs_sync_sb(mp, false); 1757 if (error) { 1758 xfs_warn(mp, "failed to write sb changes"); 1759 return error; 1760 } 1761 mp->m_update_sb = false; 1762 } 1763 1764 /* 1765 * Fill out the reserve pool if it is empty. Use the stashed value if 1766 * it is non-zero, otherwise go with the default. 1767 */ 1768 xfs_restore_resvblks(mp); 1769 xfs_log_work_queue(mp); 1770 xfs_blockgc_start(mp); 1771 1772 /* Create the per-AG metadata reservation pool .*/ 1773 error = xfs_fs_reserve_ag_blocks(mp); 1774 if (error && error != -ENOSPC) 1775 return error; 1776 1777 /* Re-enable the background inode inactivation worker. */ 1778 xfs_inodegc_start(mp); 1779 1780 return 0; 1781 } 1782 1783 static int 1784 xfs_remount_ro( 1785 struct xfs_mount *mp) 1786 { 1787 struct xfs_icwalk icw = { 1788 .icw_flags = XFS_ICWALK_FLAG_SYNC, 1789 }; 1790 int error; 1791 1792 /* Flush all the dirty data to disk. */ 1793 error = sync_filesystem(mp->m_super); 1794 if (error) 1795 return error; 1796 1797 /* 1798 * Cancel background eofb scanning so it cannot race with the final 1799 * log force+buftarg wait and deadlock the remount. 1800 */ 1801 xfs_blockgc_stop(mp); 1802 1803 /* 1804 * Clear out all remaining COW staging extents and speculative post-EOF 1805 * preallocations so that we don't leave inodes requiring inactivation 1806 * cleanups during reclaim on a read-only mount. We must process every 1807 * cached inode, so this requires a synchronous cache scan. 
1808 */ 1809 error = xfs_blockgc_free_space(mp, &icw); 1810 if (error) { 1811 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1812 return error; 1813 } 1814 1815 /* 1816 * Stop the inodegc background worker. xfs_fs_reconfigure already 1817 * flushed all pending inodegc work when it sync'd the filesystem. 1818 * The VFS holds s_umount, so we know that inodes cannot enter 1819 * xfs_fs_destroy_inode during a remount operation. In readonly mode 1820 * we send inodes straight to reclaim, so no inodes will be queued. 1821 */ 1822 xfs_inodegc_stop(mp); 1823 1824 /* Free the per-AG metadata reservation pool. */ 1825 error = xfs_fs_unreserve_ag_blocks(mp); 1826 if (error) { 1827 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1828 return error; 1829 } 1830 1831 /* 1832 * Before we sync the metadata, we need to free up the reserve block 1833 * pool so that the used block count in the superblock on disk is 1834 * correct at the end of the remount. Stash the current* reserve pool 1835 * size so that if we get remounted rw, we can return it to the same 1836 * size. 1837 */ 1838 xfs_save_resvblks(mp); 1839 1840 xfs_log_clean(mp); 1841 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate); 1842 1843 return 0; 1844 } 1845 1846 /* 1847 * Logically we would return an error here to prevent users from believing 1848 * they might have changed mount options using remount which can't be changed. 1849 * 1850 * But unfortunately mount(8) adds all options from mtab and fstab to the mount 1851 * arguments in some cases so we can't blindly reject options, but have to 1852 * check for each specified option if it actually differs from the currently 1853 * set option and only reject it if that's the case. 1854 * 1855 * Until that is implemented we return success for every remount request, and 1856 * silently ignore all options that we can't actually change. 1857 */ 1858 static int 1859 xfs_fs_reconfigure( 1860 struct fs_context *fc) 1861 { 1862 struct xfs_mount *mp = XFS_M(fc->root->d_sb); 1863 struct xfs_mount *new_mp = fc->s_fs_info; 1864 int flags = fc->sb_flags; 1865 int error; 1866 1867 /* version 5 superblocks always support version counters. */ 1868 if (xfs_has_crc(mp)) 1869 fc->sb_flags |= SB_I_VERSION; 1870 1871 error = xfs_fs_validate_params(new_mp); 1872 if (error) 1873 return error; 1874 1875 /* inode32 -> inode64 */ 1876 if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) { 1877 mp->m_features &= ~XFS_FEAT_SMALL_INUMS; 1878 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount); 1879 } 1880 1881 /* inode64 -> inode32 */ 1882 if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) { 1883 mp->m_features |= XFS_FEAT_SMALL_INUMS; 1884 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount); 1885 } 1886 1887 /* ro -> rw */ 1888 if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) { 1889 error = xfs_remount_rw(mp); 1890 if (error) 1891 return error; 1892 } 1893 1894 /* rw -> ro */ 1895 if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) { 1896 error = xfs_remount_ro(mp); 1897 if (error) 1898 return error; 1899 } 1900 1901 return 0; 1902 } 1903 1904 static void xfs_fs_free( 1905 struct fs_context *fc) 1906 { 1907 struct xfs_mount *mp = fc->s_fs_info; 1908 1909 /* 1910 * mp is stored in the fs_context when it is initialized. 1911 * mp is transferred to the superblock on a successful mount, 1912 * but if an error occurs before the transfer we have to free 1913 * it here. 
1914 */ 1915 if (mp) 1916 xfs_mount_free(mp); 1917 } 1918 1919 static const struct fs_context_operations xfs_context_ops = { 1920 .parse_param = xfs_fs_parse_param, 1921 .get_tree = xfs_fs_get_tree, 1922 .reconfigure = xfs_fs_reconfigure, 1923 .free = xfs_fs_free, 1924 }; 1925 1926 static int xfs_init_fs_context( 1927 struct fs_context *fc) 1928 { 1929 struct xfs_mount *mp; 1930 1931 mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO); 1932 if (!mp) 1933 return -ENOMEM; 1934 1935 spin_lock_init(&mp->m_sb_lock); 1936 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); 1937 spin_lock_init(&mp->m_perag_lock); 1938 mutex_init(&mp->m_growlock); 1939 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker); 1940 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); 1941 mp->m_kobj.kobject.kset = xfs_kset; 1942 /* 1943 * We don't create the finobt per-ag space reservation until after log 1944 * recovery, so we must set this to true so that an ifree transaction 1945 * started during log recovery will not depend on space reservations 1946 * for finobt expansion. 1947 */ 1948 mp->m_finobt_nores = true; 1949 1950 /* 1951 * These can be overridden by the mount option parsing. 1952 */ 1953 mp->m_logbufs = -1; 1954 mp->m_logbsize = -1; 1955 mp->m_allocsize_log = 16; /* 64k */ 1956 1957 /* 1958 * Copy binary VFS mount flags we are interested in. 1959 */ 1960 if (fc->sb_flags & SB_RDONLY) 1961 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate); 1962 if (fc->sb_flags & SB_DIRSYNC) 1963 mp->m_features |= XFS_FEAT_DIRSYNC; 1964 if (fc->sb_flags & SB_SYNCHRONOUS) 1965 mp->m_features |= XFS_FEAT_WSYNC; 1966 1967 fc->s_fs_info = mp; 1968 fc->ops = &xfs_context_ops; 1969 1970 return 0; 1971 } 1972 1973 static struct file_system_type xfs_fs_type = { 1974 .owner = THIS_MODULE, 1975 .name = "xfs", 1976 .init_fs_context = xfs_init_fs_context, 1977 .parameters = xfs_fs_parameters, 1978 .kill_sb = kill_block_super, 1979 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, 1980 }; 1981 MODULE_ALIAS_FS("xfs"); 1982 1983 STATIC int __init 1984 xfs_init_caches(void) 1985 { 1986 int error; 1987 1988 xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0, 1989 SLAB_HWCACHE_ALIGN | 1990 SLAB_RECLAIM_ACCOUNT | 1991 SLAB_MEM_SPREAD, 1992 NULL); 1993 if (!xfs_buf_cache) 1994 goto out; 1995 1996 xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket", 1997 sizeof(struct xlog_ticket), 1998 0, 0, NULL); 1999 if (!xfs_log_ticket_cache) 2000 goto out_destroy_buf_cache; 2001 2002 error = xfs_btree_init_cur_caches(); 2003 if (error) 2004 goto out_destroy_log_ticket_cache; 2005 2006 error = xfs_defer_init_item_caches(); 2007 if (error) 2008 goto out_destroy_btree_cur_cache; 2009 2010 xfs_da_state_cache = kmem_cache_create("xfs_da_state", 2011 sizeof(struct xfs_da_state), 2012 0, 0, NULL); 2013 if (!xfs_da_state_cache) 2014 goto out_destroy_defer_item_cache; 2015 2016 xfs_ifork_cache = kmem_cache_create("xfs_ifork", 2017 sizeof(struct xfs_ifork), 2018 0, 0, NULL); 2019 if (!xfs_ifork_cache) 2020 goto out_destroy_da_state_cache; 2021 2022 xfs_trans_cache = kmem_cache_create("xfs_trans", 2023 sizeof(struct xfs_trans), 2024 0, 0, NULL); 2025 if (!xfs_trans_cache) 2026 goto out_destroy_ifork_cache; 2027 2028 2029 /* 2030 * The size of the cache-allocated buf log item is the maximum 2031 * size possible under XFS. This wastes a little bit of memory, 2032 * but it is much faster. 

STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					  SLAB_HWCACHE_ALIGN |
					  SLAB_RECLAIM_ACCOUNT |
					  SLAB_MEM_SPREAD,
					  NULL);
	if (!xfs_buf_cache)
		goto out;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						 sizeof(struct xlog_ticket),
						 0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out_destroy_buf_cache;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					       sizeof(struct xfs_da_state),
					       0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					    sizeof(struct xfs_ifork),
					    0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					    sizeof(struct xfs_trans),
					    0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					       sizeof(struct xfs_buf_log_item),
					       0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;

	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efd_cache)
		goto out_destroy_buf_item_cache;

	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efi_cache)
		goto out_destroy_efd_cache;

	xfs_inode_cache = kmem_cache_create("xfs_inode",
					    sizeof(struct xfs_inode), 0,
					    (SLAB_HWCACHE_ALIGN |
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					    xfs_fs_inode_init_once);
	if (!xfs_inode_cache)
		goto out_destroy_efi_cache;

	xfs_ili_cache = kmem_cache_create("xfs_ili",
					  sizeof(struct xfs_inode_log_item), 0,
					  SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					  NULL);
	if (!xfs_ili_cache)
		goto out_destroy_inode_cache;

	xfs_icreate_cache = kmem_cache_create("xfs_icr",
					      sizeof(struct xfs_icreate_item),
					      0, 0, NULL);
	if (!xfs_icreate_cache)
		goto out_destroy_ili_cache;

	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
					  sizeof(struct xfs_rud_log_item),
					  0, 0, NULL);
	if (!xfs_rud_cache)
		goto out_destroy_icreate_cache;

	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_cache)
		goto out_destroy_rud_cache;

	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
					  sizeof(struct xfs_cud_log_item),
					  0, 0, NULL);
	if (!xfs_cud_cache)
		goto out_destroy_rui_cache;

	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_cache)
		goto out_destroy_cud_cache;

	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
					  sizeof(struct xfs_bud_log_item),
					  0, 0, NULL);
	if (!xfs_bud_cache)
		goto out_destroy_cui_cache;

	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_cache)
		goto out_destroy_bud_cache;

	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
					    sizeof(struct xfs_attrd_log_item),
					    0, 0, NULL);
	if (!xfs_attrd_cache)
		goto out_destroy_bui_cache;

	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
					    sizeof(struct xfs_attri_log_item),
					    0, 0, NULL);
	if (!xfs_attri_cache)
		goto out_destroy_attrd_cache;

	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
					      sizeof(struct xfs_iunlink_item),
					      0, 0, NULL);
	if (!xfs_iunlink_cache)
		goto out_destroy_attri_cache;

	return 0;

 out_destroy_attri_cache:
	kmem_cache_destroy(xfs_attri_cache);
 out_destroy_attrd_cache:
	kmem_cache_destroy(xfs_attrd_cache);
 out_destroy_bui_cache:
	kmem_cache_destroy(xfs_bui_cache);
 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out_destroy_buf_cache:
	kmem_cache_destroy(xfs_buf_cache);
 out:
	return -ENOMEM;
}
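
/*
 * Usage note (illustrative; the real call sites live elsewhere in XFS):
 * objects are carved out of the caches set up above with kmem_cache_zalloc()
 * and returned with kmem_cache_free(), e.g. for the transaction cache:
 *
 *	struct xfs_trans *tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(xfs_trans_cache, tp);
 */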

STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu frees are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_iunlink_cache);
	kmem_cache_destroy(xfs_attri_cache);
	kmem_cache_destroy(xfs_attrd_cache);
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
	kmem_cache_destroy(xfs_buf_cache);
}
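
/*
 * Why the rcu_barrier() in xfs_destroy_caches() matters (illustrative sketch;
 * free_cb is a made-up name): objects from several of these caches are freed
 * through call_rcu(), so every pending grace-period callback must have run
 * before the backing cache itself is destroyed:
 *
 *	call_rcu(&obj->rcu_head, free_cb);	// queues the actual free
 *	...
 *	rcu_barrier();				// wait for all queued callbacks
 *	kmem_cache_destroy(cache);		// only now is this safe
 */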

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

#ifdef CONFIG_HOTPLUG_CPU
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		xlog_cil_pcp_dead(mp->m_log, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif
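
/*
 * Editorial note: the hotplug callback above only works because every mount
 * adds itself to xfs_mount_list via xfs_mount_list_add(). When a CPU goes
 * offline, the CPUHP_XFS_DEAD teardown runs xfs_cpu_dead(), which walks that
 * list so each mount can drain the dead CPU's per-cpu inodegc and CIL state
 * via xfs_inodegc_cpu_dead() and xlog_cil_pcp_dead().
 */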

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	error = xfs_dahash_test();
	if (error)
		return error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_cpu_hotplug_init();
	if (error)
		goto out;

	error = xfs_init_caches();
	if (error)
		goto out_destroy_hp;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_caches;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_init_procfs();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_caches:
	xfs_destroy_caches();
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_caches();
	xfs_uuid_table_free();
	xfs_cpu_hotplug_destroy();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");