// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
 */
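/*
 * Illustrative example (not upstream documentation): a mount such as
 *
 *	mount -t xfs -o logbufs=8,allocsize=64k /dev/sda1 /mnt
 *
 * is split by the VFS mount API into individual fs_parameter objects
 * ("logbufs=8", "allocsize=64k"), each of which fs_parse() matches
 * against the xfs_fs_parameters table below and dispatches to
 * xfs_fs_parse_param() with the corresponding Opt_* value.
 */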
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
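
	/*
	 * Worked example (illustrative): m_allocsize_log is log2 of the
	 * allocation size in bytes, so a value of 16 prints below as
	 * (1 << 16) >> 10 = 64, i.e. ",allocsize=64k".
	 */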
	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
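	/*
	 * Illustrative numbers (assumed geometry, not derived from this
	 * code): an inode number encodes its block offset, so with 4k
	 * blocks and 256 or 512 byte inodes a 32-bit inode number can only
	 * reach roughly the first 1-2TiB of the device.  Any filesystem
	 * whose last possible inode lies beyond XFS_MAXINUMBER_32 takes
	 * the branch below when inode32 was requested.
	 */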
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (xfs_is_inode32(mp)) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}

static bool
xfs_buftarg_is_dax(
	struct super_block	*sb,
	struct xfs_buftarg	*bt)
{
	return dax_supported(bt->bt_daxdev, bt->bt_bdev, sb->s_blocksize, 0,
			bdev_nr_sectors(bt->bt_bdev));
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_tree_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
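	/*
	 * For example (illustrative): "-o logdev=/dev/sdb1,rtdev=/dev/sdc1"
	 * fills in m_logname and m_rtname during option parsing, selecting
	 * configuration (3) above; with neither option set, both branches
	 * below are skipped and the internal log of configuration (1) is
	 * used.
	 */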
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

 out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
 out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
 out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
 out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
 out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
 out:
	return -ENOMEM;
}
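/*
 * Note on the workqueue flags above (hedged summary, not authoritative
 * documentation): WQ_MEM_RECLAIM guarantees a rescuer thread so the queue
 * can make forward progress while the system is reclaiming memory,
 * WQ_FREEZABLE makes work items quiesce across system suspend/freeze, and
 * a max_active of 1 (xfs-buf, xfs-inodegc) limits the queue to one work
 * item in flight at a time per CPU.
 */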

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
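/*
 * Background note on the constructor below (hedged summary, not
 * authoritative): slab constructors run when the allocator first
 * initialises an object's memory, not on every allocation from the
 * cache, which is why only fields that survive a free/alloc cycle
 * unchanged may be set up here.
 */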
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every
 * inode we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/* Wait for whatever inactivations are in progress. */
	xfs_inodegc_flush(mp);
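
	/*
	 * Hedged aside: the percpu_counter_sum() calls below take the slow
	 * but precise path over all CPUs rather than the approximate
	 * percpu_counter_read() value, since statfs output is user visible
	 * and drift in the free counts would be confusing.
	 */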
	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int			error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		init_llist_head(&gc->list);
		gc->items = 0;
		INIT_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
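/*
 * Worked examples for suffix_kstrtoint() (illustrative): "32k" strips the
 * suffix, parses 32 and shifts left by 10 to give 32768; "1m" yields
 * 1 << 20 = 1048576; a plain "4096" parses with no shift at all.
 */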

static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/*
	 * Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_features |= XFS_FEAT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
	 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}
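/*
 * Illustrative example of the geometry checks above (made-up values): a
 * RAID5 array of 4+1 disks with a 64KiB chunk would be described as
 * "-o sunit=128,swidth=512" (units of 512-byte sectors), which passes
 * because swidth is a non-zero multiple of sunit; "-o sunit=128" alone
 * or "-o sunit=128,swidth=100" would be rejected.
 */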

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/*
	 * All percpu data structures requiring cleanup when a cpu goes offline
	 * must be allocated before adding this @mp to the cpu-dead handler's
	 * mount list.
	 */
	xfs_mount_list_add(mp);

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_has_needsrepair(mp)) {
		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
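	/*
	 * Worked example (illustrative): for the common 4096-byte block
	 * size, ffs(4096) returns 13 (ffs() is 1-based), so
	 * s_blocksize_bits below becomes 12, i.e. log2 of the block size.
	 */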
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_has_bigtime(mp)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (xfs_has_crc(mp))
		sb->s_flags |= SB_I_VERSION;

	if (xfs_has_dax_always(mp)) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = xfs_buftarg_is_dax(sb, mp->m_ddev_targp);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = xfs_buftarg_is_dax(sb,
						mp->m_rtdev_targp);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
		}
		if (xfs_has_reflink(mp)) {
			xfs_alert(mp,
		"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (xfs_has_discard(mp)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_features &= ~XFS_FEAT_DISCARD;
		}
	}

	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_inodegc:
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}
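/*
 * Hedged note: get_tree_bdev() is the generic helper for block-device
 * based filesystems; it claims the device named in fc->source, creates
 * or reuses the superblock, and then calls xfs_fs_fill_super() above to
 * do the filesystem-specific work.
 */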

static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	if (xfs_has_norecovery(mp)) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (xfs_sb_is_v5(sbp) &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int			error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_blockgc_free_space(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker. xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation. In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (xfs_has_crc(mp))
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* ro -> rw */
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_features |= XFS_FEAT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_features |= XFS_FEAT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");

STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					       sizeof(struct xfs_da_state),
					       0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					    sizeof(struct xfs_ifork),
					    0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					    sizeof(struct xfs_trans),
					    0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					       sizeof(struct xfs_buf_log_item),
					       0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;
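
	/*
	 * Hedged note on the sizing below: each intent log item embeds one
	 * struct xfs_extent in its on-disk format, so only MAX_FAST - 1
	 * additional extents need to be appended to reach the "fast"
	 * capacity, hence the (N - 1) in the allocation size arithmetic.
	 */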
STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					       sizeof(struct xfs_da_state),
					       0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					    sizeof(struct xfs_ifork),
					    0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					    sizeof(struct xfs_trans),
					    0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					       sizeof(struct xfs_buf_log_item),
					       0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;

	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_cache)
		goto out_destroy_buf_item_cache;

	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
					(sizeof(struct xfs_efi_log_item) +
					(XFS_EFI_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efi_cache)
		goto out_destroy_efd_cache;

	xfs_inode_cache = kmem_cache_create("xfs_inode",
					    sizeof(struct xfs_inode), 0,
					    (SLAB_HWCACHE_ALIGN |
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					    xfs_fs_inode_init_once);
	if (!xfs_inode_cache)
		goto out_destroy_efi_cache;

	xfs_ili_cache = kmem_cache_create("xfs_ili",
					  sizeof(struct xfs_inode_log_item), 0,
					  SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					  NULL);
	if (!xfs_ili_cache)
		goto out_destroy_inode_cache;

	xfs_icreate_cache = kmem_cache_create("xfs_icr",
					      sizeof(struct xfs_icreate_item),
					      0, 0, NULL);
	if (!xfs_icreate_cache)
		goto out_destroy_ili_cache;

	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
					  sizeof(struct xfs_rud_log_item),
					  0, 0, NULL);
	if (!xfs_rud_cache)
		goto out_destroy_icreate_cache;

	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_cache)
		goto out_destroy_rud_cache;

	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
					  sizeof(struct xfs_cud_log_item),
					  0, 0, NULL);
	if (!xfs_cud_cache)
		goto out_destroy_rui_cache;

	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_cache)
		goto out_destroy_cud_cache;

	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
					  sizeof(struct xfs_bud_log_item),
					  0, 0, NULL);
	if (!xfs_bud_cache)
		goto out_destroy_cui_cache;

	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_cache)
		goto out_destroy_bud_cache;

	return 0;

 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out:
	return -ENOMEM;
}
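/*
 * The function above follows the usual kernel unwind idiom: each
 * allocation gets a label that undoes everything allocated before it, so
 * a failure at step N falls through the labels for steps N-1..1 in
 * reverse order. A minimal sketch with two hypothetical caches:
 *
 *	a_cache = kmem_cache_create("a", sizeof(struct a), 0, 0, NULL);
 *	if (!a_cache)
 *		goto out;
 *	b_cache = kmem_cache_create("b", sizeof(struct b), 0, 0, NULL);
 *	if (!b_cache)
 *		goto out_destroy_a;
 *	return 0;
 * out_destroy_a:
 *	kmem_cache_destroy(a_cache);
 * out:
 *	return -ENOMEM;
 */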
STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

#ifdef CONFIG_HOTPLUG_CPU
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif
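/*
 * For orientation (illustrative, not from the original source):
 * CPUHP_XFS_DEAD is a "dead" state slot, so the xfs_cpu_dead() teardown
 * registered above runs on a surviving CPU after the victim CPU has gone
 * offline, e.g. after
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/online
 *
 * giving every entry on xfs_mount_list a chance to drain per-cpu inodegc
 * state off the now-dead CPU.
 */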
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_cpu_hotplug_init();
	if (error)
		goto out;

	error = xfs_init_caches();
	if (error)
		goto out_destroy_hp;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_caches;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_caches:
	xfs_destroy_caches();
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_caches();
	xfs_uuid_table_free();
	xfs_cpu_hotplug_destroy();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");
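/*
 * Module lifecycle, for reference (illustrative, not from the original
 * source): "modprobe xfs" runs init_xfs_fs() and prints the version
 * banner; "rmmod xfs" runs exit_xfs_fs(), which tears the same state down
 * in reverse order, plus xfs_uuid_table_free() for the UUID table that is
 * only populated lazily as filesystems are mounted.
 */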