// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};
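
/*
 * Illustrative mapping of the dax mount option (not an exhaustive spec):
 * a bare "-o dax" is parsed as dax=always below; "-o dax=inode" defers the
 * decision to the per-inode FS_XFLAG_DAX flag; "-o dax=never" disables DAX
 * regardless of the inode flags.
 */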

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};
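
/*
 * Example (illustrative): "mount -o logbufs=8,logbsize=256k,noalign ..." is
 * parsed against this table as Opt_logbufs (a u32), Opt_logbsize (a string,
 * so the size suffix can be handled by suffix_kstrtoint() below) and
 * Opt_noalign (a flag).  Note that "dax" appears twice: once as a bare flag
 * and once as an enum taking inode/always/never.
 */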

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
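
/*
 * Worked example for the inode32 logic below (illustrative): on a filesystem
 * large enough that the highest possible inode number exceeds
 * XFS_MAXINUMBER_32, mounting with inode32 marks only the low AGs as
 * inode-capable so every allocated inode number still fits in 32 bits; with
 * inode64 (the default) every AG may contain inodes.
 */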

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (xfs_is_inode32(mp)) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}

static int
xfs_setup_dax_always(
	struct xfs_mount	*mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp)) {
		xfs_alert(mp, "DAX and reflink cannot be used together!");
		return -EINVAL;
	}

	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}
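
/*
 * Note the asymmetry above: a device or block size that cannot support DAX
 * merely downgrades the mount to dax=never, whereas dax together with
 * reflink is rejected outright with -EINVAL and fails the mount.
 */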

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			return error;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
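
/*
 * Sizing note (illustrative): the data and realtime buftargs always use the
 * superblock sector size (sb_sectsize), while an external log falls back to
 * BBSIZE (512 bytes) unless the sector size feature records a larger
 * sb_logsectsize.
 */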

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}
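
/*
 * Descriptive note: with lazytime, timestamp updates are normally kept only
 * in the VFS inode.  The transaction below runs when the VFS signals that a
 * deferred timestamp must finally be made durable, i.e. a dirty_inode call
 * with I_DIRTY_SYNC on an inode that is still marked I_DIRTY_TIME.
 */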
static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flag)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab.  This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		    "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}
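
/*
 * Sketch of the statfs accounting that follows: block counts come from the
 * per-cpu free space counter, while f_files is synthesized as "inodes already
 * allocated plus inodes that could still be created from free space", clamped
 * by the maximum inode count where one is configured.
 */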
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/* Wait for whatever inactivations are in progress. */
	xfs_inodegc_flush(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
				statp->f_files,
				sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
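
/*
 * Save/restore pairing above (illustrative): freezing or remounting
 * read-only stashes the current reserve pool size and shrinks the pool to
 * zero; thawing or remounting read-write refills it from the stashed value,
 * or from xfs_default_resblks() if nothing was stashed.
 */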

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int			error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}
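
/*
 * Context note (assumption, not stated in this file): the per-cpu counters
 * are created zeroed above and later seeded from the on-disk superblock via
 * xfs_reinit_percpu_counters() once sb_icount/sb_ifree/sb_fdblocks are known
 * to be trustworthy.
 */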

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		init_llist_head(&gc->list);
		gc->items = 0;
		INIT_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	/* Only write the result back on a successful parse. */
	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	else
		*res = _res << shift_left_factor;
	kfree(value);
	return ret;
}
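
/*
 * Example usage (illustrative): suffix_kstrtoint("64k", 10, &v) stores
 * 65536; "16m" yields 16777216; a bare "4096" is passed through unchanged.
 */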

static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/*
	 * Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_features |= XFS_FEAT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
	 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}
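
/*
 * Examples of combinations rejected above (illustrative): "-o sunit=128"
 * without swidth, "-o sunit=128,swidth=100" (swidth not a multiple of
 * sunit), or "-o noalign,sunit=128,swidth=256" (noalign contradicts the
 * explicit stripe geometry).
 */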

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/*
	 * All percpu data structures requiring cleanup when a cpu goes offline
	 * must be allocated before adding this @mp to the cpu-dead handler's
	 * mount list.
	 */
	xfs_mount_list_add(mp);

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_has_needsrepair(mp)) {
		xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}
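
	/*
	 * Worked example for the check above (illustrative, 64-bit kernel
	 * with 4096-byte blocks): MAX_LFS_FILESIZE is 2^63 - 1 bytes, which
	 * maps to a block offset of roughly 2^51, comfortably below the
	 * 54-bit bmbt offset limit, so the check only fires if the limits
	 * have been miscalculated.
	 */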

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_has_bigtime(mp)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (xfs_has_crc(mp))
		sb->s_flags |= SB_I_VERSION;

	if (xfs_has_dax_always(mp)) {
		error = xfs_setup_dax_always(mp);
		if (error)
			goto out_filestream_unmount;
	}

	if (xfs_has_discard(mp)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_features &= ~XFS_FEAT_DISCARD;
		}
	}

	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_inodegc:
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}
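
/*
 * New-mount-API flow (assumed summary): xfs_init_fs_context() allocates the
 * xfs_mount and installs xfs_context_ops; each option is fed through
 * xfs_fs_parse_param(); and get_tree_bdev() above opens the backing block
 * device and calls xfs_fs_fill_super() to complete the mount.
 */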

static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	if (xfs_has_norecovery(mp)) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (xfs_sb_is_v5(sbp) &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
	};
	int			error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/*
	 * Clear out all remaining COW staging extents and speculative post-EOF
	 * preallocations so that we don't leave inodes requiring inactivation
	 * cleanups during reclaim on a read-only mount.  We must process every
	 * cached inode, so this requires a synchronous cache scan.
	 */
	error = xfs_blockgc_free_space(mp, &icw);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (xfs_has_crc(mp))
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* ro -> rw */
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_features |= XFS_FEAT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_features |= XFS_FEAT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");
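
/*
 * Registration note (illustrative): FS_REQUIRES_DEV marks xfs as a
 * block-device filesystem, FS_ALLOW_IDMAP opts in to idmapped mounts, and
 * MODULE_ALIAS_FS lets "mount -t xfs" autoload the module.
 */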
STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					       sizeof(struct xfs_da_state),
					       0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					    sizeof(struct xfs_ifork),
					    0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					    sizeof(struct xfs_trans),
					    0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					       sizeof(struct xfs_buf_log_item),
					       0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;

	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_cache)
		goto out_destroy_buf_item_cache;

	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_cache)
		goto out_destroy_efd_cache;

	xfs_inode_cache = kmem_cache_create("xfs_inode",
					    sizeof(struct xfs_inode), 0,
					    (SLAB_HWCACHE_ALIGN |
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					    xfs_fs_inode_init_once);
	if (!xfs_inode_cache)
		goto out_destroy_efi_cache;

	xfs_ili_cache = kmem_cache_create("xfs_ili",
					  sizeof(struct xfs_inode_log_item), 0,
					  SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					  NULL);
	if (!xfs_ili_cache)
		goto out_destroy_inode_cache;

	xfs_icreate_cache = kmem_cache_create("xfs_icr",
					      sizeof(struct xfs_icreate_item),
					      0, 0, NULL);
	if (!xfs_icreate_cache)
		goto out_destroy_ili_cache;

	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
					  sizeof(struct xfs_rud_log_item),
					  0, 0, NULL);
	if (!xfs_rud_cache)
		goto out_destroy_icreate_cache;

	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_cache)
		goto out_destroy_rud_cache;

	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
					  sizeof(struct xfs_cud_log_item),
					  0, 0, NULL);
	if (!xfs_cud_cache)
		goto out_destroy_rui_cache;

	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_cache)
		goto out_destroy_cud_cache;

	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
					  sizeof(struct xfs_bud_log_item),
					  0, 0, NULL);
	if (!xfs_bud_cache)
		goto out_destroy_cui_cache;

	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_cache)
		goto out_destroy_bud_cache;

	return 0;

 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out:
	return -ENOMEM;
}
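/*
 * The caches created above can be observed at runtime, e.g. via
 * "grep xfs /proc/slabinfo" on a kernel with slabinfo enabled, though
 * slab cache merging may alias some of them with same-sized caches.
 */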
STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed RCU frees are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

#ifdef CONFIG_HOTPLUG_CPU
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif
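/*
 * Everything set up in init_xfs_fs() below is torn down in reverse
 * order, both on its error paths and in exit_xfs_fs(). The one
 * asymmetry is xfs_uuid_table_free(): the uuid table grows lazily as
 * filesystems are mounted, so it is only freed at module exit.
 */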
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_cpu_hotplug_init();
	if (error)
		goto out;

	error = xfs_init_caches();
	if (error)
		goto out_destroy_hp;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_caches;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_caches:
	xfs_destroy_caches();
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_caches();
	xfs_uuid_table_free();
	xfs_cpu_hotplug_destroy();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");
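/*
 * Usage note (outside this file): registering xfs_fs_type makes the
 * filesystem mountable in the usual way, e.g.
 *
 *	# modprobe xfs
 *	# mount -t xfs /dev/sdb1 /mnt
 *
 * and MODULE_ALIAS_FS("xfs") lets the kernel request the module
 * automatically on the first "mount -t xfs" if it is not yet loaded.
 */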