// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		   (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ACCT) {
		if (mp->m_qflags & XFS_UQUOTA_ENFD)
			seq_puts(m, ",usrquota");
		else
			seq_puts(m, ",uqnoenforce");
	}

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
			0, mp->m_super->s_id);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_eofblocks_workqueue)
		goto out_destroy_reclaim;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
					       mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_eofblocks_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the inode is
	 * clean, it still may be under IO and hence we have to wait for IO
	 * completion to occur before we can reclaim the inode. The background
	 * reclaim path handles this more efficiently than we can here, so
	 * simply let background reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to its location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * We cancel log work early here to ensure all transactions the log worker may
 * run have finished before we clean up and log the superblock and write an
 * unmount record. The unfreeze process is responsible for restarting the log
 * worker correctly.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	cancel_delayed_work_sync(&mp->m_log->l_work);

	/* force the log to unpin objects from the now complete transactions */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_quiesce(mp);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_stop_block_reaping(mp);
	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	ret = xfs_sync_sb(mp, true);
	memalloc_nofs_restore(flags);
	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_start_block_reaping(mp);
	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

/* Parse an integer that may carry a k/m/g unit suffix (shift by 10/20/30). */
static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(mp->m_logname);
		mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(mp->m_rtname);
		mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		mp->m_allocsize_log = ffs(size) - 1;
		mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		mp->m_flags |= XFS_MOUNT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		mp->m_flags &= ~XFS_MOUNT_GRPID;
		return 0;
	case Opt_wsync:
		mp->m_flags |= XFS_MOUNT_WSYNC;
		return 0;
	case Opt_norecovery:
		mp->m_flags |= XFS_MOUNT_NORECOVERY;
		return 0;
	case Opt_noalign:
		mp->m_flags |= XFS_MOUNT_NOALIGN;
		return 0;
	case Opt_swalloc:
		mp->m_flags |= XFS_MOUNT_SWALLOC;
		return 0;
	case Opt_sunit:
		mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		mp->m_flags |= XFS_MOUNT_NOUUID;
		return 0;
	case Opt_largeio:
		mp->m_flags |= XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_nolargeio:
		mp->m_flags &= ~XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_filestreams:
		mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
				 XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
				 XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
				 XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		mp->m_flags |= XFS_MOUNT_DISCARD;
		return 0;
	case Opt_nodiscard:
		mp->m_flags &= ~XFS_MOUNT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_warn(mp, "%s mount option is deprecated.", param->key);
		mp->m_flags |= XFS_MOUNT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_warn(mp, "%s mount option is deprecated.", param->key);
		mp->m_flags &= ~XFS_MOUNT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_warn(mp, "%s mount option is deprecated.", param->key);
		mp->m_flags |= XFS_MOUNT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_warn(mp, "%s mount option is deprecated.", param->key);
		mp->m_flags &= ~XFS_MOUNT_ATTR2;
		mp->m_flags |= XFS_MOUNT_NOATTR2;
		return 0;
	default:
		xfs_warn(mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_sb_version_needsrepair(&mp->m_sb)) {
		xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (xfs_sb_version_hasbigtime(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");

	if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			xfs_alert(mp,
		"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}

static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_start_block_reaping(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_stop_block_reaping(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_icache_free_cowblocks(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_quiesce_attr(mp);
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	xfs_sb_t		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("xfs");

STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xf_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

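/*
 * Module entry point.  Subsystems are brought up in dependency order: slab
 * caches, workqueues, the MRU cache, the buffer cache, procfs and sysctl
 * interfaces, the sysfs kset with its stats/debug kobjects, quota, and
 * finally the filesystem type itself.  Any failure unwinds whatever has
 * already been set up, in reverse order.
 */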
STATIC int __init
init_xfs_fs(void)
{
	int error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");