/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/namei.h>
#include <linux/dax.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>

static const struct super_operations xfs_super_operations;
struct bio_set *xfs_ioend_bioset;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_mtpt, Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_barrier, Opt_nobarrier,
	Opt_inode64, Opt_inode32, Opt_ikeep, Opt_noikeep,
	Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2, Opt_filestreams,
	Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota,
	Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
};

static const match_table_t tokens = {
	{Opt_logbufs,	"logbufs=%u"},	/* number of XFS log buffers */
	{Opt_logbsize,	"logbsize=%s"},	/* size of XFS log buffers */
	{Opt_logdev,	"logdev=%s"},	/* log device */
	{Opt_rtdev,	"rtdev=%s"},	/* realtime I/O device */
	{Opt_biosize,	"biosize=%u"},	/* log2 of preferred buffered io size */
	{Opt_wsync,	"wsync"},	/* safe-mode nfs compatible mount */
	{Opt_noalign,	"noalign"},	/* turn off stripe alignment */
	{Opt_swalloc,	"swalloc"},	/* turn on stripe width allocation */
	{Opt_sunit,	"sunit=%u"},	/* data volume stripe unit */
	{Opt_swidth,	"swidth=%u"},	/* data volume stripe width */
	{Opt_nouuid,	"nouuid"},	/* ignore filesystem UUID */
	{Opt_mtpt,	"mtpt"},	/* filesystem mount point */
	{Opt_grpid,	"grpid"},	/* group-ID from parent directory */
	{Opt_nogrpid,	"nogrpid"},	/* group-ID from current process */
	{Opt_bsdgroups,	"bsdgroups"},	/* group-ID from parent directory */
	{Opt_sysvgroups,"sysvgroups"},	/* group-ID from current process */
	{Opt_allocsize,	"allocsize=%s"},/* preferred allocation size */
	{Opt_norecovery,"norecovery"},	/* don't run XFS recovery */
	{Opt_inode64,	"inode64"},	/* inodes can be allocated anywhere */
	{Opt_inode32,	"inode32"},	/* inode allocation limited to
					 * XFS_MAXINUMBER_32 */
	{Opt_ikeep,	"ikeep"},	/* do not free empty inode clusters */
	{Opt_noikeep,	"noikeep"},	/* free empty inode clusters */
	{Opt_largeio,	"largeio"},	/* report large I/O sizes in stat() */
	{Opt_nolargeio,	"nolargeio"},	/* do not report large I/O sizes
					 * in stat(). */
	{Opt_attr2,	"attr2"},	/* do use attr2 attribute format */
	{Opt_noattr2,	"noattr2"},	/* do not use attr2 attribute format */
	{Opt_filestreams,"filestreams"},/* use filestreams allocator */
	{Opt_quota,	"quota"},	/* disk quotas (user) */
	{Opt_noquota,	"noquota"},	/* no quotas */
	{Opt_usrquota,	"usrquota"},	/* user quota enabled */
	{Opt_grpquota,	"grpquota"},	/* group quota enabled */
	{Opt_prjquota,	"prjquota"},	/* project quota enabled */
	{Opt_uquota,	"uquota"},	/* user quota (IRIX variant) */
	{Opt_gquota,	"gquota"},	/* group quota (IRIX variant) */
	{Opt_pquota,	"pquota"},	/* project quota (IRIX variant) */
	{Opt_uqnoenforce,"uqnoenforce"},/* user quota limit enforcement */
	{Opt_gqnoenforce,"gqnoenforce"},/* group quota limit enforcement */
	{Opt_pqnoenforce,"pqnoenforce"},/* project quota limit enforcement */
	{Opt_qnoenforce, "qnoenforce"},	/* same as uqnoenforce */
	{Opt_discard,	"discard"},	/* Discard unused blocks */
	{Opt_nodiscard,	"nodiscard"},	/* Do not discard unused blocks */

	{Opt_dax,	"dax"},		/* Enable direct access to bdev pages */

	/* Deprecated mount options scheduled for removal */
	{Opt_barrier,	"barrier"},	/* use writer barriers for log write and
					 * unwritten extent conversion */
	{Opt_nobarrier,	"nobarrier"},	/* .. disable */

	{Opt_err,	NULL},
};


STATIC int
suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
{
	int	last, shift_left_factor = 0, _res;
	char	*value;
	int	ret = 0;

	value = match_strdup(s);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	/*
	 * Only assign the result on success; if kstrtoint() fails, _res is
	 * uninitialised and must not be shifted or stored.
	 */
	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	else
		*res = _res << shift_left_factor;
	kfree(value);
	return ret;
}
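
/*
 * Worked example (illustrative, not part of the original code): a mount
 * option such as "logbsize=64k" reaches suffix_kstrtoint() as the
 * substring "64k".  The trailing 'k' selects shift_left_factor = 10 and
 * is stripped, kstrtoint() parses "64", and *res becomes
 * 64 << 10 == 65536 bytes.  A bare "logbsize=65536" parses the same way
 * with shift_left_factor = 0.
 */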

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 *
 * *sb is const because this is also used to test options on the remount
 * path, and we don't want this to have any side effects at remount time.
 * Today this function does not change *sb, but just to future-proof...
 */
STATIC int
xfs_parseargs(
	struct xfs_mount	*mp,
	char			*options)
{
	const struct super_block *sb = mp->m_super;
	char			*p;
	substring_t		args[MAX_OPT_ARGS];
	int			dsunit = 0;
	int			dswidth = 0;
	int			iosize = 0;
	uint8_t			iosizelog = 0;

	/*
	 * set up the mount name first so all the errors will refer to the
	 * correct device.
	 */
	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	if (!mp->m_fsname)
		return -ENOMEM;
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb_rdonly(sb))
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & MS_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_BARRIER;
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;

	if (!options)
		goto done;

	while ((p = strsep(&options, ",")) != NULL) {
		int		token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_logbufs:
			if (match_int(args, &mp->m_logbufs))
				return -EINVAL;
			break;
		case Opt_logbsize:
			if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
				return -EINVAL;
			break;
		case Opt_logdev:
			mp->m_logname = match_strdup(args);
			if (!mp->m_logname)
				return -ENOMEM;
			break;
		case Opt_mtpt:
			xfs_warn(mp, "%s option not allowed on this system", p);
			return -EINVAL;
		case Opt_rtdev:
			mp->m_rtname = match_strdup(args);
			if (!mp->m_rtname)
				return -ENOMEM;
			break;
		case Opt_allocsize:
		case Opt_biosize:
			if (suffix_kstrtoint(args, 10, &iosize))
				return -EINVAL;
			iosizelog = ffs(iosize) - 1;
			break;
		case Opt_grpid:
		case Opt_bsdgroups:
			mp->m_flags |= XFS_MOUNT_GRPID;
			break;
		case Opt_nogrpid:
		case Opt_sysvgroups:
			mp->m_flags &= ~XFS_MOUNT_GRPID;
			break;
		case Opt_wsync:
			mp->m_flags |= XFS_MOUNT_WSYNC;
			break;
		case Opt_norecovery:
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
			break;
		case Opt_noalign:
			mp->m_flags |= XFS_MOUNT_NOALIGN;
			break;
		case Opt_swalloc:
			mp->m_flags |= XFS_MOUNT_SWALLOC;
			break;
		case Opt_sunit:
			if (match_int(args, &dsunit))
				return -EINVAL;
			break;
		case Opt_swidth:
			if (match_int(args, &dswidth))
				return -EINVAL;
			break;
		case Opt_inode32:
			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
			break;
		case Opt_inode64:
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
			break;
		case Opt_nouuid:
			mp->m_flags |= XFS_MOUNT_NOUUID;
			break;
		case Opt_ikeep:
			mp->m_flags |= XFS_MOUNT_IKEEP;
			break;
		case Opt_noikeep:
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
			break;
		case Opt_largeio:
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
			break;
		case Opt_nolargeio:
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
			break;
		case Opt_attr2:
			mp->m_flags |= XFS_MOUNT_ATTR2;
			break;
		case Opt_noattr2:
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
			break;
		case Opt_filestreams:
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
			break;
		case Opt_noquota:
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
			break;
		case Opt_quota:
		case Opt_uquota:
		case Opt_usrquota:
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
			break;
		case Opt_qnoenforce:
		case Opt_uqnoenforce:
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
			break;
		case Opt_pquota:
		case Opt_prjquota:
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_PQUOTA_ENFD);
			break;
		case Opt_pqnoenforce:
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_PQUOTA_ENFD;
			break;
		case Opt_gquota:
		case Opt_grpquota:
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_GQUOTA_ENFD);
			break;
		case Opt_gqnoenforce:
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_GQUOTA_ENFD;
			break;
		case Opt_discard:
			mp->m_flags |= XFS_MOUNT_DISCARD;
			break;
		case Opt_nodiscard:
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
			break;
#ifdef CONFIG_FS_DAX
		case Opt_dax:
			mp->m_flags |= XFS_MOUNT_DAX;
			break;
#endif
		case Opt_barrier:
			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
			mp->m_flags |= XFS_MOUNT_BARRIER;
			break;
		case Opt_nobarrier:
			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		default:
			xfs_warn(mp, "unknown mount option [%s].", p);
			return -EINVAL;
		}
	}

	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}
#endif

	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (dsunit && (dswidth % dsunit != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			dswidth, dsunit);
		return -EINVAL;
	}

done:
	if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		mp->m_dalign = dsunit;
		mp->m_swidth = dswidth;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (iosizelog) {
		if (iosizelog > XFS_MAX_IO_LOG ||
		    iosizelog < XFS_MIN_IO_LOG) {
			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
				iosizelog, XFS_MIN_IO_LOG,
				XFS_MAX_IO_LOG);
			return -EINVAL;
		}

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
	}

	return 0;
}
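
/*
 * Worked example (illustrative, not from the original source): mounting
 * with "-o logbufs=8,logbsize=64k,usrquota,noalign" leaves this function
 * with mp->m_logbufs == 8 (via match_int), mp->m_logbsize == 65536 (via
 * suffix_kstrtoint), the XFS_UQUOTA_ACCT/ACTIVE/ENFD bits set in
 * mp->m_qflags, and XFS_MOUNT_NOALIGN set in mp->m_flags; those values
 * then pass the sanity checks at the end of the function.
 */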

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

STATIC int
xfs_showargs(
	struct xfs_mount	*mp,
	struct seq_file		*m)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_SMALL_INUMS,	",inode32" },
		{ XFS_MOUNT_DAX,		",dax" },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	",largeio" },
		{ XFS_MOUNT_BARRIER,		",nobarrier" },
		{ XFS_MOUNT_SMALL_INUMS,	",inode64" },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);
	}

	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

static uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_write_begin does this in an [unsigned] long...
	 *      page->index << (PAGE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
# endif
#endif

	return (((uint64_t)pagefactor) << bitshift) - 1;
}
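
/*
 * Worked examples (illustrative, not from the original source): on a
 * 64-bit kernel, pagefactor == 1 and bitshift == 63, so the limit is
 * 2^63 - 1.  On a 32-bit kernel with CONFIG_LBDAF and 4k pages,
 * pagefactor == 4096 and bitshift == 32, giving 2^44 - 1 (16TB - 1).
 * Without CONFIG_LBDAF and with 4k blocks the limit is
 * (4096 << 31) - 1, i.e. the ~8Tb wrap described above; 1k blocks
 * reduce pagefactor to 1024 and the limit to 2^41 - 1.
 */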

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (mp->m_maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
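
/*
 * Worked example (illustrative; the geometry numbers are assumed, not
 * taken from the original source): with 4k blocks and 512-byte inodes,
 * inode numbers encode (AG | block | offset), so XFS_MAXINUMBER_32
 * (2^32 - 1) is exceeded once an inode would live beyond roughly the
 * first 2^29 filesystem blocks (~2TB worth of AGs).  On a larger
 * inode32 filesystem only the AGs below that boundary keep
 * pagi_inodeok set, and maxagi counts just those AGs.
 */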

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp, mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp, mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp, mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp, mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
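
/*
 * Illustrative mapping (not from the original source): a mount like
 *	mount -t xfs -o logdev=/dev/sdb1,rtdev=/dev/sdc1 /dev/sda1 /mnt
 * is configuration (3) above: m_ddev_targp wraps /dev/sda1 (already
 * opened by the VFS), m_logdev_targp wraps /dev/sdb1, and m_rtdev_targp
 * wraps /dev/sdc1.  With no logdev/rtdev options (configuration (1)),
 * m_logdev_targp simply aliases m_ddev_targp and m_rtdev_targp stays
 * NULL.
 */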

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_data_workqueue)
		goto out_destroy_buf;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_data_iodone_queue;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE|WQ_HIGHPRI, 0,
			mp->m_fsname);
	if (!mp->m_log_workqueue)
		goto out_destroy_reclaim;

	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_eofblocks_workqueue)
		goto out_destroy_log;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
					       mp->m_fsname);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_log:
	destroy_workqueue(mp->m_log_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_data_iodone_queue:
	destroy_workqueue(mp->m_data_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_eofblocks_workqueue);
	destroy_workqueue(mp->m_log_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_data_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	int			error;

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	if (xfs_is_reflink_inode(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
		if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
			xfs_warn(ip->i_mount,
"Error %d while evicting CoW blocks for inode %llu.",
					error, ip->i_ino);
	}

	xfs_inactive(ip);

	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock. The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}

STATIC void
xfs_free_fsname(
	struct xfs_mount	*mp)
{
	kfree(mp->m_fsname);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SB_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
	statp->f_bavail = statp->f_bfree;

	fakeinos = statp->f_bfree << sbp->sb_inopblog;
	statp->f_files = MIN(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					mp->m_maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);
	return 0;
}
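
/*
 * Worked example (illustrative numbers, not from the original source):
 * with f_bfree == 1000 blocks and sb_inopblog == 4 (16 inodes per
 * block), fakeinos == 16000.  If icount == 2048 and ifree == 48, then
 * f_files == min(2048 + 16000, XFS_MAXINUMBER) == 18048 (before the
 * m_maxicount and sb_icount clamps) and
 * f_ffree == 18048 - (2048 - 48) == 16048.
 */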

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to its location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
 * it is started again when appropriate.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* force the log to unpin objects from the now complete transactions */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	xfs_log_quiesce(mp);
}

STATIC int
xfs_test_remount_options(
	struct super_block	*sb,
	struct xfs_mount	*mp,
	char			*options)
{
	int			error = 0;
	struct xfs_mount	*tmp_mp;

	tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
	if (!tmp_mp)
		return -ENOMEM;

	tmp_mp->m_super = sb;
	error = xfs_parseargs(tmp_mp, options);
	xfs_free_fsname(tmp_mp);
	kmem_free(tmp_mp);

	return error;
}

STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	/* First, check for complete junk; i.e. invalid options */
	error = xfs_test_remount_options(sb, mp, options);
	if (error)
		return error;

	sync_filesystem(sb);
	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
			mp->m_flags |= XFS_MOUNT_BARRIER;
			break;
		case Opt_nobarrier:
			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		case Opt_inode64:
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
			break;
		case Opt_inode32:
			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount which can't be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
#if 0
			xfs_info(mp,
		"mount option \"%s\" not supported for remount", p);
			return -EINVAL;
#else
			break;
#endif
		}
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
		if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
			xfs_warn(mp,
		"ro->rw transition prohibited on norecovery mount");
			return -EINVAL;
		}

		if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_ro_compat_feature(sbp,
					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
			xfs_warn(mp,
"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
				(sbp->sb_features_ro_compat &
					XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
			return -EINVAL;
		}

		mp->m_flags &= ~XFS_MOUNT_RDONLY;

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_sb) {
			error = xfs_sync_sb(mp, false);
			if (error) {
				xfs_warn(mp, "failed to write sb changes");
				return error;
			}
			mp->m_update_sb = false;
		}

		/*
		 * Fill out the reserve pool if it is empty. Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);
		xfs_log_work_queue(mp);
		xfs_queue_eofblocks(mp);

		/* Recover any CoW blocks that never got remapped. */
		error = xfs_reflink_recover_cow(mp);
		if (error) {
			xfs_err(mp,
	"Error %d recovering leftover CoW allocations.", error);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			return error;
		}

		/* Create the per-AG metadata reservation pool. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
		/* Free the per-AG metadata reservation pool. */
		error = xfs_fs_unreserve_ag_blocks(mp);
		if (error) {
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			return error;
		}

		/*
		 * Before we sync the metadata, we need to free up the reserve
		 * block pool so that the used block count in the superblock on
		 * disk is correct at the end of the remount. Stash the current
		 * reserve pool size so that if we get remounted rw, we can
		 * return it to the same size.
		 */
		xfs_save_resvblks(mp);

		/*
		 * Cancel background eofb scanning so it cannot race with the
		 * final log force+buftarg wait and deadlock the remount.
		 */
		cancel_delayed_work_sync(&mp->m_eofblocks_work);

		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return xfs_sync_sb(mp, true);
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	return 0;
}

STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	return xfs_showargs(XFS_M(root->d_sb), m);
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	return 0;

free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
}

STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = -ENOMEM;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		goto out;

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;

	mp->m_super = sb;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_fsname;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= MS_I_VERSION;

	if (mp->m_flags & XFS_MOUNT_DAX) {
		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		error = bdev_dax_supported(sb, sb->s_blocksize);
		if (error) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			mp->m_flags &= ~XFS_MOUNT_DAX;
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb))
			xfs_alert(mp,
		"DAX and reflink have not been tested together!");
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"EXPERIMENTAL reverse mapping btree not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
		xfs_alert(mp,
	"EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb))
		xfs_alert(mp,
	"EXPERIMENTAL reflink feature enabled. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mp);
 out:
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}

STATIC struct dentry *
xfs_fs_mount(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}
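
/*
 * Illustrative call chain (not from the original source): for
 *	mount -t xfs /dev/sda1 /mnt
 * the VFS resolves the "xfs" filesystem type registered below, calls
 * xfs_fs_mount(), and mount_bdev() opens /dev/sda1 exclusively before
 * invoking xfs_fs_fill_super() with the raw option string as 'data'.
 */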

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.mount			= xfs_fs_mount,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("xfs");

STATIC int __init
xfs_init_zones(void)
{
	xfs_ioend_bioset = bioset_create(4 * MAX_BUF_PER_PAGE,
			offsetof(struct xfs_ioend, io_inline_bio),
			BIOSET_NEED_BVECS);
	if (!xfs_ioend_bioset)
		goto out;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_free_ioend_bioset;

	xfs_bmap_free_item_zone = kmem_zone_init(
			sizeof(struct xfs_extent_free_item),
			"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
					   "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_log_item_desc_zone;

	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
			KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;
	xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
					"xfs_icr");
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
			"xfs_rud_item");
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_zone_init(
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			"xfs_rui_item");
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
			"xfs_cud_item");
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_zone_init(
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			"xfs_cui_item");
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
			"xfs_bud_item");
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_zone_init(
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			"xfs_bui_item");
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_zone_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_zone_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_zone_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_zone_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_zone_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_zone_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_zone_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_log_item_desc_zone:
	kmem_zone_destroy(xfs_log_item_desc_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_free_ioend_bioset:
	bioset_free(xfs_ioend_bioset);
 out:
	return -ENOMEM;
}
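
/*
 * Note (not in the original source): the error labels above unwind in
 * exactly the reverse order of creation, so a failure at any step frees
 * only what was already set up; e.g. a failed "xfs_bui_item" zone jumps
 * to out_destroy_bud_zone and falls through every earlier destroy label
 * down to bioset_free().
 */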

STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_zone_destroy(xfs_bui_zone);
	kmem_zone_destroy(xfs_bud_zone);
	kmem_zone_destroy(xfs_cui_zone);
	kmem_zone_destroy(xfs_cud_zone);
	kmem_zone_destroy(xfs_rui_zone);
	kmem_zone_destroy(xfs_rud_zone);
	kmem_zone_destroy(xfs_icreate_zone);
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_log_item_desc_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	bioset_free(xfs_ioend_bioset);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_extent_free_init_defer_op();
	xfs_rmap_update_init_defer_op();
	xfs_refcount_update_init_defer_op();
	xfs_bmap_update_init_defer_op();

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");