/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
#include "xfs_da_btree.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_sync.h"
#include "xfs_trace.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>

static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

#define MNTOPT_LOGBUFS    "logbufs"     /* number of XFS log buffers */
#define MNTOPT_LOGBSIZE   "logbsize"    /* size of XFS log buffers */
#define MNTOPT_LOGDEV     "logdev"      /* log device */
#define MNTOPT_RTDEV      "rtdev"       /* realtime I/O device */
#define MNTOPT_BIOSIZE    "biosize"     /* log2 of preferred buffered io size */
#define MNTOPT_WSYNC      "wsync"       /* safe-mode nfs compatible mount */
#define MNTOPT_NOALIGN    "noalign"     /* turn off stripe alignment */
#define MNTOPT_SWALLOC    "swalloc"     /* turn on stripe width allocation */
#define MNTOPT_SUNIT      "sunit"       /* data volume stripe unit */
#define MNTOPT_SWIDTH     "swidth"      /* data volume stripe width */
#define MNTOPT_NOUUID     "nouuid"      /* ignore filesystem UUID */
#define MNTOPT_MTPT       "mtpt"        /* filesystem mount point */
#define MNTOPT_GRPID      "grpid"       /* group-ID from parent directory */
#define MNTOPT_NOGRPID    "nogrpid"     /* group-ID from current process */
#define MNTOPT_BSDGROUPS  "bsdgroups"   /* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS "sysvgroups"  /* group-ID from current process */
#define MNTOPT_ALLOCSIZE  "allocsize"   /* preferred allocation size */
#define MNTOPT_NORECOVERY "norecovery"  /* don't run XFS recovery */
#define MNTOPT_BARRIER    "barrier"     /* use writer barriers for log write and
                                         * unwritten extent conversion */
#define MNTOPT_NOBARRIER  "nobarrier"   /* .. disable */
#define MNTOPT_64BITINODE "inode64"     /* inodes can be allocated anywhere */
#define MNTOPT_IKEEP      "ikeep"       /* do not free empty inode clusters */
#define MNTOPT_NOIKEEP    "noikeep"     /* free empty inode clusters */
#define MNTOPT_LARGEIO    "largeio"     /* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO  "nolargeio"   /* do not report large I/O sizes
                                         * in stat(). */
#define MNTOPT_ATTR2      "attr2"       /* do use attr2 attribute format */
#define MNTOPT_NOATTR2    "noattr2"     /* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */
#define MNTOPT_QUOTA      "quota"       /* disk quotas (user) */
#define MNTOPT_NOQUOTA    "noquota"     /* no quotas */
#define MNTOPT_USRQUOTA   "usrquota"    /* user quota enabled */
#define MNTOPT_GRPQUOTA   "grpquota"    /* group quota enabled */
#define MNTOPT_PRJQUOTA   "prjquota"    /* project quota enabled */
#define MNTOPT_UQUOTA     "uquota"      /* user quota (IRIX variant) */
#define MNTOPT_GQUOTA     "gquota"      /* group quota (IRIX variant) */
#define MNTOPT_PQUOTA     "pquota"      /* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce" /* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce" /* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce" /* project quota limit enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce"  /* same as uqnoenforce */
#define MNTOPT_DELAYLOG   "delaylog"    /* Delayed logging enabled */
#define MNTOPT_NODELAYLOG "nodelaylog"  /* Delayed logging disabled */
#define MNTOPT_DISCARD    "discard"     /* Discard unused blocks */
#define MNTOPT_NODISCARD  "nodiscard"   /* Do not discard unused blocks */
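
/*
 * Example (illustrative): a mount such as
 *
 *      mount -t xfs -o logbufs=8,logbsize=64k,inode64,swalloc /dev/sdb1 /mnt
 *
 * hands the string "logbufs=8,logbsize=64k,inode64,swalloc" to
 * xfs_parseargs() below, which matches each comma-separated token against
 * the MNTOPT_* names above.
 */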

/*
 * Table driven mount option parser.
 *
 * Currently only used for remount, but it will be used for mount
 * in the future, too.
 */
enum {
        Opt_barrier, Opt_nobarrier, Opt_err
};

static const match_table_t tokens = {
        {Opt_barrier, "barrier"},
        {Opt_nobarrier, "nobarrier"},
        {Opt_err, NULL}
};


STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
{
        int     last, shift_left_factor = 0;
        char    *value = s;

        last = strlen(value) - 1;
        if (value[last] == 'K' || value[last] == 'k') {
                shift_left_factor = 10;
                value[last] = '\0';
        }
        if (value[last] == 'M' || value[last] == 'm') {
                shift_left_factor = 20;
                value[last] = '\0';
        }
        if (value[last] == 'G' || value[last] == 'g') {
                shift_left_factor = 30;
                value[last] = '\0';
        }

        return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}
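
/*
 * Example (illustrative): suffix_strtoul("32k", &endp, 10) strips the
 * trailing 'k', parses "32" in base 10 and shifts left by 10, returning
 * 32768; "1g" likewise yields 1 << 30.  Note that `last` is computed once,
 * so only a single trailing suffix character is recognised.
 */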

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 */
STATIC int
xfs_parseargs(
        struct xfs_mount        *mp,
        char                    *options)
{
        struct super_block      *sb = mp->m_super;
        char                    *this_char, *value, *eov;
        int                     dsunit = 0;
        int                     dswidth = 0;
        int                     iosize = 0;
        __uint8_t               iosizelog = 0;

        /*
         * set up the mount name first so all the errors will refer to the
         * correct device.
         */
        mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
        if (!mp->m_fsname)
                return ENOMEM;
        mp->m_fsname_len = strlen(mp->m_fsname) + 1;

        /*
         * Copy binary VFS mount flags we are interested in.
         */
        if (sb->s_flags & MS_RDONLY)
                mp->m_flags |= XFS_MOUNT_RDONLY;
        if (sb->s_flags & MS_DIRSYNC)
                mp->m_flags |= XFS_MOUNT_DIRSYNC;
        if (sb->s_flags & MS_SYNCHRONOUS)
                mp->m_flags |= XFS_MOUNT_WSYNC;

        /*
         * Set some default flags that could be cleared by the mount option
         * parsing.
         */
        mp->m_flags |= XFS_MOUNT_BARRIER;
        mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
        mp->m_flags |= XFS_MOUNT_SMALL_INUMS;

        /*
         * These can be overridden by the mount option parsing.
         */
        mp->m_logbufs = -1;
        mp->m_logbsize = -1;

        if (!options)
                goto done;

        while ((this_char = strsep(&options, ",")) != NULL) {
                if (!*this_char)
                        continue;
                if ((value = strchr(this_char, '=')) != NULL)
                        *value++ = 0;

                if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
                        if (!value || !*value) {
                                xfs_warn(mp, "%s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        mp->m_logbufs = simple_strtoul(value, &eov, 10);
                } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
                        if (!value || !*value) {
                                xfs_warn(mp, "%s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        mp->m_logbsize = suffix_strtoul(value, &eov, 10);
                } else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
                        if (!value || !*value) {
                                xfs_warn(mp, "%s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
                        if (!mp->m_logname)
                                return ENOMEM;
                } else if (!strcmp(this_char, MNTOPT_MTPT)) {
                        xfs_warn(mp, "%s option not allowed on this system",
                                this_char);
                        return EINVAL;
                } else if (!strcmp(this_char, MNTOPT_RTDEV)) {
                        if (!value || !*value) {
                                xfs_warn(mp, "%s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
                        if (!mp->m_rtname)
                                return ENOMEM;
                } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
                        if (!value || !*value) {
                                xfs_warn(mp, "%s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        iosize = simple_strtoul(value, &eov, 10);
                        iosizelog = ffs(iosize) - 1;
                } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
                        if (!value || !*value) {
                                xfs_warn(mp, "%s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        iosize = suffix_strtoul(value, &eov, 10);
                        iosizelog = ffs(iosize) - 1;
                } else if (!strcmp(this_char, MNTOPT_GRPID) ||
                           !strcmp(this_char, MNTOPT_BSDGROUPS)) {
                        mp->m_flags |= XFS_MOUNT_GRPID;
                } else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
                           !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
                        mp->m_flags &= ~XFS_MOUNT_GRPID;
                } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
                        mp->m_flags |= XFS_MOUNT_WSYNC;
                } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
                        mp->m_flags |= XFS_MOUNT_NORECOVERY;
                } else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
                        mp->m_flags |= XFS_MOUNT_NOALIGN;
                } else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
                        mp->m_flags |= XFS_MOUNT_SWALLOC;
                } else if (!strcmp(this_char, MNTOPT_SUNIT)) {
                        if (!value || !*value) {
                                xfs_warn(mp, "%s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        dsunit = simple_strtoul(value, &eov, 10);
                } else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
                        if (!value || !*value) {
                                xfs_warn(mp, "%s option requires an argument",
                                        this_char);
                                return EINVAL;
                        }
                        dswidth = simple_strtoul(value, &eov, 10);
                } else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
                        mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
#if !XFS_BIG_INUMS
                        xfs_warn(mp, "%s option not allowed on this system",
                                this_char);
                        return EINVAL;
#endif
                } else if (!strcmp(this_char, MNTOPT_NOUUID)) {
                        mp->m_flags |= XFS_MOUNT_NOUUID;
                } else if (!strcmp(this_char, MNTOPT_BARRIER)) {
                        mp->m_flags |= XFS_MOUNT_BARRIER;
                } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
                        mp->m_flags &= ~XFS_MOUNT_BARRIER;
                } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
                        mp->m_flags |= XFS_MOUNT_IKEEP;
                } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
                        mp->m_flags &= ~XFS_MOUNT_IKEEP;
                } else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
                        mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
                } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
                        mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
                } else if (!strcmp(this_char, MNTOPT_ATTR2)) {
                        mp->m_flags |= XFS_MOUNT_ATTR2;
                } else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
                        mp->m_flags &= ~XFS_MOUNT_ATTR2;
                        mp->m_flags |= XFS_MOUNT_NOATTR2;
                } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
                        mp->m_flags |= XFS_MOUNT_FILESTREAMS;
                } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
                        mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
                        mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
                        mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
                } else if (!strcmp(this_char, MNTOPT_QUOTA) ||
                           !strcmp(this_char, MNTOPT_UQUOTA) ||
                           !strcmp(this_char, MNTOPT_USRQUOTA)) {
                        mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
                                         XFS_UQUOTA_ENFD);
                } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
                           !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
                        mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
                        mp->m_qflags &= ~XFS_UQUOTA_ENFD;
                } else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
                           !strcmp(this_char, MNTOPT_PRJQUOTA)) {
                        mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
                                         XFS_OQUOTA_ENFD);
                } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
                        mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
                        mp->m_qflags &= ~XFS_OQUOTA_ENFD;
                } else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
                           !strcmp(this_char, MNTOPT_GRPQUOTA)) {
                        mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
                                         XFS_OQUOTA_ENFD);
                } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
                        mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
                        mp->m_qflags &= ~XFS_OQUOTA_ENFD;
                } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
                        xfs_warn(mp,
        "delaylog is the default now, option is deprecated.");
                } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
                        xfs_warn(mp,
        "nodelaylog support has been removed, option is deprecated.");
                } else if (!strcmp(this_char, MNTOPT_DISCARD)) {
                        mp->m_flags |= XFS_MOUNT_DISCARD;
                } else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
                        mp->m_flags &= ~XFS_MOUNT_DISCARD;
                } else if (!strcmp(this_char, "ihashsize")) {
                        xfs_warn(mp,
        "ihashsize no longer used, option is deprecated.");
                } else if (!strcmp(this_char, "osyncisdsync")) {
                        xfs_warn(mp,
        "osyncisdsync has no effect, option is deprecated.");
                } else if (!strcmp(this_char, "osyncisosync")) {
                        xfs_warn(mp,
        "osyncisosync has no effect, option is deprecated.");
                } else if (!strcmp(this_char, "irixsgid")) {
                        xfs_warn(mp,
        "irixsgid is now a sysctl(2) variable, option is deprecated.");
                } else {
                        xfs_warn(mp, "unknown mount option [%s].", this_char);
                        return EINVAL;
                }
        }
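
        /*
         * Example (illustrative): "uquota" above turns on XFS_UQUOTA_ACCT,
         * XFS_UQUOTA_ACTIVE and XFS_UQUOTA_ENFD, while "uqnoenforce" sets
         * only the first two, so usage is accounted but limits are not
         * enforced.
         */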

        /*
         * no recovery flag requires a read-only mount
         */
        if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
            !(mp->m_flags & XFS_MOUNT_RDONLY)) {
                xfs_warn(mp, "no-recovery mounts must be read-only.");
                return EINVAL;
        }

        if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
                xfs_warn(mp,
        "sunit and swidth options incompatible with the noalign option");
                return EINVAL;
        }

#ifndef CONFIG_XFS_QUOTA
        if (XFS_IS_QUOTA_RUNNING(mp)) {
                xfs_warn(mp, "quota support not available in this kernel.");
                return EINVAL;
        }
#endif

        if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
            (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
                xfs_warn(mp, "cannot mount with both project and group quota");
                return EINVAL;
        }

        if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
                xfs_warn(mp, "sunit and swidth must be specified together");
                return EINVAL;
        }

        if (dsunit && (dswidth % dsunit != 0)) {
                xfs_warn(mp,
        "stripe width (%d) must be a multiple of the stripe unit (%d)",
                        dswidth, dsunit);
                return EINVAL;
        }

done:
        if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
                /*
                 * At this point the superblock has not been read
                 * in, therefore we do not know the block size.
                 * Before the mount call ends we will convert
                 * these to FSBs.
                 */
                if (dsunit) {
                        mp->m_dalign = dsunit;
                        mp->m_flags |= XFS_MOUNT_RETERR;
                }

                if (dswidth)
                        mp->m_swidth = dswidth;
        }

        if (mp->m_logbufs != -1 &&
            mp->m_logbufs != 0 &&
            (mp->m_logbufs < XLOG_MIN_ICLOGS ||
             mp->m_logbufs > XLOG_MAX_ICLOGS)) {
                xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
                        mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
                return XFS_ERROR(EINVAL);
        }
        if (mp->m_logbsize != -1 &&
            mp->m_logbsize != 0 &&
            (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
             mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
             !is_power_of_2(mp->m_logbsize))) {
                xfs_warn(mp,
                        "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
                        mp->m_logbsize);
                return XFS_ERROR(EINVAL);
        }

        if (iosizelog) {
                if (iosizelog > XFS_MAX_IO_LOG ||
                    iosizelog < XFS_MIN_IO_LOG) {
                        xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
                                iosizelog, XFS_MIN_IO_LOG,
                                XFS_MAX_IO_LOG);
                        return XFS_ERROR(EINVAL);
                }

                mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
                mp->m_readio_log = iosizelog;
                mp->m_writeio_log = iosizelog;
        }

        return 0;
}
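
/*
 * Example (illustrative): "allocsize=64k" makes suffix_strtoul() return
 * 65536 above, so iosizelog = ffs(65536) - 1 = 16 and, provided 16 lies
 * within [XFS_MIN_IO_LOG, XFS_MAX_IO_LOG], both m_readio_log and
 * m_writeio_log end up as 16 (a 64k preferred I/O size).
 */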

struct proc_xfs_info {
        int     flag;
        char    *str;
};

STATIC int
xfs_showargs(
        struct xfs_mount        *mp,
        struct seq_file         *m)
{
        static struct proc_xfs_info xfs_info_set[] = {
                /* the few simple ones we can get from the mount struct */
                { XFS_MOUNT_IKEEP,              "," MNTOPT_IKEEP },
                { XFS_MOUNT_WSYNC,              "," MNTOPT_WSYNC },
                { XFS_MOUNT_NOALIGN,            "," MNTOPT_NOALIGN },
                { XFS_MOUNT_SWALLOC,            "," MNTOPT_SWALLOC },
                { XFS_MOUNT_NOUUID,             "," MNTOPT_NOUUID },
                { XFS_MOUNT_NORECOVERY,         "," MNTOPT_NORECOVERY },
                { XFS_MOUNT_ATTR2,              "," MNTOPT_ATTR2 },
                { XFS_MOUNT_FILESTREAMS,        "," MNTOPT_FILESTREAM },
                { XFS_MOUNT_GRPID,              "," MNTOPT_GRPID },
                { XFS_MOUNT_DISCARD,            "," MNTOPT_DISCARD },
                { 0, NULL }
        };
        static struct proc_xfs_info xfs_info_unset[] = {
                /* the few simple ones we can get from the mount struct */
                { XFS_MOUNT_COMPAT_IOSIZE,      "," MNTOPT_LARGEIO },
                { XFS_MOUNT_BARRIER,            "," MNTOPT_NOBARRIER },
                { XFS_MOUNT_SMALL_INUMS,        "," MNTOPT_64BITINODE },
                { 0, NULL }
        };
        struct proc_xfs_info    *xfs_infop;

        for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
                if (mp->m_flags & xfs_infop->flag)
                        seq_puts(m, xfs_infop->str);
        }
        for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
                if (!(mp->m_flags & xfs_infop->flag))
                        seq_puts(m, xfs_infop->str);
        }

        if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
                seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
                                (int)(1 << mp->m_writeio_log) >> 10);

        if (mp->m_logbufs > 0)
                seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
        if (mp->m_logbsize > 0)
                seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

        if (mp->m_logname)
                seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
        if (mp->m_rtname)
                seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);

        if (mp->m_dalign > 0)
                seq_printf(m, "," MNTOPT_SUNIT "=%d",
                                (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
        if (mp->m_swidth > 0)
                seq_printf(m, "," MNTOPT_SWIDTH "=%d",
                                (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

        if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
                seq_puts(m, "," MNTOPT_USRQUOTA);
        else if (mp->m_qflags & XFS_UQUOTA_ACCT)
                seq_puts(m, "," MNTOPT_UQUOTANOENF);

        /* Either project or group quotas can be active, not both */

        if (mp->m_qflags & XFS_PQUOTA_ACCT) {
                if (mp->m_qflags & XFS_OQUOTA_ENFD)
                        seq_puts(m, "," MNTOPT_PRJQUOTA);
                else
                        seq_puts(m, "," MNTOPT_PQUOTANOENF);
        } else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
                if (mp->m_qflags & XFS_OQUOTA_ENFD)
                        seq_puts(m, "," MNTOPT_GRPQUOTA);
                else
                        seq_puts(m, "," MNTOPT_GQUOTANOENF);
        }

        if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
                seq_puts(m, "," MNTOPT_NOQUOTA);

        return 0;
}
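
/*
 * Example (illustrative): a filesystem mounted with "usrquota" and an
 * external log would contribute a comma-separated string such as
 * ",logdev=/dev/sdc1,usrquota" to the options column of /proc/mounts,
 * assembled from the seq_* calls above.
 */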

__uint64_t
xfs_max_file_offset(
        unsigned int            blockshift)
{
        unsigned int            pagefactor = 1;
        unsigned int            bitshift = BITS_PER_LONG - 1;

        /* Figure out maximum filesize, on Linux this can depend on
         * the filesystem blocksize (on 32 bit platforms).
         * __block_write_begin does this in an [unsigned] long...
         *      page->index << (PAGE_CACHE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
         *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
         * but for smaller blocksizes it is less (bbits = log2 bsize).
         * Note1: get_block_t takes a long (implicit cast from above)
         * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
         * can optionally convert the [unsigned] long from above into
         * an [unsigned] long long.
         */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
        ASSERT(sizeof(sector_t) == 8);
        pagefactor = PAGE_CACHE_SIZE;
        bitshift = BITS_PER_LONG;
# else
        pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

        return (((__uint64_t)pagefactor) << bitshift) - 1;
}
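
/*
 * Worked example (illustrative): on a 32-bit kernel with CONFIG_LBDAF and
 * 4k pages, pagefactor = 4096 and bitshift = 32, so the limit is
 * (4096 << 32) - 1 = 2^44 - 1, i.e. just under 16TB.  Without CONFIG_LBDAF
 * and with 4k blocks, pagefactor = 4096 and bitshift = 31, giving the ~8TB
 * wrap point mentioned in the comment above.  On 64-bit kernels the result
 * is 2^63 - 1.
 */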

STATIC int
xfs_blkdev_get(
        xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
{
        int                     error = 0;

        *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                    mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
        }

        return -error;
}

STATIC void
xfs_blkdev_put(
        struct block_device     *bdev)
{
        if (bdev)
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
}

STATIC void
xfs_close_devices(
        struct xfs_mount        *mp)
{
        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
                xfs_free_buftarg(mp, mp->m_logdev_targp);
                xfs_blkdev_put(logdev);
        }
        if (mp->m_rtdev_targp) {
                struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
                xfs_free_buftarg(mp, mp->m_rtdev_targp);
                xfs_blkdev_put(rtdev);
        }
        xfs_free_buftarg(mp, mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *      (1) device (partition) with data and internal log
 *      (2) logical volume with data and log subvolumes.
 *      (3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
        struct xfs_mount        *mp)
{
        struct block_device     *ddev = mp->m_super->s_bdev;
        struct block_device     *logdev = NULL, *rtdev = NULL;
        int                     error;

        /*
         * Open real time and log devices - order is important.
         */
        if (mp->m_logname) {
                error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
                if (error)
                        goto out;
        }

        if (mp->m_rtname) {
                error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
                if (error)
                        goto out_close_logdev;

                if (rtdev == ddev || rtdev == logdev) {
                        xfs_warn(mp,
        "Cannot mount filesystem with identical rtdev and ddev/logdev.");
                        error = EINVAL;
                        goto out_close_rtdev;
                }
        }

        /*
         * Setup xfs_mount buffer target pointers
         */
        error = ENOMEM;
        mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
        if (!mp->m_ddev_targp)
                goto out_close_rtdev;

        if (rtdev) {
                mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
                                                      mp->m_fsname);
                if (!mp->m_rtdev_targp)
                        goto out_free_ddev_targ;
        }

        if (logdev && logdev != ddev) {
                mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
                                                       mp->m_fsname);
                if (!mp->m_logdev_targp)
                        goto out_free_rtdev_targ;
        } else {
                mp->m_logdev_targp = mp->m_ddev_targp;
        }

        return 0;

out_free_rtdev_targ:
        if (mp->m_rtdev_targp)
                xfs_free_buftarg(mp, mp->m_rtdev_targp);
out_free_ddev_targ:
        xfs_free_buftarg(mp, mp->m_ddev_targp);
out_close_rtdev:
        if (rtdev)
                xfs_blkdev_put(rtdev);
out_close_logdev:
        if (logdev && logdev != ddev)
                xfs_blkdev_put(logdev);
out:
        return error;
}
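
/*
 * Note the unwind order above: on failure, buffer targets are torn down
 * before the underlying block devices are put, mirroring the acquisition
 * order, and the log device is only put when it is distinct from the data
 * device.
 */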

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
        struct xfs_mount        *mp)
{
        int                     error;

        error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
                                    mp->m_sb.sb_sectsize);
        if (error)
                return error;

        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                unsigned int    log_sector_size = BBSIZE;

                if (xfs_sb_version_hassector(&mp->m_sb))
                        log_sector_size = mp->m_sb.sb_logsectsize;
                error = xfs_setsize_buftarg(mp->m_logdev_targp,
                                            mp->m_sb.sb_blocksize,
                                            log_sector_size);
                if (error)
                        return error;
        }
        if (mp->m_rtdev_targp) {
                error = xfs_setsize_buftarg(mp->m_rtdev_targp,
                                            mp->m_sb.sb_blocksize,
                                            mp->m_sb.sb_sectsize);
                if (error)
                        return error;
        }

        return 0;
}
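
/*
 * Example (illustrative): an external log on a filesystem with the
 * sector-size feature bit and sb_logsectsize = 4096 gets a buffer target
 * configured for 4k sectors above, while older filesystems without that
 * bit fall back to the BBSIZE (512 byte) default.
 */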

STATIC int
xfs_init_mount_workqueues(
        struct xfs_mount        *mp)
{
        mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
                        WQ_MEM_RECLAIM, 0, mp->m_fsname);
        if (!mp->m_data_workqueue)
                goto out;

        mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
                        WQ_MEM_RECLAIM, 0, mp->m_fsname);
        if (!mp->m_unwritten_workqueue)
                goto out_destroy_data_iodone_queue;

        return 0;

out_destroy_data_iodone_queue:
        destroy_workqueue(mp->m_data_workqueue);
out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
        struct xfs_mount        *mp)
{
        destroy_workqueue(mp->m_data_workqueue);
        destroy_workqueue(mp->m_unwritten_workqueue);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block      *sb)
{
        BUG();
        return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);

        trace_xfs_destroy_inode(ip);

        XFS_STATS_INC(vn_reclaim);

        /* bad inode, get out here ASAP */
        if (is_bad_inode(inode))
                goto out_reclaim;

        ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);

        /*
         * We should never get here with one of the reclaim flags already set.
         */
        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

        /*
         * We always use background reclaim here because even if the
         * inode is clean, it still may be under IO and hence we have
         * to take the flush lock. The background reclaim path handles
         * this more efficiently than we can here, so simply let background
         * reclaim tear down all inodes.
         */
out_reclaim:
        xfs_inode_set_reclaim_tag(ip);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when the inode is freed.
 */
STATIC void
xfs_fs_inode_init_once(
        void                    *inode)
{
        struct xfs_inode        *ip = inode;

        memset(ip, 0, sizeof(struct xfs_inode));

        /* vfs inode */
        inode_init_once(VFS_I(ip));

        /* xfs inode */
        atomic_set(&ip->i_pincount, 0);
        spin_lock_init(&ip->i_flags_lock);

        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                    "xfsino", ip->i_ino);
}
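
/*
 * Example of the idempotence noted above (illustrative): i_flags_lock and
 * i_lock are initialised here once per slab object and remain valid across
 * repeated alloc/free cycles of that object; per-lifetime state is set up
 * in the inode allocation path instead.
 */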

/*
 * This is called by the VFS when dirtying inode metadata.  This can happen
 * for a few reasons, but we only care about timestamp updates, given that
 * we handled the rest ourselves.  In theory no other calls should happen,
 * but for example generic_write_end() keeps dirtying the inode after
 * updating i_size.  Thus we check that the flags are exactly I_DIRTY_SYNC,
 * and skip this call otherwise.
 *
 * We'll hopefully get a different method just for updating timestamps soon,
 * at which point this hack can go away, and maybe we'll also get real
 * error handling here.
 */
STATIC void
xfs_fs_dirty_inode(
        struct inode            *inode,
        int                     flags)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;

        if (flags != I_DIRTY_SYNC)
                return;

        trace_xfs_dirty_inode(ip);

        tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
        error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                goto trouble;
        }
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        /*
         * Grab all the latest timestamps from the Linux inode.
         */
        ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
        ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
        ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec;
        ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec;
        ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec;
        ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec;

        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
        error = xfs_trans_commit(tp, 0);
        if (error)
                goto trouble;
        return;

trouble:
        xfs_warn(mp, "failed to update timestamps for inode 0x%llx", ip->i_ino);
}

STATIC void
xfs_fs_evict_inode(
        struct inode            *inode)
{
        xfs_inode_t             *ip = XFS_I(inode);

        trace_xfs_evict_inode(ip);

        truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);
        XFS_STATS_INC(vn_rele);
        XFS_STATS_INC(vn_remove);
        XFS_STATS_DEC(vn_active);

        /*
         * The iolock is used by the file system to coordinate reads,
         * writes, and block truncates.  Up to this point the lock
         * protected concurrent accesses by users of the inode.  But
         * from here forward we're doing some final processing of the
         * inode because we're done with it, and although we reuse the
         * iolock for protection it is really a distinct lock class
         * (in the lockdep sense) from before.  To keep lockdep happy
         * (and basically indicate what we are doing), we explicitly
         * re-init the iolock here.
         */
        ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
        lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
                        &xfs_iolock_reclaimable, "xfs_iolock_reclaimable");

        xfs_inactive(ip);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit().  Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);

        return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}
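
/*
 * A nonzero return from ->drop_inode tells the VFS not to cache the inode:
 * it is evicted as soon as the last reference goes away, which is exactly
 * the behaviour XFS_IDONTCACHE asks for.
 */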

STATIC void
xfs_free_fsname(
        struct xfs_mount        *mp)
{
        kfree(mp->m_fsname);
        kfree(mp->m_rtname);
        kfree(mp->m_logname);
}

STATIC void
xfs_fs_put_super(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_syncd_stop(mp);

        /*
         * Blow away any referenced inode in the filestreams cache.
         * This can and will cause log traffic as inodes go inactive
         * here.
         */
        xfs_filestream_unmount(mp);

        xfs_flush_buftarg(mp->m_ddev_targp, 1);

        xfs_unmountfs(mp);
        xfs_freesb(mp);
        xfs_icsb_destroy_counters(mp);
        xfs_destroy_mount_workqueues(mp);
        xfs_close_devices(mp);
        xfs_free_fsname(mp);
        kfree(mp);
}

STATIC int
xfs_fs_sync_fs(
        struct super_block      *sb,
        int                     wait)
{
        struct xfs_mount        *mp = XFS_M(sb);
        int                     error;

        /*
         * Doing anything during the async pass would be counterproductive.
         */
        if (!wait)
                return 0;

        error = xfs_quiesce_data(mp);
        if (error)
                return -error;

        if (laptop_mode) {
                /*
                 * The disk must be active because we're syncing.
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                flush_delayed_work_sync(&mp->m_sync_work);
        }

        return 0;
}

STATIC int
xfs_fs_statfs(
        struct dentry           *dentry,
        struct kstatfs          *statp)
{
        struct xfs_mount        *mp = XFS_M(dentry->d_sb);
        xfs_sb_t                *sbp = &mp->m_sb;
        struct xfs_inode        *ip = XFS_I(dentry->d_inode);
        __uint64_t              fakeinos, id;
        xfs_extlen_t            lsize;
        __int64_t               ffree;

        statp->f_type = XFS_SB_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;

        id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
        statp->f_fsid.val[0] = (u32)id;
        statp->f_fsid.val[1] = (u32)(id >> 32);

        xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);

        spin_lock(&mp->m_sb_lock);
        statp->f_bsize = sbp->sb_blocksize;
        lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
        statp->f_blocks = sbp->sb_dblocks - lsize;
        statp->f_bfree = statp->f_bavail =
                sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
        fakeinos = statp->f_bfree << sbp->sb_inopblog;
        statp->f_files =
            MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
        if (mp->m_maxicount)
                statp->f_files = min_t(typeof(statp->f_files),
                                        statp->f_files,
                                        mp->m_maxicount);

        /* make sure statp->f_ffree does not underflow */
        ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
        statp->f_ffree = max_t(__int64_t, ffree, 0);

        spin_unlock(&mp->m_sb_lock);

        if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
            ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
                              (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
                xfs_qm_statvfs(ip, statp);
        return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
        __uint64_t resblks = 0;

        mp->m_resblks_save = mp->m_resblks;
        xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
        __uint64_t resblks;

        if (mp->m_resblks_save) {
                resblks = mp->m_resblks_save;
                mp->m_resblks_save = 0;
        } else
                resblks = xfs_default_resblks(mp);

        xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC int
xfs_fs_remount(
        struct super_block      *sb,
        int                     *flags,
        char                    *options)
{
        struct xfs_mount        *mp = XFS_M(sb);
        substring_t             args[MAX_OPT_ARGS];
        char                    *p;
        int                     error;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;

                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_barrier:
                        mp->m_flags |= XFS_MOUNT_BARRIER;
                        break;
                case Opt_nobarrier:
                        mp->m_flags &= ~XFS_MOUNT_BARRIER;
                        break;
                default:
                        /*
                         * Logically we would return an error here to prevent
                         * users from believing they might have changed
                         * mount options using remount which can't be changed.
                         *
                         * But unfortunately mount(8) adds all options from
                         * mtab and fstab to the mount arguments in some cases
                         * so we can't blindly reject options, but have to
                         * check for each specified option if it actually
                         * differs from the currently set option and only
                         * reject it if that's the case.
                         *
                         * Until that is implemented we return success for
                         * every remount request, and silently ignore all
                         * options that we can't actually change.
                         */
#if 0
                        xfs_info(mp,
                "mount option \"%s\" not supported for remount\n", p);
                        return -EINVAL;
#else
                        break;
#endif
                }
        }

        /* ro -> rw */
        if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
                mp->m_flags &= ~XFS_MOUNT_RDONLY;

                /*
                 * If this is the first remount to writeable state we
                 * might have some superblock changes to update.
                 */
                if (mp->m_update_flags) {
                        error = xfs_mount_log_sb(mp, mp->m_update_flags);
                        if (error) {
                                xfs_warn(mp, "failed to write sb changes");
                                return error;
                        }
                        mp->m_update_flags = 0;
                }

                /*
                 * Fill out the reserve pool if it is empty. Use the stashed
                 * value if it is non-zero, otherwise go with the default.
                 */
                xfs_restore_resvblks(mp);
        }

        /* rw -> ro */
        if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
                /*
                 * After we have synced the data but before we sync the
                 * metadata, we need to free up the reserve block pool so that
                 * the used block count in the superblock on disk is correct at
                 * the end of the remount. Stash the current reserve pool size
                 * so that if we get remounted rw, we can return it to the same
                 * size.
                 */

                xfs_quiesce_data(mp);
                xfs_save_resvblks(mp);
                xfs_quiesce_attr(mp);
                mp->m_flags |= XFS_MOUNT_RDONLY;
        }

        return 0;
}
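
/*
 * Example (illustrative):
 *
 *      mount -o remount,nobarrier /mnt   clears XFS_MOUNT_BARRIER
 *      mount -o remount,ro /mnt          quiesces the fs and sets
 *                                        XFS_MOUNT_RDONLY
 *
 * Any other option in the remount string is currently accepted and
 * silently ignored, per the comment in the default: case above.
 */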

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
STATIC int
xfs_fs_freeze(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_save_resvblks(mp);
        xfs_quiesce_attr(mp);
        return -xfs_fs_log_dummy(mp);
}

STATIC int
xfs_fs_unfreeze(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_restore_resvblks(mp);
        return 0;
}

STATIC int
xfs_fs_show_options(
        struct seq_file         *m,
        struct dentry           *root)
{
        return -xfs_showargs(XFS_M(root->d_sb), m);
}
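
/*
 * Example (illustrative): "fsfreeze -f /mnt" reaches xfs_fs_freeze() via
 * the VFS freeze path once data writeback has been stopped, and a later
 * "fsfreeze -u /mnt" lands in xfs_fs_unfreeze() to refill the reserve
 * block pool.
 */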

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
        struct xfs_mount        *mp)
{
        int                     ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

        /* Fail a mount where the logbuf is smaller than the log stripe */
        if (xfs_sb_version_haslogv2(&mp->m_sb)) {
                if (mp->m_logbsize <= 0 &&
                    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
                        mp->m_logbsize = mp->m_sb.sb_logsunit;
                } else if (mp->m_logbsize > 0 &&
                           mp->m_logbsize < mp->m_sb.sb_logsunit) {
                        xfs_warn(mp,
                "logbuf size must be greater than or equal to log stripe size");
                        return XFS_ERROR(EINVAL);
                }
        } else {
                /* Fail a mount if the logbuf is larger than 32K */
                if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
                        xfs_warn(mp,
                "logbuf size for version 1 logs must be 16K or 32K");
                        return XFS_ERROR(EINVAL);
                }
        }

        /*
         * mkfs'ed attr2 will turn on attr2 mount unless explicitly
         * told by noattr2 to turn it off
         */
        if (xfs_sb_version_hasattr2(&mp->m_sb) &&
            !(mp->m_flags & XFS_MOUNT_NOATTR2))
                mp->m_flags |= XFS_MOUNT_ATTR2;

        /*
         * prohibit r/w mounts of read-only filesystems
         */
        if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
                xfs_warn(mp,
                        "cannot mount a read-only filesystem as read-write");
                return XFS_ERROR(EROFS);
        }

        return 0;
}
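
/*
 * Example (illustrative): a v2 log with a 64k stripe unit
 * (sb_logsunit = 65536) and no logbsize= option gets m_logbsize bumped to
 * 65536 above, whereas an explicit "logbsize=32k" on the same filesystem
 * fails the mount.
 */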

STATIC int
xfs_fs_fill_super(
        struct super_block      *sb,
        void                    *data,
        int                     silent)
{
        struct inode            *root;
        struct xfs_mount        *mp = NULL;
        int                     flags = 0, error = ENOMEM;

        mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
        if (!mp)
                goto out;

        spin_lock_init(&mp->m_sb_lock);
        mutex_init(&mp->m_growlock);
        atomic_set(&mp->m_active_trans, 0);

        mp->m_super = sb;
        sb->s_fs_info = mp;

        error = xfs_parseargs(mp, (char *)data);
        if (error)
                goto out_free_fsname;

        sb_min_blocksize(sb, BBSIZE);
        sb->s_xattr = xfs_xattr_handlers;
        sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
        sb->s_qcop = &xfs_quotactl_operations;
#endif
        sb->s_op = &xfs_super_operations;

        if (silent)
                flags |= XFS_MFSI_QUIET;

        error = xfs_open_devices(mp);
        if (error)
                goto out_free_fsname;

        error = xfs_init_mount_workqueues(mp);
        if (error)
                goto out_close_devices;

        error = xfs_icsb_init_counters(mp);
        if (error)
                goto out_destroy_workqueues;

        error = xfs_readsb(mp, flags);
        if (error)
                goto out_destroy_counters;

        error = xfs_finish_flags(mp);
        if (error)
                goto out_free_sb;

        error = xfs_setup_devices(mp);
        if (error)
                goto out_free_sb;

        error = xfs_filestream_mount(mp);
        if (error)
                goto out_free_sb;

        /*
         * we must configure the block size in the superblock before we run the
         * full mount process as the mount process can lookup and cache inodes.
         * For the same reason we must also initialise the syncd and register
         * the inode cache shrinker so that inodes can be reclaimed during
         * operations like a quotacheck that iterate all inodes in the
         * filesystem.
         */
        sb->s_magic = XFS_SB_MAGIC;
        sb->s_blocksize = mp->m_sb.sb_blocksize;
        sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
        sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
        sb->s_max_links = XFS_MAXLINK;
        sb->s_time_gran = 1;
        set_posix_acl_flag(sb);

        error = xfs_mountfs(mp);
        if (error)
                goto out_filestream_unmount;

        error = xfs_syncd_init(mp);
        if (error)
                goto out_unmount;

        root = igrab(VFS_I(mp->m_rootip));
        if (!root) {
                error = ENOENT;
                goto out_syncd_stop;
        }
        if (is_bad_inode(root)) {
                error = EINVAL;
                goto out_syncd_stop;
        }
        sb->s_root = d_make_root(root);
        if (!sb->s_root) {
                error = ENOMEM;
                goto out_syncd_stop;
        }

        return 0;

out_filestream_unmount:
        xfs_filestream_unmount(mp);
out_free_sb:
        xfs_freesb(mp);
out_destroy_counters:
        xfs_icsb_destroy_counters(mp);
out_destroy_workqueues:
        xfs_destroy_mount_workqueues(mp);
out_close_devices:
        xfs_close_devices(mp);
out_free_fsname:
        xfs_free_fsname(mp);
        kfree(mp);
out:
        return -error;

out_syncd_stop:
        xfs_syncd_stop(mp);
out_unmount:
        /*
         * Blow away any referenced inode in the filestreams cache.
         * This can and will cause log traffic as inodes go inactive
         * here.
         */
        xfs_filestream_unmount(mp);

        xfs_flush_buftarg(mp->m_ddev_targp, 1);

        xfs_unmountfs(mp);
        goto out_free_sb;
}
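
/*
 * Note the sign convention above: XFS passes positive errno values around
 * internally, so the failure path returns -error to hand the VFS the
 * negative errno it expects.
 */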

STATIC struct dentry *
xfs_fs_mount(
        struct file_system_type *fs_type,
        int                     flags,
        const char              *dev_name,
        void                    *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}

static int
xfs_fs_nr_cached_objects(
        struct super_block      *sb)
{
        return xfs_reclaim_inodes_count(XFS_M(sb));
}

static void
xfs_fs_free_cached_objects(
        struct super_block      *sb,
        int                     nr_to_scan)
{
        xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .dirty_inode            = xfs_fs_dirty_inode,
        .evict_inode            = xfs_fs_evict_inode,
        .drop_inode             = xfs_fs_drop_inode,
        .put_super              = xfs_fs_put_super,
        .sync_fs                = xfs_fs_sync_fs,
        .freeze_fs              = xfs_fs_freeze,
        .unfreeze_fs            = xfs_fs_unfreeze,
        .statfs                 = xfs_fs_statfs,
        .remount_fs             = xfs_fs_remount,
        .show_options           = xfs_fs_show_options,
        .nr_cached_objects      = xfs_fs_nr_cached_objects,
        .free_cached_objects    = xfs_fs_free_cached_objects,
};

static struct file_system_type xfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "xfs",
        .mount                  = xfs_fs_mount,
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV,
};

STATIC int __init
xfs_init_zones(void)
{
        xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
        if (!xfs_ioend_zone)
                goto out;

        xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
                                                  xfs_ioend_zone);
        if (!xfs_ioend_pool)
                goto out_destroy_ioend_zone;

        xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
                                                "xfs_log_ticket");
        if (!xfs_log_ticket_zone)
                goto out_destroy_ioend_pool;

        xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
                                                "xfs_bmap_free_item");
        if (!xfs_bmap_free_item_zone)
                goto out_destroy_log_ticket_zone;

        xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
                                                "xfs_btree_cur");
        if (!xfs_btree_cur_zone)
                goto out_destroy_bmap_free_item_zone;

        xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
                                                "xfs_da_state");
        if (!xfs_da_state_zone)
                goto out_destroy_btree_cur_zone;

        xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
        if (!xfs_dabuf_zone)
                goto out_destroy_da_state_zone;

        xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
        if (!xfs_ifork_zone)
                goto out_destroy_dabuf_zone;

        xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
        if (!xfs_trans_zone)
                goto out_destroy_ifork_zone;

        xfs_log_item_desc_zone =
                kmem_zone_init(sizeof(struct xfs_log_item_desc),
                               "xfs_log_item_desc");
        if (!xfs_log_item_desc_zone)
                goto out_destroy_trans_zone;

        /*
         * The size of the zone allocated buf log item is the maximum
         * size possible under XFS. This wastes a little bit of memory,
         * but it is much faster.
         */
        xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
                                (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
                                  NBWORD) * sizeof(int))), "xfs_buf_item");
        if (!xfs_buf_item_zone)
                goto out_destroy_log_item_desc_zone;

        xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
                        ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
                         sizeof(xfs_extent_t))), "xfs_efd_item");
        if (!xfs_efd_zone)
                goto out_destroy_buf_item_zone;

        xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
                        ((XFS_EFI_MAX_FAST_EXTENTS - 1) *
                         sizeof(xfs_extent_t))), "xfs_efi_item");
        if (!xfs_efi_zone)
                goto out_destroy_efd_zone;

        xfs_inode_zone =
                kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
                        KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
                        xfs_fs_inode_init_once);
        if (!xfs_inode_zone)
                goto out_destroy_efi_zone;

        xfs_ili_zone =
                kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
                                        KM_ZONE_SPREAD, NULL);
        if (!xfs_ili_zone)
                goto out_destroy_inode_zone;

        return 0;

out_destroy_inode_zone:
        kmem_zone_destroy(xfs_inode_zone);
out_destroy_efi_zone:
        kmem_zone_destroy(xfs_efi_zone);
out_destroy_efd_zone:
        kmem_zone_destroy(xfs_efd_zone);
out_destroy_buf_item_zone:
        kmem_zone_destroy(xfs_buf_item_zone);
out_destroy_log_item_desc_zone:
        kmem_zone_destroy(xfs_log_item_desc_zone);
out_destroy_trans_zone:
        kmem_zone_destroy(xfs_trans_zone);
out_destroy_ifork_zone:
        kmem_zone_destroy(xfs_ifork_zone);
out_destroy_dabuf_zone:
        kmem_zone_destroy(xfs_dabuf_zone);
out_destroy_da_state_zone:
        kmem_zone_destroy(xfs_da_state_zone);
out_destroy_btree_cur_zone:
        kmem_zone_destroy(xfs_btree_cur_zone);
out_destroy_bmap_free_item_zone:
        kmem_zone_destroy(xfs_bmap_free_item_zone);
out_destroy_log_ticket_zone:
        kmem_zone_destroy(xfs_log_ticket_zone);
out_destroy_ioend_pool:
        mempool_destroy(xfs_ioend_pool);
out_destroy_ioend_zone:
        kmem_zone_destroy(xfs_ioend_zone);
out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
        kmem_zone_destroy(xfs_ili_zone);
        kmem_zone_destroy(xfs_inode_zone);
        kmem_zone_destroy(xfs_efi_zone);
        kmem_zone_destroy(xfs_efd_zone);
        kmem_zone_destroy(xfs_buf_item_zone);
        kmem_zone_destroy(xfs_log_item_desc_zone);
        kmem_zone_destroy(xfs_trans_zone);
        kmem_zone_destroy(xfs_ifork_zone);
        kmem_zone_destroy(xfs_dabuf_zone);
        kmem_zone_destroy(xfs_da_state_zone);
        kmem_zone_destroy(xfs_btree_cur_zone);
        kmem_zone_destroy(xfs_bmap_free_item_zone);
        kmem_zone_destroy(xfs_log_ticket_zone);
        mempool_destroy(xfs_ioend_pool);
        kmem_zone_destroy(xfs_ioend_zone);
}

STATIC int __init
xfs_init_workqueues(void)
{
        /*
         * We never want the same work item to run twice: reclaiming inodes
         * or idling the log is not going to get any faster with multiple
         * CPUs competing for resources.  Use the default large max_active
         * value so that even lots of filesystems can perform these tasks
         * in parallel.
         */
        xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_NON_REENTRANT, 0);
        if (!xfs_syncd_wq)
                return -ENOMEM;

        /*
         * The allocation workqueue can be used in memory reclaim situations
         * (writepage path), and parallelism is only limited by the number of
         * AGs in all the filesystems mounted. Hence use the default large
         * max_active value for this workqueue.
         */
        xfs_alloc_wq = alloc_workqueue("xfsalloc", WQ_MEM_RECLAIM, 0);
        if (!xfs_alloc_wq)
                goto out_destroy_syncd;

        return 0;

out_destroy_syncd:
        destroy_workqueue(xfs_syncd_wq);
        return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
        destroy_workqueue(xfs_alloc_wq);
        destroy_workqueue(xfs_syncd_wq);
}

STATIC int __init
init_xfs_fs(void)
{
        int                     error;

        printk(KERN_INFO XFS_VERSION_STRING " with "
                         XFS_BUILD_OPTIONS " enabled\n");

        xfs_dir_startup();

        error = xfs_init_zones();
        if (error)
                goto out;

        error = xfs_init_workqueues();
        if (error)
                goto out_destroy_zones;

        error = xfs_mru_cache_init();
        if (error)
                goto out_destroy_wq;

        error = xfs_filestream_init();
        if (error)
                goto out_mru_cache_uninit;

        error = xfs_buf_init();
        if (error)
                goto out_filestream_uninit;

        error = xfs_init_procfs();
        if (error)
                goto out_buf_terminate;

        error = xfs_sysctl_register();
        if (error)
                goto out_cleanup_procfs;

        error = xfs_qm_init();
        if (error)
                goto out_sysctl_unregister;

        error = register_filesystem(&xfs_fs_type);
        if (error)
                goto out_qm_exit;
        return 0;

out_qm_exit:
        xfs_qm_exit();
out_sysctl_unregister:
        xfs_sysctl_unregister();
out_cleanup_procfs:
        xfs_cleanup_procfs();
out_buf_terminate:
        xfs_buf_terminate();
out_filestream_uninit:
        xfs_filestream_uninit();
out_mru_cache_uninit:
        xfs_mru_cache_uninit();
out_destroy_wq:
        xfs_destroy_workqueues();
out_destroy_zones:
        xfs_destroy_zones();
out:
        return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
        xfs_qm_exit();
        unregister_filesystem(&xfs_fs_type);
        xfs_sysctl_unregister();
        xfs_cleanup_procfs();
        xfs_buf_terminate();
        xfs_filestream_uninit();
        xfs_mru_cache_uninit();
        xfs_destroy_workqueues();
        xfs_destroy_zones();
}
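
/*
 * Example (illustrative): "modprobe xfs" runs init_xfs_fs() via
 * module_init() below, printing the version banner and registering the
 * filesystem type; "rmmod xfs" unwinds through exit_xfs_fs().
 */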

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");