1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/sched.h> 10 #include <linux/slab.h> 11 #include <linux/spinlock.h> 12 #include <linux/completion.h> 13 #include <linux/buffer_head.h> 14 #include <linux/blkdev.h> 15 #include <linux/kthread.h> 16 #include <linux/export.h> 17 #include <linux/namei.h> 18 #include <linux/mount.h> 19 #include <linux/gfs2_ondisk.h> 20 #include <linux/quotaops.h> 21 #include <linux/lockdep.h> 22 #include <linux/module.h> 23 #include <linux/backing-dev.h> 24 25 #include "gfs2.h" 26 #include "incore.h" 27 #include "bmap.h" 28 #include "glock.h" 29 #include "glops.h" 30 #include "inode.h" 31 #include "recovery.h" 32 #include "rgrp.h" 33 #include "super.h" 34 #include "sys.h" 35 #include "util.h" 36 #include "log.h" 37 #include "quota.h" 38 #include "dir.h" 39 #include "meta_io.h" 40 #include "trace_gfs2.h" 41 #include "lops.h" 42 43 #define DO 0 44 #define UNDO 1 45 46 /** 47 * gfs2_tune_init - Fill a gfs2_tune structure with default values 48 * @gt: tune 49 * 50 */ 51 52 static void gfs2_tune_init(struct gfs2_tune *gt) 53 { 54 spin_lock_init(>->gt_spin); 55 56 gt->gt_quota_warn_period = 10; 57 gt->gt_quota_scale_num = 1; 58 gt->gt_quota_scale_den = 1; 59 gt->gt_new_files_jdata = 0; 60 gt->gt_max_readahead = BIT(18); 61 gt->gt_complain_secs = 10; 62 } 63 64 void free_sbd(struct gfs2_sbd *sdp) 65 { 66 if (sdp->sd_lkstats) 67 free_percpu(sdp->sd_lkstats); 68 kfree(sdp); 69 } 70 71 static struct gfs2_sbd *init_sbd(struct super_block *sb) 72 { 73 struct gfs2_sbd *sdp; 74 struct address_space *mapping; 75 76 sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL); 77 if (!sdp) 78 return NULL; 79 80 sdp->sd_vfs = sb; 81 sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats); 82 if (!sdp->sd_lkstats) 83 goto fail; 84 sb->s_fs_info = sdp; 85 86 
set_bit(SDF_NOJOURNALID, &sdp->sd_flags); 87 gfs2_tune_init(&sdp->sd_tune); 88 89 init_waitqueue_head(&sdp->sd_glock_wait); 90 atomic_set(&sdp->sd_glock_disposal, 0); 91 init_completion(&sdp->sd_locking_init); 92 init_completion(&sdp->sd_wdack); 93 spin_lock_init(&sdp->sd_statfs_spin); 94 95 spin_lock_init(&sdp->sd_rindex_spin); 96 sdp->sd_rindex_tree.rb_node = NULL; 97 98 INIT_LIST_HEAD(&sdp->sd_jindex_list); 99 spin_lock_init(&sdp->sd_jindex_spin); 100 mutex_init(&sdp->sd_jindex_mutex); 101 init_completion(&sdp->sd_journal_ready); 102 103 INIT_LIST_HEAD(&sdp->sd_quota_list); 104 mutex_init(&sdp->sd_quota_mutex); 105 mutex_init(&sdp->sd_quota_sync_mutex); 106 init_waitqueue_head(&sdp->sd_quota_wait); 107 INIT_LIST_HEAD(&sdp->sd_trunc_list); 108 spin_lock_init(&sdp->sd_trunc_lock); 109 spin_lock_init(&sdp->sd_bitmap_lock); 110 111 mapping = &sdp->sd_aspace; 112 113 address_space_init_once(mapping); 114 mapping->a_ops = &gfs2_rgrp_aops; 115 mapping->host = sb->s_bdev->bd_inode; 116 mapping->flags = 0; 117 mapping_set_gfp_mask(mapping, GFP_NOFS); 118 mapping->private_data = NULL; 119 mapping->writeback_index = 0; 120 121 spin_lock_init(&sdp->sd_log_lock); 122 atomic_set(&sdp->sd_log_pinned, 0); 123 INIT_LIST_HEAD(&sdp->sd_log_revokes); 124 INIT_LIST_HEAD(&sdp->sd_log_ordered); 125 spin_lock_init(&sdp->sd_ordered_lock); 126 127 init_waitqueue_head(&sdp->sd_log_waitq); 128 init_waitqueue_head(&sdp->sd_logd_waitq); 129 spin_lock_init(&sdp->sd_ail_lock); 130 INIT_LIST_HEAD(&sdp->sd_ail1_list); 131 INIT_LIST_HEAD(&sdp->sd_ail2_list); 132 133 init_rwsem(&sdp->sd_log_flush_lock); 134 atomic_set(&sdp->sd_log_in_flight, 0); 135 atomic_set(&sdp->sd_reserving_log, 0); 136 init_waitqueue_head(&sdp->sd_reserving_log_wait); 137 init_waitqueue_head(&sdp->sd_log_flush_wait); 138 atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN); 139 mutex_init(&sdp->sd_freeze_mutex); 140 141 return sdp; 142 143 fail: 144 free_sbd(sdp); 145 return NULL; 146 } 147 148 /** 149 * gfs2_check_sb - Check 
superblock
 * @sdp: the filesystem
 * @silent: Don't print a message if the check fails
 *
 * Checks the version code of the FS is one that we understand how to
 * read and that the sizes of the various on-disk structures have not
 * changed.
 */

static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
{
	struct gfs2_sb_host *sb = &sdp->sd_sb;

	if (sb->sb_magic != GFS2_MAGIC ||
	    sb->sb_type != GFS2_METATYPE_SB) {
		if (!silent)
			pr_warn("not a GFS2 filesystem\n");
		return -EINVAL;
	}

	/* If format numbers match exactly, we're done. */

	if (sb->sb_fs_format == GFS2_FORMAT_FS &&
	    sb->sb_multihost_format == GFS2_FORMAT_MULTI)
		return 0;

	fs_warn(sdp, "Unknown on-disk format, unable to mount\n");

	return -EINVAL;
}

/* bio completion handler for the raw superblock read done by
   gfs2_read_super(): mark the page uptodate on success and unlock it
   to wake the waiting reader. */
static void end_bio_io_page(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (!bio->bi_status)
		SetPageUptodate(page);
	else
		pr_warn("error %d reading superblock\n", bio->bi_status);
	unlock_page(page);
}

/* Decode the on-disk (big-endian) superblock at @buf into the in-core
   sd_sb, and copy the filesystem UUID into the VFS super block. */
static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
{
	struct gfs2_sb_host *sb = &sdp->sd_sb;
	struct super_block *s = sdp->sd_vfs;
	const struct gfs2_sb *str = buf;

	sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
	sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
	sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
	sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
	sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
	sb->sb_bsize = be32_to_cpu(str->sb_bsize);
	sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
	sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
	sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
	sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
	sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);

	memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
	memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
	memcpy(&s->s_uuid, str->sb_uuid, 16);
}

/**
 * gfs2_read_super - Read the gfs2 super block from disk
 * @sdp: The GFS2 super block
 * @sector: The location of the super block
 * @silent: Don't print a message if the check fails
 *
 * This uses the bio functions to read the super block from disk
 * because we want to be 100% sure that we never read cached data.
 * A super block is read twice only during each GFS2 mount and is
 * never written to by the filesystem. The first time its read no
 * locks are held, and the only details which are looked at are those
 * relating to the locking protocol. Once locking is up and working,
 * the sb is read again under the lock to establish the location of
 * the master directory (contains pointers to journals etc) and the
 * root directory.
 *
 * Returns: 0 on success or error
 */

static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_sb *p;
	struct page *page;
	struct bio *bio;

	page = alloc_page(GFP_NOFS);
	if (unlikely(!page))
		return -ENOMEM;

	ClearPageUptodate(page);
	ClearPageDirty(page);
	lock_page(page);

	/* Read straight from the device, bypassing the page cache;
	   end_bio_io_page() unlocks the page when the I/O completes. */
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
	bio_set_dev(bio, sb->s_bdev);
	bio_add_page(bio, page, PAGE_SIZE, 0);

	bio->bi_end_io = end_bio_io_page;
	bio->bi_private = page;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
	submit_bio(bio);
	wait_on_page_locked(page);
	bio_put(bio);
	if (!PageUptodate(page)) {
		__free_page(page);
		return -EIO;
	}
	p = kmap(page);
	gfs2_sb_in(sdp, p);
	kunmap(page);
	__free_page(page);
	return gfs2_check_sb(sdp, silent);
}

/**
 * gfs2_read_sb - Read super block
 * @sdp: The GFS2 superblock
 * @silent: Don't print message if mount fails
 *
 * Reads the superblock (using the preliminary sd_fsb2bb_shift set up
 * by fill_super) and derives all of the block-geometry constants that
 * depend on the real on-disk block size.
 */

static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
{
	u32 hash_blocks, ind_blocks, leaf_blocks;
	u32 tmp_blocks;
	unsigned int x;
	int error;

	error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
	if (error) {
		if (!silent)
			fs_err(sdp, "can't read superblock\n");
		return error;
	}

	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_dinode)) / sizeof(u64);
	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / sizeof(u64);
	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) /
			        sizeof(struct gfs2_quota_change);
	sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
				     sizeof(struct gfs2_meta_header))
		* GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */

	/* Compute maximum reservation required to add a entry to a directory */

	hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
				   sdp->sd_jbsize);

	ind_blocks = 0;
	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
		tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
		ind_blocks += tmp_blocks;
	}

	leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;

	/* Compute the metadata-tree size reachable at each height, stopping
	   at the first height whose capacity overflows 64 bits (detected by
	   checking the multiplication via do_div round-trip). */
	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_dinode);
	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_heightsize[x - 1] || m)
			break;
		sdp->sd_heightsize[x] = space;
	}
	sdp->sd_max_height = x;
	sdp->sd_heightsize[x] = ~0;
	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);

	sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
				      sizeof(struct gfs2_leaf)) /
				     GFS2_MIN_DIRENT_SIZE;
	return 0;
}

/* Determine the lock protocol and lock table names, either from the
   mount arguments or (autodetected) from the on-disk superblock. */
static int init_names(struct gfs2_sbd *sdp, int silent)
{
	char *proto, *table;
	int error = 0;

	proto = sdp->sd_args.ar_lockproto;
	table = sdp->sd_args.ar_locktable;

	/* Try to autodetect */

	if (!proto[0] || !table[0]) {
		error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
		if (error)
			return error;

		if (!proto[0])
			proto = sdp->sd_sb.sb_lockproto;
		if (!table[0])
			table = sdp->sd_sb.sb_locktable;
	}

	/* Fall back to the device name if no table was specified anywhere. */
	if (!table[0])
		table = sdp->sd_vfs->s_id;

	strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
	strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);

	/* '/' is not allowed in a lock table name; map it to '_'. */
	table = sdp->sd_table_name;
	while ((table = strchr(table, '/')))
		*table = '_';

	return error;
}

/* Acquire (or with @undo, release) the non-disk glocks needed during
   mount: the mount and live glocks, plus the rename and freeze glocks.
   The fail_* labels double as the undo path, entered at fail_trans. */
static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
			int undo)
{
	int error = 0;

	if (undo)
		goto fail_trans;

	error = gfs2_glock_nq_num(sdp,
				  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
				  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
				  mount_gh);
	if (error) {
		fs_err(sdp, "can't acquire mount glock: %d\n", error);
		goto fail;
	}

	error = gfs2_glock_nq_num(sdp,
				  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
				  LM_ST_SHARED,
				  LM_FLAG_NOEXP | GL_EXACT,
				  &sdp->sd_live_gh);
	if (error) {
		fs_err(sdp, "can't acquire live glock: %d\n", error);
		goto fail_mount;
	}

	error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
			       CREATE, &sdp->sd_rename_gl);
	if (error) {
		fs_err(sdp, "can't create rename glock: %d\n", error);
		goto fail_live;
	}

	error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
			       CREATE, &sdp->sd_freeze_gl);
	if (error) {
		fs_err(sdp, "can't create transaction glock: %d\n", error);
		goto fail_rename;
	}

	return 0;

fail_trans:
	gfs2_glock_put(sdp->sd_freeze_gl);
fail_rename:
	gfs2_glock_put(sdp->sd_rename_gl);
fail_live:
	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
fail_mount:
	gfs2_glock_dq_uninit(mount_gh);
fail:
	return error;
}

/* Look up the directory inode at @no_addr and wrap it in a root dentry
   stored through @dptr.  Used for both the root and master directories. */
static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
			    u64 no_addr, const char *name)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct dentry *dentry;
	struct inode *inode;

	inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
				  GFS2_BLKST_FREE /* ignore */);
	if (IS_ERR(inode)) {
		fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
		return PTR_ERR(inode);
	}
	dentry = d_make_root(inode);
	if (!dentry) {
		fs_err(sdp, "can't alloc %s dentry\n", name);
		return -ENOMEM;
	}
	*dptr = dentry;
	return 0;
}

/* Read the superblock for real (under the SB glock), validate the block
   size against the device and page size, and look up the root and
   master directories. */
static int init_sb(struct gfs2_sbd *sdp, int silent)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_holder sb_gh;
	u64 no_addr;
	int ret;

	ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
				LM_ST_SHARED, 0, &sb_gh);
	if (ret) {
		fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
		return ret;
	}

	ret = gfs2_read_sb(sdp, silent);
	if (ret) {
		fs_err(sdp, "can't read superblock: %d\n", ret);
		goto out;
	}

	/* Set up the buffer cache and SB for real */
	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
		ret = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too small for device "
		       "block size (%u)\n",
		       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
		goto out;
	}
	if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
		ret = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too big for machine "
		       "page size (%u)\n",
		       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
		goto out;
	}
	sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);

	/* Get the root inode */
	no_addr = sdp->sd_sb.sb_root_dir.no_addr;
	ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
	if (ret)
		goto out;

	/* Get the master inode */
	no_addr = sdp->sd_sb.sb_master_dir.no_addr;
	ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
	if (ret) {
		dput(sdp->sd_root_dir);
		goto out;
	}
	/* Meta mounts expose the master directory instead of the root. */
	sb->s_root = dget(sdp->sd_args.ar_meta ?
sdp->sd_master_dir : sdp->sd_root_dir);
out:
	gfs2_glock_dq_uninit(&sb_gh);
	return ret;
}

/* Tell the lock module (and userspace, via uevent) that the first
   mounter has finished recovery, so other nodes may now mount. */
static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
{
	char *message = "FIRSTMOUNT=Done";
	char *envp[] = { message, NULL };

	fs_info(sdp, "first mount done, others may mount\n");

	if (sdp->sd_lockstruct.ls_ops->lm_first_done)
		sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);

	kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
}

/**
 * gfs2_jindex_hold - Grab a lock on the jindex
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock
 *
 * Enumerates "journal0", "journal1", ... in the jindex directory,
 * building a gfs2_jdesc for each.  On normal exit (all journals found)
 * the jindex glock is left held in @ji_gh.
 *
 * Returns: errno
 */

static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
{
	struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
	struct qstr name;
	char buf[20];
	struct gfs2_jdesc *jd;
	int error;

	name.name = buf;

	mutex_lock(&sdp->sd_jindex_mutex);

	for (;;) {
		/* Re-acquire the glock each pass so gfs2_lookupi below
		   runs without it held. */
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
		if (error)
			break;

		name.len = sprintf(buf, "journal%u", sdp->sd_journals);
		name.hash = gfs2_disk_hash(name.name, name.len);

		error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
		if (error == -ENOENT) {
			/* No more journals; leave ji_gh held for the caller. */
			error = 0;
			break;
		}

		gfs2_glock_dq_uninit(ji_gh);

		if (error)
			break;

		error = -ENOMEM;
		jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
		if (!jd)
			break;

		INIT_LIST_HEAD(&jd->extent_list);
		INIT_LIST_HEAD(&jd->jd_revoke_list);

		INIT_WORK(&jd->jd_work, gfs2_recover_func);
		jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
		if (IS_ERR_OR_NULL(jd->jd_inode)) {
			if (!jd->jd_inode)
				error = -ENOENT;
			else
				error = PTR_ERR(jd->jd_inode);
			kfree(jd);
			break;
		}

		spin_lock(&sdp->sd_jindex_spin);
		jd->jd_jid = sdp->sd_journals++;
		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
		spin_unlock(&sdp->sd_jindex_spin);
	}

	mutex_unlock(&sdp->sd_jindex_mutex);

	return error;
}

/**
 * check_journal_clean - Make sure a journal is clean for a spectator mount
 * @sdp: The GFS2 superblock
 * @jd: The journal descriptor
 *
 * Returns: 0 if the journal is clean or locked, else an error
 */
static int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	int error;
	struct gfs2_holder j_gh;
	struct gfs2_log_header_host head;
	struct gfs2_inode *ip;

	ip = GFS2_I(jd->jd_inode);
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
				   GL_EXACT | GL_NOCACHE, &j_gh);
	if (error) {
		fs_err(sdp, "Error locking journal for spectator mount.\n");
		return -EPERM;
	}
	error = gfs2_jdesc_check(jd);
	if (error) {
		fs_err(sdp, "Error checking journal for spectator mount.\n");
		goto out_unlock;
	}
	error = gfs2_find_jhead(jd, &head, false);
	if (error) {
		fs_err(sdp, "Error parsing journal for spectator mount.\n");
		goto out_unlock;
	}
	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		error = -EPERM;
		fs_err(sdp, "jid=%u: Journal is dirty, so the first mounter "
		       "must not be a spectator.\n", jd->jd_jid);
	}

out_unlock:
	gfs2_glock_dq_uninit(&j_gh);
	return error;
}

/* Set up (or with @undo, tear down) this node's journal: enumerate the
   jindex, claim our own journal (unless a spectator), and, if we are
   the first mounter, recover every journal before letting others in.
   The sd_jinode_gh/sd_journal_gh holders are only taken for
   non-spectator mounts, which is why the fail_* path guards on
   ar_spectator. */
static int init_journal(struct gfs2_sbd *sdp, int undo)
{
	struct inode *master = d_inode(sdp->sd_master_dir);
	struct gfs2_holder ji_gh;
	struct gfs2_inode *ip;
	int jindex = 1;
	int error = 0;

	if (undo) {
		jindex = 0;
		goto fail_jinode_gh;
	}

	sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
	if (IS_ERR(sdp->sd_jindex)) {
		fs_err(sdp, "can't lookup journal index: %d\n", error);
		return PTR_ERR(sdp->sd_jindex);
	}

	/* Load in the journal index special file */

	error = gfs2_jindex_hold(sdp, &ji_gh);
	if (error) {
		fs_err(sdp, "can't read journal index: %d\n", error);
		goto fail;
	}

	error = -EUSERS;
	if (!gfs2_jindex_size(sdp)) {
		fs_err(sdp, "no journals!\n");
		goto fail_jindex;
	}

	atomic_set(&sdp->sd_log_blks_needed, 0);
	if (sdp->sd_args.ar_spectator) {
		/* Spectators piggy-back on journal 0 for sizing but never
		   lock or write it. */
		sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
	} else {
		if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
			fs_err(sdp, "can't mount journal #%u\n",
			       sdp->sd_lockstruct.ls_jid);
			fs_err(sdp, "there are only %u journals (0 - %u)\n",
			       gfs2_jindex_size(sdp),
			       gfs2_jindex_size(sdp) - 1);
			goto fail_jindex;
		}
		sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);

		error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
					  &gfs2_journal_glops,
					  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
					  &sdp->sd_journal_gh);
		if (error) {
			fs_err(sdp, "can't acquire journal glock: %d\n", error);
			goto fail_jindex;
		}

		ip = GFS2_I(sdp->sd_jdesc->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
					   LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
					   &sdp->sd_jinode_gh);
		if (error) {
			fs_err(sdp, "can't acquire journal inode glock: %d\n",
			       error);
			goto fail_journal_gh;
		}

		error = gfs2_jdesc_check(sdp->sd_jdesc);
		if (error) {
			fs_err(sdp, "my journal (%u) is bad: %d\n",
			       sdp->sd_jdesc->jd_jid, error);
			goto fail_jinode_gh;
		}
		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);

		/* Map the extents for this journal's blocks */
		gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
	}
	trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));

	if (sdp->sd_lockstruct.ls_first) {
		unsigned int x;
		for (x = 0; x < sdp->sd_journals; x++) {
			struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);

			if (sdp->sd_args.ar_spectator) {
				error = check_journal_clean(sdp, jd);
				if (error)
					goto fail_jinode_gh;
				continue;
			}
			error = gfs2_recover_journal(jd, true);
			if (error) {
				fs_err(sdp, "error recovering journal %u: %d\n",
				       x, error);
				goto fail_jinode_gh;
			}
		}

		gfs2_others_may_mount(sdp);
	} else if (!sdp->sd_args.ar_spectator) {
		error = gfs2_recover_journal(sdp->sd_jdesc, true);
		if (error) {
			fs_err(sdp, "error recovering my journal: %d\n", error);
			goto fail_jinode_gh;
		}
	}

	sdp->sd_log_idle = 1;
	set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
	gfs2_glock_dq_uninit(&ji_gh);
	jindex = 0;
	INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
	return 0;

fail_jinode_gh:
	if (!sdp->sd_args.ar_spectator)
		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
fail_journal_gh:
	if (!sdp->sd_args.ar_spectator)
		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
fail_jindex:
	gfs2_jindex_free(sdp);
	if (jindex)
		gfs2_glock_dq_uninit(&ji_gh);
fail:
	iput(sdp->sd_jindex);
	return error;
}

static struct lock_class_key gfs2_quota_imutex_key;

/* Set up (or with @undo, tear down) the system inodes that live in the
   master directory: the journal, statfs, rindex and quota inodes. */
static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
	int error = 0;
	struct inode *master = d_inode(sdp->sd_master_dir);

	if (undo)
		goto fail_qinode;

	error = init_journal(sdp, undo);
	complete_all(&sdp->sd_journal_ready);
	if (error)
		goto fail;

	/* Read in the master statfs inode */
	sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
	if (IS_ERR(sdp->sd_statfs_inode)) {
		error = PTR_ERR(sdp->sd_statfs_inode);
		fs_err(sdp, "can't read in statfs inode: %d\n", error);
		goto fail_journal;
	}

	/* Read in the resource index inode */
	sdp->sd_rindex =
gfs2_lookup_simple(master, "rindex");
	if (IS_ERR(sdp->sd_rindex)) {
		error = PTR_ERR(sdp->sd_rindex);
		fs_err(sdp, "can't get resource index inode: %d\n", error);
		goto fail_statfs;
	}
	sdp->sd_rindex_uptodate = 0;

	/* Read in the quota inode */
	sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
	if (IS_ERR(sdp->sd_quota_inode)) {
		error = PTR_ERR(sdp->sd_quota_inode);
		fs_err(sdp, "can't get quota file inode: %d\n", error);
		goto fail_rindex;
	}
	/*
	 * i_rwsem on quota files is special. Since this inode is hidden system
	 * file, we are safe to define locking ourselves.
	 */
	lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
			  &gfs2_quota_imutex_key);

	error = gfs2_rindex_update(sdp);
	if (error)
		goto fail_qinode;

	return 0;

fail_qinode:
	iput(sdp->sd_quota_inode);
fail_rindex:
	gfs2_clear_rgrpd(sdp);
	iput(sdp->sd_rindex);
fail_statfs:
	iput(sdp->sd_statfs_inode);
fail_journal:
	init_journal(sdp, UNDO);
fail:
	return error;
}

/* Set up (or with @undo, tear down) this node's per-journal files in
   the per_node directory: the local statfs-change ("sc") and
   quota-change ("qc") inodes and their exclusive glocks.  Spectator
   mounts have no journal of their own and skip all of this. */
static int init_per_node(struct gfs2_sbd *sdp, int undo)
{
	struct inode *pn = NULL;
	char buf[30];
	int error = 0;
	struct gfs2_inode *ip;
	struct inode *master = d_inode(sdp->sd_master_dir);

	if (sdp->sd_args.ar_spectator)
		return 0;

	if (undo)
		goto fail_qc_gh;

	pn = gfs2_lookup_simple(master, "per_node");
	if (IS_ERR(pn)) {
		error = PTR_ERR(pn);
		fs_err(sdp, "can't find per_node directory: %d\n", error);
		return error;
	}

	sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_sc_inode)) {
		error = PTR_ERR(sdp->sd_sc_inode);
		fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
		goto fail;
	}

	sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_qc_inode)) {
		error = PTR_ERR(sdp->sd_qc_inode);
		fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
		goto fail_ut_i;
	}

	iput(pn);
	pn = NULL;

	ip = GFS2_I(sdp->sd_sc_inode);
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_sc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
		goto fail_qc_i;
	}

	ip = GFS2_I(sdp->sd_qc_inode);
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_qc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
		goto fail_ut_gh;
	}

	return 0;

fail_qc_gh:
	gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
fail_ut_gh:
	gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
fail_qc_i:
	iput(sdp->sd_qc_inode);
fail_ut_i:
	iput(sdp->sd_sc_inode);
fail:
	iput(pn);
	return error;
}

/* NOTE(review): the "\n" inside the jid match pattern looks odd but is
   kept byte-identical here; confirm against the lock_nolock hostdata
   format before changing it. */
static const match_table_t nolock_tokens = {
	{ Opt_jid, "jid=%d\n", },
	{ Opt_err, NULL },
};

/* Minimal lock operations for single-node (lock_nolock) mounts. */
static const struct lm_lockops nolock_ops = {
	.lm_proto_name = "lock_nolock",
	.lm_put_lock = gfs2_glock_free,
	.lm_tokens = &nolock_tokens,
};

/**
 * gfs2_lm_mount - mount a locking protocol
 * @sdp: the filesystem
 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 *
 * Selects the lock module by protocol name, parses the hostdata mount
 * options (jid, first), and calls the module's lm_mount.
 *
 * Returns: errno
 */

static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
{
	const struct lm_lockops *lm;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	struct gfs2_args *args = &sdp->sd_args;
	const char *proto = sdp->sd_proto_name;
	const char *table = sdp->sd_table_name;
	char *o, *options;
	int ret;

	if (!strcmp("lock_nolock", proto)) {
		lm = &nolock_ops;
		sdp->sd_args.ar_localflocks = 1;
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
	} else if (!strcmp("lock_dlm", proto)) {
		lm = &gfs2_dlm_ops;
#endif
	} else {
		pr_info("can't find protocol %s\n", proto);
		return
 -ENOENT;
	}

	fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);

	ls->ls_ops = lm;
	ls->ls_first = 1;

	/* Parse the colon-separated hostdata options from userspace. */
	for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
		substring_t tmp[MAX_OPT_ARGS];
		int token, option;

		if (!o || !*o)
			continue;

		token = match_token(o, *lm->lm_tokens, tmp);
		switch (token) {
		case Opt_jid:
			ret = match_int(&tmp[0], &option);
			if (ret || option < 0)
				goto hostdata_error;
			/* Only accept a jid if we are still waiting for one. */
			if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
				ls->ls_jid = option;
			break;
		case Opt_id:
		case Opt_nodir:
			/* Obsolete, but left for backward compat purposes */
			break;
		case Opt_first:
			ret = match_int(&tmp[0], &option);
			if (ret || (option != 0 && option != 1))
				goto hostdata_error;
			ls->ls_first = option;
			break;
		case Opt_err:
		default:
hostdata_error:
			fs_info(sdp, "unknown hostdata (%s)\n", o);
			return -EINVAL;
		}
	}

	if (lm->lm_mount == NULL) {
		/* lock_nolock has no cluster to join. */
		fs_info(sdp, "Now mounting FS...\n");
		complete_all(&sdp->sd_locking_init);
		return 0;
	}
	ret = lm->lm_mount(sdp, table);
	if (ret == 0)
		fs_info(sdp, "Joined cluster. Now mounting FS...\n");
	complete_all(&sdp->sd_locking_init);
	return ret;
}

/* Unmount the lock protocol, unless the filesystem has withdrawn. */
void gfs2_lm_unmount(struct gfs2_sbd *sdp)
{
	const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
	if (likely(!test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) &&
	    lm->lm_unmount)
		lm->lm_unmount(sdp);
}

/* Wait (interruptibly) for the lock module / userspace to assign us a
   journal id.  Protocols without lm_mount (lock_nolock) need no wait. */
static int wait_on_journal(struct gfs2_sbd *sdp)
{
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		return 0;

	return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
		?  -EINTR : 0;
}

/* Emit the ONLINE uevent with the mount's rdonly/spectator status. */
void gfs2_online_uevent(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	char ro[20];
	char spectator[20];
	char *envp[] = { ro, spectator, NULL };
	sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
}

/**
 * fill_super - Read in superblock
 * @sb: The VFS superblock
 * @args: Mount options
 * @silent: Don't complain if it's not a GFS2 filesystem
 *
 * Returns: errno
 */

static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
{
	struct gfs2_sbd *sdp;
	struct gfs2_holder mount_gh;
	int error;

	sdp = init_sbd(sb);
	if (!sdp) {
		pr_warn("can't alloc struct gfs2_sbd\n");
		return -ENOMEM;
	}
	sdp->sd_args = *args;

	if (sdp->sd_args.ar_spectator) {
		sb->s_flags |= SB_RDONLY;
		set_bit(SDF_RORECOVERY, &sdp->sd_flags);
	}
	if (sdp->sd_args.ar_posix_acl)
		sb->s_flags |= SB_POSIXACL;
	if (sdp->sd_args.ar_nobarrier)
		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);

	sb->s_flags |= SB_NOSEC;
	sb->s_magic = GFS2_MAGIC;
	sb->s_op = &gfs2_super_ops;
	sb->s_d_op = &gfs2_dops;
	sb->s_export_op = &gfs2_export_ops;
	sb->s_xattr = gfs2_xattr_handlers;
	sb->s_qcop = &gfs2_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
	sb->s_time_gran = 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;

	/* Set up the buffer cache and fill in some fake block size values
	   to allow us to read-in the on-disk superblock.
*/
	sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
	sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);

	sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
	sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
	if (sdp->sd_args.ar_statfs_quantum) {
		sdp->sd_tune.gt_statfs_slow = 0;
		sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
	} else {
		sdp->sd_tune.gt_statfs_slow = 1;
		sdp->sd_tune.gt_statfs_quantum = 30;
	}

	error = init_names(sdp, silent);
	if (error) {
		/* In this case, we haven't initialized sysfs, so we have to
		   manually free the sdp. */
		free_sbd(sdp);
		sb->s_fs_info = NULL;
		return error;
	}

	snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);

	error = gfs2_sys_fs_add(sdp);
	/*
	 * If we hit an error here, gfs2_sys_fs_add will have called function
	 * kobject_put which causes the sysfs usage count to go to zero, which
	 * causes sysfs to call function gfs2_sbd_release, which frees sdp.
	 * Subsequent error paths here will call gfs2_sys_fs_del, which also
	 * kobject_put to free sdp.
	 */
	if (error)
		return error;

	gfs2_create_debugfs_file(sdp);

	error = gfs2_lm_mount(sdp, silent);
	if (error)
		goto fail_debug;

	error = init_locking(sdp, &mount_gh, DO);
	if (error)
		goto fail_lm;

	error = init_sb(sdp, silent);
	if (error)
		goto fail_locking;

	error = wait_on_journal(sdp);
	if (error)
		goto fail_sb;

	/*
	 * If user space has failed to join the cluster or some similar
	 * failure has occurred, then the journal id will contain a
	 * negative (error) number. This will then be returned to the
	 * caller (of the mount syscall). We do this even for spectator
	 * mounts (which just write a jid of 0 to indicate "ok" even though
	 * the jid is unused in the spectator case)
	 */
	if (sdp->sd_lockstruct.ls_jid < 0) {
		error = sdp->sd_lockstruct.ls_jid;
		sdp->sd_lockstruct.ls_jid = 0;
		goto fail_sb;
	}

	/* Append the journal id (or ".s" for spectators) to the fs name. */
	if (sdp->sd_args.ar_spectator)
		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.s",
			 sdp->sd_table_name);
	else
		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.%u",
			 sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);

	error = init_inodes(sdp, DO);
	if (error)
		goto fail_sb;

	error = init_per_node(sdp, DO);
	if (error)
		goto fail_inodes;

	error = gfs2_statfs_init(sdp);
	if (error) {
		fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
		goto fail_per_node;
	}

	if (!sb_rdonly(sb)) {
		error = gfs2_make_fs_rw(sdp);
		if (error) {
			fs_err(sdp, "can't make FS RW: %d\n", error);
			goto fail_per_node;
		}
	}

	gfs2_glock_dq_uninit(&mount_gh);
	gfs2_online_uevent(sdp);
	return 0;

fail_per_node:
	init_per_node(sdp, UNDO);
fail_inodes:
	init_inodes(sdp, UNDO);
fail_sb:
	if (sdp->sd_root_dir)
		dput(sdp->sd_root_dir);
	if (sdp->sd_master_dir)
		dput(sdp->sd_master_dir);
	if (sb->s_root)
		dput(sb->s_root);
	sb->s_root = NULL;
fail_locking:
	init_locking(sdp, &mount_gh, UNDO);
fail_lm:
	complete_all(&sdp->sd_journal_ready);
	gfs2_gl_hash_clear(sdp);
	gfs2_lm_unmount(sdp);
fail_debug:
	gfs2_delete_debugfs_file(sdp);
	/* gfs2_sys_fs_del must be the last thing we do, since it causes
	 * sysfs to call function gfs2_sbd_release, which frees sdp. */
	gfs2_sys_fs_del(sdp);
	sb->s_fs_info = NULL;
	return error;
}

/* sget() callback: bind the super block to the block device. */
static int set_gfs2_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
	return 0;
}

/* sget() callback: match an existing super block by block device. */
static int test_gfs2_super(struct super_block *s, void *ptr)
{
	struct block_device *bdev = ptr;
	return (bdev == s->s_bdev);
}

/**
 * gfs2_mount - Get the GFS2 superblock
 * @fs_type: The GFS2 filesystem type
 * @flags: Mount flags
 * @dev_name: The name of the device
 * @data: The mount arguments
 *
 * Q. Why not use get_sb_bdev() ?
 * A. We need to select one of two root directories to mount, independent
 *    of whether this is the initial, or subsequent, mount of this sb
 *
 * Returns: 0 or -ve on error
 */

static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data)
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error;
	struct gfs2_args args;
	struct gfs2_sbd *sdp;

	if (!(flags & SB_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	error = PTR_ERR(s);
	if (IS_ERR(s))
		goto error_bdev;

	if (s->s_root) {
		/*
		 * s_umount nests inside bd_mutex during
* __invalidate_device(). blkdev_put() acquires 1273 * bd_mutex and can't be called under s_umount. Drop 1274 * s_umount temporarily. This is safe as we're 1275 * holding an active reference. 1276 */ 1277 up_write(&s->s_umount); 1278 blkdev_put(bdev, mode); 1279 down_write(&s->s_umount); 1280 } else { 1281 /* s_mode must be set before deactivate_locked_super calls */ 1282 s->s_mode = mode; 1283 } 1284 1285 memset(&args, 0, sizeof(args)); 1286 args.ar_quota = GFS2_QUOTA_DEFAULT; 1287 args.ar_data = GFS2_DATA_DEFAULT; 1288 args.ar_commit = 30; 1289 args.ar_statfs_quantum = 30; 1290 args.ar_quota_quantum = 60; 1291 args.ar_errors = GFS2_ERRORS_DEFAULT; 1292 1293 error = gfs2_mount_args(&args, data); 1294 if (error) { 1295 pr_warn("can't parse mount arguments\n"); 1296 goto error_super; 1297 } 1298 1299 if (s->s_root) { 1300 error = -EBUSY; 1301 if ((flags ^ s->s_flags) & SB_RDONLY) 1302 goto error_super; 1303 } else { 1304 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev); 1305 sb_set_blocksize(s, block_size(bdev)); 1306 error = fill_super(s, &args, flags & SB_SILENT ? 
1 : 0); 1307 if (error) 1308 goto error_super; 1309 s->s_flags |= SB_ACTIVE; 1310 bdev->bd_super = s; 1311 } 1312 1313 sdp = s->s_fs_info; 1314 if (args.ar_meta) 1315 return dget(sdp->sd_master_dir); 1316 else 1317 return dget(sdp->sd_root_dir); 1318 1319 error_super: 1320 deactivate_locked_super(s); 1321 return ERR_PTR(error); 1322 error_bdev: 1323 blkdev_put(bdev, mode); 1324 return ERR_PTR(error); 1325 } 1326 1327 static int set_meta_super(struct super_block *s, void *ptr) 1328 { 1329 return -EINVAL; 1330 } 1331 1332 static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type, 1333 int flags, const char *dev_name, void *data) 1334 { 1335 struct super_block *s; 1336 struct gfs2_sbd *sdp; 1337 struct path path; 1338 int error; 1339 1340 if (!dev_name || !*dev_name) 1341 return ERR_PTR(-EINVAL); 1342 1343 error = kern_path(dev_name, LOOKUP_FOLLOW, &path); 1344 if (error) { 1345 pr_warn("path_lookup on %s returned error %d\n", 1346 dev_name, error); 1347 return ERR_PTR(error); 1348 } 1349 s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags, 1350 path.dentry->d_sb->s_bdev); 1351 path_put(&path); 1352 if (IS_ERR(s)) { 1353 pr_warn("gfs2 mount does not exist\n"); 1354 return ERR_CAST(s); 1355 } 1356 if ((flags ^ s->s_flags) & SB_RDONLY) { 1357 deactivate_locked_super(s); 1358 return ERR_PTR(-EBUSY); 1359 } 1360 sdp = s->s_fs_info; 1361 return dget(sdp->sd_master_dir); 1362 } 1363 1364 static void gfs2_kill_sb(struct super_block *sb) 1365 { 1366 struct gfs2_sbd *sdp = sb->s_fs_info; 1367 1368 if (sdp == NULL) { 1369 kill_block_super(sb); 1370 return; 1371 } 1372 1373 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SYNC | GFS2_LFC_KILL_SB); 1374 dput(sdp->sd_root_dir); 1375 dput(sdp->sd_master_dir); 1376 sdp->sd_root_dir = NULL; 1377 sdp->sd_master_dir = NULL; 1378 shrink_dcache_sb(sb); 1379 kill_block_super(sb); 1380 } 1381 1382 struct file_system_type gfs2_fs_type = { 1383 .name = "gfs2", 1384 .fs_flags = FS_REQUIRES_DEV, 1385 .mount = gfs2_mount, 
1386 .kill_sb = gfs2_kill_sb, 1387 .owner = THIS_MODULE, 1388 }; 1389 MODULE_ALIAS_FS("gfs2"); 1390 1391 struct file_system_type gfs2meta_fs_type = { 1392 .name = "gfs2meta", 1393 .fs_flags = FS_REQUIRES_DEV, 1394 .mount = gfs2_mount_meta, 1395 .owner = THIS_MODULE, 1396 }; 1397 MODULE_ALIAS_FS("gfs2meta"); 1398