// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

enum dinode_demise {
	SHOULD_DELETE_DINODE,
	SHOULD_NOT_DELETE_DINODE,
	SHOULD_DEFER_EVICTION,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	down_write(&sdp->sd_log_flush_lock);
	sdp->sd_jdesc = NULL;
	up_write(&sdp->sd_log_flush_lock);

	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		BUG_ON(jd->jd_log_bio);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

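	/* Find the head of our own journal; a cleanly shut down journal must
	 * end with an unmount log header. */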
	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error) {
		gfs2_consist(sdp);
		return error;
	}

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		return -EIO;
	}

	/* Initialize some head of the log stuff */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (!error && gfs2_withdrawing_or_withdrawn(sdp))
		error = -EIO;
	if (!error)
		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
	return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

	}

	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return 0;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	s64 x, y;
	int need_sync = 0;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
			       sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
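	/* Fold the local statfs change file into the master copy, then reset
	 * the local changes to zero. */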
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh;

	update_statfs(sdp, m_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error, error2;

	/*
	 * Grab all the journal glocks in SH mode. We are *probably* doing
	 * that to prevent recovery.
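	 * Once they are all held, drop the shared freeze glock, retake it in
	 * the exclusive state, and verify that every journal ends with an
	 * unmount log header, i.e. that the filesystem is clean.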
	 */

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP | GL_NOPID,
				   &sdp->sd_freeze_gh);
	if (error)
		goto relock_shared;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (!error)
		goto out;	/* success */

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

relock_shared:
	error2 = gfs2_freeze_lock_shared(sdp);
	gfs2_assert_withdraw(sdp, !error2);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct inode *inode = &ip->i_inode;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(inode->i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(inode));
	str->di_gid = cpu_to_be32(i_gid_read(inode));
	str->di_nlink = cpu_to_be32(inode->i_nlink);
	str->di_size = cpu_to_be64(i_size_read(inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
	str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(inode_get_ctime(inode).tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
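	/* Only plain (non-exhash) directories use the directory entry payload
	 * format; exhash directories and regular files store zero here. */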
	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(inode_get_ctime(inode).tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and freeze glock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (unlikely(!ip->i_gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 */

void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (!test_bit(SDF_KILL, &sdp->sd_flags))
		gfs2_flush_delete_work(sdp);

	gfs2_destroy_threads(sdp);

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		/* We do two log flushes here. The first one commits dirty inodes
		 * and rgrps to the journal, but queues up revokes to the ail list.
		 * The second flush writes out and removes the revokes.
		 *
		 * The first must be done before the FLUSH_SHUTDOWN code
		 * clears the LIVE flag, otherwise it will not be able to start
		 * a transaction to write its revokes, and the error will cause
		 * a withdraw of the file system. */
		gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
	}
	gfs2_quota_cleanup(sdp);
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb))
		gfs2_make_fs_ro(sdp);
	else {
		if (gfs2_withdrawing_or_withdrawn(sdp))
			gfs2_destroy_threads(sdp);

		gfs2_quota_cleanup(sdp);
	}

	WARN_ON(gfs2_withdrawing(sdp));

	/* At this point, we're through modifying the disk */

	/* Release stuff */

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		brelse(sdp->sd_sc_bh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	truncate_inode_pages_final(&sdp->sd_aspace);
	gfs2_delete_debugfs_file(sdp);

	gfs2_sys_fs_del(sdp);
	free_sbd(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
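 *
 * Returns: 0, or a negative error number if the log has experienced
 * an I/O error (sd_log_error).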
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;

	error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
	if (error)
		return error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
		if (gfs2_withdrawing_or_withdrawn(sdp)) {
			error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
			if (error)
				return error;
			return -EIO;
		}
	}
	return 0;
}

static int gfs2_do_thaw(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;

	error = gfs2_freeze_lock_shared(sdp);
	if (error)
		goto fail;
	error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
	if (!error)
		return 0;

fail:
	fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
	gfs2_assert_withdraw(sdp, 0);
	return error;
}

void gfs2_freeze_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;
	int error;

	mutex_lock(&sdp->sd_freeze_mutex);
	error = -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
		goto freeze_failed;

	error = gfs2_freeze_locally(sdp);
	if (error)
		goto freeze_failed;

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
	set_bit(SDF_FROZEN, &sdp->sd_flags);

	error = gfs2_do_thaw(sdp);
	if (error)
		goto out;

	clear_bit(SDF_FROZEN, &sdp->sd_flags);
	goto out;

freeze_failed:
	fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	deactivate_super(sb);
}

/**
 * gfs2_freeze_super - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 * @who: the freeze holder (FREEZE_HOLDER_USERSPACE or FREEZE_HOLDER_KERNEL)
 *
 */

static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	error = -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
		goto out;

	for (;;) {
		error = gfs2_freeze_locally(sdp);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error)
			break;	/* success */

		error = gfs2_do_thaw(sdp);
		if (error)
			goto out;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}

out:
	if (!error) {
		set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
		set_bit(SDF_FROZEN, &sdp->sd_flags);
	}
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

/**
 * gfs2_thaw_super - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 * @who: the freeze holder being released (FREEZE_HOLDER_USERSPACE or
 *	 FREEZE_HOLDER_KERNEL)
 *
 */

static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	error = -EINVAL;
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
		goto out;

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	error = gfs2_do_thaw(sdp);

	if (!error) {
		clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
		clear_bit(SDF_FROZEN, &sdp->sd_flags);
	}
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

void gfs2_thaw_freeze_initiator(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
		goto out;

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

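	/* Start from the master statfs copy and fold in this node's local,
	 * not yet synced changes. */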
	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The name of the link
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * Under memory pressure, when an inode's link count has dropped to
	 * zero, defer deleting the inode to the delete workqueue. This
	 * avoids calling into DLM under memory pressure, which can deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_try_to_evict(gl))
			gfs2_glock_queue_put(gl);
		return 0;
	}

	/*
	 * No longer cache inodes when trying to evict them all.
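	 * Returning 1 makes the VFS evict the inode immediately instead of
	 * keeping it cached.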
	 */
	if (test_bit(SDF_EVICTING, &sdp->sd_flags))
		return 1;

	return generic_drop_inode(inode);
}

static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;

	spin_lock(&sdp->sd_tune.gt_spin);
	logd_secs = sdp->sd_tune.gt_logd_secs;
	quota_quantum = sdp->sd_tune.gt_quota_quantum;
	statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
	statfs_slow = sdp->sd_tune.gt_statfs_slow;
	spin_unlock(&sdp->sd_tune.gt_spin);

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		case GFS2_QUOTA_QUIET:
			state = "quiet";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	if (logd_secs != 30)
		seq_printf(s, ",commit=%d", logd_secs);
	if (statfs_quantum != 30)
		seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
	else if (statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	if (quota_quantum != 60)
		seq_printf(s, ",quota_quantum=%d", quota_quantum);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	if (unlikely(!gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	truncate_inode_pages(gfs2_glock2aspace(gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	gfs2_rindex_update(sdp);

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually - Put a glock, deferring if under memory pressure
 * @gl: The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock. Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}

static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we will immediately get
	 * exclusive access to the iopen glock here.
	 *
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request. If they do not have the inode open, they are
	 * expected to evict the cached inode and release the lock, allowing us
	 * to proceed.
	 *
	 * Otherwise, if they cannot evict the inode, they are expected to poke
	 * the inode glock (note: not the iopen glock). We will notice that
	 * and stop waiting for the iopen glock immediately. The other node(s)
	 * are then expected to take care of deleting the inode when they no
	 * longer use it.
	 *
	 * As a last resort, if another node keeps holding the iopen glock
	 * without showing any activity on the inode glock, we will eventually
	 * time out and fail the iopen glock upgrade.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon as
	 * possible. Without that flag, they'd be notified implicitly by the
	 * second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return false;

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return gfs2_glock_holder_ready(gh) == 0;
}

/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder structure
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum dinode_demise evict_should_delete(struct inode *inode,
					      struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
		goto should_delete;

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		return SHOULD_DEFER_EVICTION;

	/* Deletes should never happen under memory pressure anymore. */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return SHOULD_DEFER_EVICTION;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return SHOULD_DEFER_EVICTION;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return SHOULD_NOT_DELETE_DINODE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	ret = gfs2_instantiate(gh);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	/*
	 * The inode may have been recreated in the meantime.
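	 * A nonzero link count means it is in use again and must not be
	 * deallocated here.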
	 */
	if (inode->i_nlink)
		return SHOULD_NOT_DELETE_DINODE;

should_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return SHOULD_NOT_DELETE_DINODE;
		}
	}
	return SHOULD_DELETE_DINODE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	/*
	 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
	 * can get called to recreate it, or even gfs2_inode_lookup() if the
	 * inode was recreated on another node in the meantime.
	 *
	 * However, inserting the new inode into the inode hash table will not
	 * succeed until the old inode is removed, and that only happens after
	 * ->evict_inode() returns. The new inode is attached to its inode and
	 * iopen glocks after inserting it into the inode hash table, so at
	 * that point we can be sure that both glocks are unused.
	 */

	ret = gfs2_dinode_dealloc(ip);
	if (!ret && ip->i_gl)
		gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);

out:
	return ret;
}

/*
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
		goto out;

	/*
	 * In case of an incomplete mount, gfs2_evict_inode() may be called for
	 * system files without having an active journal to write to. In that
	 * case, skip the filesystem evict.
	 */
	if (!sdp->sd_jdesc)
		goto out;

	gfs2_holder_mark_uninitialized(&gh);
	ret = evict_should_delete(inode, &gh);
	if (ret == SHOULD_DEFER_EVICTION)
		goto out;
	if (ret == SHOULD_DELETE_DINODE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_deltree(&ip->i_res);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		gfs2_glock_hold(gl);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_add_to_lru(ip->i_gl);
		gfs2_glock_put_eventually(ip->i_gl);
		rcu_assign_pointer(ip->i_gl, NULL);
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_no_addr = 0;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}

struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/* Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze_super,
	.thaw_super		= gfs2_thaw_super,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};