// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

enum dinode_demise {
	SHOULD_DELETE_DINODE,
	SHOULD_NOT_DELETE_DINODE,
	SHOULD_DEFER_EVICTION,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	sdp->sd_jdesc = NULL;
	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}
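
/*
 * Worked example (illustrative numbers, not taken from any particular
 * mount): a journal inode of 128 MiB on a filesystem with 4 KiB blocks
 * (sb_bsize_shift == 12) yields jd_blocks = (128 << 20) >> 12 = 32768
 * journal blocks.  gfs2_jdesc_check() above rejects journals outside
 * the 8 MiB (8 << 20) to 1 GiB (BIT(30)) range, and insists the journal
 * is fully preallocated: log writes must never require block allocation.
 */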

static int init_threads(struct gfs2_sbd *sdp)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start logd thread: %d\n", error);
		return error;
	}
	sdp->sd_logd_process = p;

	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start quotad thread: %d\n", error);
		goto fail;
	}
	sdp->sd_quotad_process = p;
	return 0;

fail:
	kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder freeze_gh;
	struct gfs2_log_header_host head;
	int error;

	error = init_threads(sdp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
				   LM_FLAG_NOEXP | GL_EXACT,
				   &freeze_gh);
	if (error)
		goto fail_threads;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawn(sdp)) {
		error = -EIO;
		goto fail;
	}

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error || gfs2_withdrawn(sdp))
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialize the head of the log */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error || gfs2_withdrawn(sdp))
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&freeze_gh);

	return 0;

fail:
	gfs2_glock_dq_uninit(&freeze_gh);
fail_threads:
	if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;
	if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
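
/*
 * Each node accumulates statfs deltas in its own local statfs file and
 * only folds them into the master statfs file (see update_statfs())
 * when a sync is triggered.  With the statfs_percent mount option, a
 * sync is requested once the accumulated local free-space delta reaches
 * the configured percentage of the master value.  A rough sketch of the
 * check below, using made-up numbers: with ar_statfs_percent = 5,
 * m_sc->sc_free = 1000000 and an accumulated l_sc->sc_free of -60000,
 * we get x = -6000000 and y = 5000000, so x <= -y holds and
 * gfs2_wake_up_statfs() is called.
 */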

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct buffer_head *l_bh;
	s64 x, y;
	int need_sync = 0;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
		   struct buffer_head *l_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	update_statfs(sdp, m_bh, l_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};
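
/*
 * A small helper for gfs2_lock_fs_check_clean() below: one lfcc entry
 * is kept per journal while a shared glock is held on every journal
 * inode, and the list lets all of those holders be dropped again on
 * any exit path.
 */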

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}
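
/*
 * Serialization of the in-core inode into its on-disk dinode follows.
 * All fields are converted to big-endian; note that the single in-core
 * allocation goal (i_goal) is written out as both di_goal_meta and
 * di_goal_data.
 */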

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
	str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}
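
/*
 * Note on the flow above: for WB_SYNC_ALL (and for journaled-data
 * inodes) the journal is flushed first, so the metadata being written
 * back is already stable in the log; only then is the glock's metadata
 * address space written out and waited on.  If everything is clean
 * afterwards, the inode can safely be removed from the ordered-write
 * list.
 */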

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (!(flags & I_DIRTY_INODE))
		return;
	if (unlikely(gfs2_withdrawn(sdp)))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder freeze_gh;
	int error = 0;
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_holder_mark_uninitialized(&freeze_gh);
	if (sdp->sd_freeze_gl &&
	    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
		if (!log_write_allowed) {
			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
						   LM_ST_SHARED, LM_FLAG_TRY |
						   LM_FLAG_NOEXP | GL_EXACT,
						   &freeze_gh);
			if (error == GLR_TRYFAILED)
				error = 0;
		} else {
			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
						   LM_ST_SHARED,
						   LM_FLAG_NOEXP | GL_EXACT,
						   &freeze_gh);
			if (error && !gfs2_withdrawn(sdp))
				return error;
		}
	}

	gfs2_flush_delete_work(sdp);
	if (!log_write_allowed && current == sdp->sd_quotad_process)
		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
	else if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;

	if (!log_write_allowed && current == sdp->sd_logd_process)
		fs_warn(sdp, "The logd daemon is withdrawing.\n");
	else if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event(sdp->sd_reserving_log_wait,
			   atomic_read(&sdp->sd_reserving_log) == 0);
		gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
				 sdp->sd_jdesc->jd_blocks);
	} else {
		wait_event_timeout(sdp->sd_reserving_log_wait,
				   atomic_read(&sdp->sd_reserving_log) == 0,
				   HZ * 5);
	}
	if (gfs2_holder_initialized(&freeze_gh))
		gfs2_glock_dq_uninit(&freeze_gh);

	gfs2_quota_cleanup(sdp);

	if (!log_write_allowed)
		sdp->sd_vfs->s_flags |= SB_RDONLY;

	return error;
}
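
/*
 * The ordering in gfs2_make_fs_ro() matters: the logd and quotad
 * daemons are stopped before the final quota/statfs sync and the
 * SHUTDOWN log flush, and the freeze glock is only taken with
 * LM_FLAG_TRY when the journal is already dead, so a withdrawing node
 * cannot block on it.  After the shutdown flush every journal block
 * should be free again, which is exactly what the gfs2_assert_warn()
 * above checks.
 */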

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
	WARN_ON(gfs2_withdrawing(sdp));

	/* At this point, we're through modifying the disk */

	/* Release stuff */

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	truncate_inode_pages_final(&sdp->sd_aspace);
	gfs2_delete_debugfs_file(sdp);
	/* Unmount the locking protocol */
	gfs2_lm_unmount(sdp);

	/* At this point, we're through participating in the lockspace */
	gfs2_sys_fs_del(sdp);
	free_sbd(sdp);
}
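
/*
 * gfs2_sync_fs() below backs the sync paths (sync(2), syncfs(2)) for
 * this filesystem: quota changes are synced unconditionally, the
 * journal is only flushed when the VFS asks for a waiting sync, and the
 * return value is the sticky log error, if any, so an earlier log
 * failure gets reported on the next sync.
 */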

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

void gfs2_freeze_func(struct work_struct *work)
{
	int error;
	struct gfs2_holder freeze_gh;
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;

	atomic_inc(&sb->s_active);
	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
				   LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
	if (error) {
		fs_info(sdp, "GFS2: couldn't get freeze lock: %d\n", error);
		gfs2_assert_withdraw(sdp, 0);
	} else {
		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
		error = thaw_super(sb);
		if (error) {
			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
		gfs2_glock_dq_uninit(&freeze_gh);
	}
	deactivate_super(sb);
	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 */

static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error = 0;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
		goto out;

	for (;;) {
		if (gfs2_withdrawn(sdp)) {
			error = -EINVAL;
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error)
			break;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}
	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 */

static int gfs2_unfreeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return 0;
	}

	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_mutex);
	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
}

/**
 * statfs_slow_fill - fill in the sc for a given resource group
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}
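
/*
 * The slow statfs path below walks every resource group in the
 * filesystem.  To hide lock latency it keeps up to 64 asynchronous
 * glock requests (GL_ASYNC) in flight at once, polling each holder
 * with gfs2_glock_poll() and folding in the rgrp counts via
 * statfs_slow_fill() as the locks are granted.
 */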

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry to stat
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}
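
/*
 * GFS2 has no static inode table: any free block can become a dinode.
 * That is why f_files above is reported as "dinodes in use plus free
 * blocks" and f_ffree simply as the number of free blocks.
 */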

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open.  Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock.  If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event.  This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
	    inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * If, while under memory pressure, an inode's link count has dropped
	 * to zero, defer deleting the inode to the delete workqueue.  This
	 * avoids calling into DLM under memory pressure, which can deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_delete_work(gl, 0))
			gfs2_glock_queue_put(gl);
		return false;
	}

	return generic_drop_inode(inode);
}

static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int val;

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	val = sdp->sd_tune.gt_logd_secs;
	if (val != 30)
		seq_printf(s, ",commit=%d", val);
	val = sdp->sd_tune.gt_statfs_quantum;
	if (val != 30)
		seq_printf(s, ",statfs_quantum=%d", val);
	else if (sdp->sd_tune.gt_statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	val = sdp->sd_tune.gt_quota_quantum;
	if (val != 60)
		seq_printf(s, ",quota_quantum=%d", val);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually - Put a glock, deferring if under memory pressure
 * @gl: The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock. Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}
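
/*
 * Deletion of an unlinked inode is coordinated through its "iopen"
 * glock: every node that has the inode open holds the iopen glock in
 * shared mode, so a node that wants to deallocate the inode must first
 * upgrade its iopen holder to exclusive.  gfs2_upgrade_iopen_glock()
 * below implements that handshake.
 */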

static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we'll get the lock immediately.
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request.  If they don't have the inode open, they'll
	 * evict the cached inode and release the lock.  Otherwise, if they
	 * poke the inode glock, we'll take this as an indication that they
	 * still need the iopen glock and that they'll take care of deleting
	 * the inode when they're done.  As a last resort, if another node
	 * keeps holding the iopen glock without showing any activity on the
	 * inode glock, we'll eventually time out.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon
	 * as possible.  Without that flag, they'd be notified implicitly by
	 * the second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return false;

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return true;
}
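
/*
 * evict_should_delete() below maps the state of an evicted inode onto
 * the dinode_demise values declared at the top of this file:
 * SHOULD_DELETE_DINODE leads to evict_unlinked_inode(),
 * SHOULD_NOT_DELETE_DINODE to evict_linked_inode(), and
 * SHOULD_DEFER_EVICTION skips deallocation entirely, leaving the work
 * to the delete workqueue or to another node.
 */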

/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder to use for the inode glock
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum dinode_demise evict_should_delete(struct inode *inode,
					      struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
		goto should_delete;
	}

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		return SHOULD_DEFER_EVICTION;

	/* Deletes should never happen under memory pressure anymore. */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return SHOULD_DEFER_EVICTION;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return SHOULD_DEFER_EVICTION;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return SHOULD_NOT_DELETE_DINODE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		ret = gfs2_inode_refresh(ip);
		if (ret)
			return SHOULD_NOT_DELETE_DINODE;
	}

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		return SHOULD_NOT_DELETE_DINODE;

should_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return SHOULD_NOT_DELETE_DINODE;
		}
	}
	return SHOULD_DELETE_DINODE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	/*
	 * We're about to clear the bitmap for the dinode, but as soon as we
	 * do, gfs2_create_inode can create another inode at the same block
	 * location and try to set gl_object again.  We clear gl_object here
	 * so that subsequent inode creates don't see an old gl_object.
	 */
	glock_clear_object(ip->i_gl, ip);
	ret = gfs2_dinode_dealloc(ip);
	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
out:
	return ret;
}

/*
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}
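
/*
 * Deallocation in evict_unlinked_inode() above proceeds from the
 * outside in: first the exhash directory blocks (if any), then the
 * extended attribute blocks, then the data blocks of a non-stuffed
 * file, and finally the dinode block itself.  The formal inode number
 * is then recorded on the inode glock so that later evictions (see
 * gfs2_inode_already_deleted()) know the dinode is already gone.
 */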

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
		clear_inode(inode);
		return;
	}

	if (inode->i_nlink || sb_rdonly(sb))
		goto out;

	gfs2_holder_mark_uninitialized(&gh);
	ret = evict_should_delete(inode, &gh);
	if (ret == SHOULD_DEFER_EVICTION)
		goto out;
	if (ret == SHOULD_DELETE_DINODE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh)) {
		glock_clear_object(ip->i_gl, ip);
		gfs2_glock_dq_uninit(&gh);
	}
	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_delete(ip, NULL);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_add_to_lru(ip->i_gl);
		gfs2_glock_put_eventually(ip->i_gl);
		ip->i_gl = NULL;
	}
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
			gfs2_glock_dq(&ip->i_iopen_gh);
		}
		gfs2_glock_hold(gl);
		gfs2_holder_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}
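
/*
 * Each node has its own local statfs inode, kept on sd_sc_inodes_list
 * and keyed by the journal id it belongs to.  A caller that needs the
 * local statfs inode of another node (for instance when recovering
 * that node's journal) can look it up by jid, roughly:
 *
 *	struct inode *inode = find_local_statfs_inode(sdp, jd->jd_jid);
 *
 * (illustrative call; the actual lookups live outside this file)
 */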

struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/*
	 * Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'.
	 */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze,
	.thaw_super		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};