// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

enum dinode_demise {
	SHOULD_DELETE_DINODE,
	SHOULD_NOT_DELETE_DINODE,
	SHOULD_DEFER_EVICTION,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	sdp->sd_jdesc = NULL;
	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid) {
			found = 1;
			break;
		}
	}

	if (!found)
		jd = NULL;

	return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

static int init_threads(struct gfs2_sbd *sdp)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start logd thread: %d\n", error);
		return error;
	}
	sdp->sd_logd_process = p;

	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start quotad thread: %d\n", error);
		goto fail;
	}
	sdp->sd_quotad_process = p;
	return 0;

fail:
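	/* quotad failed to start: stop the logd thread started above. */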
	kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder freeze_gh;
	struct gfs2_log_header_host head;
	int error;

	error = init_threads(sdp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
				   LM_FLAG_NOEXP | GL_EXACT,
				   &freeze_gh);
	if (error)
		goto fail_threads;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawn(sdp)) {
		error = -EIO;
		goto fail;
	}

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error || gfs2_withdrawn(sdp))
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialize some head of the log stuff */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error || gfs2_withdrawn(sdp))
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&freeze_gh);

	return 0;

fail:
	gfs2_glock_dq_uninit(&freeze_gh);
fail_threads:
	if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;
	if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

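/*
 * Each node accumulates statfs deltas in its own local statfs file; they
 * are periodically folded back into the master statfs file.  With the
 * "statfs_percent" mount option set, a sync is requested as soon as the
 * accumulated local free-block delta reaches that percentage of the master
 * free count.  Illustrative example: with statfs_percent=10 and
 * m_sc->sc_free == 1000, a local delta of +/-100 blocks makes
 * 100 * |l_sc->sc_free| >= 1000 * 10 hold below, so need_sync is set.
 */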
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct buffer_head *l_bh;
	s64 x, y;
	int need_sync = 0;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
		   struct buffer_head *l_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	sb_start_write(sb);
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	update_statfs(sdp, m_bh, l_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	sb_end_write(sb);
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
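 *
 * Called on the freeze path: every journal must contain a log header with
 * the GFS2_LOG_HEAD_UNMOUNT flag set (i.e. be clean) before the freeze
 * glock is taken and the filesystem can be frozen.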
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
	str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
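	/*
	 * Only directories that still use the linear (non-exhash) entry
	 * format advertise GFS2_FORMAT_DE as their payload format; exhash
	 * directories and non-directories store 0 here.
	 */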
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
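 *
 * Three situations are handled below: the inode glock may already be
 * held exclusively by this task, otherwise it is acquired here; and an
 * already running transaction (current->journal_info) is reused rather
 * than starting a new one.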
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (!(flags & I_DIRTY_INODE))
		return;
	if (unlikely(gfs2_withdrawn(sdp)))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder freeze_gh;
	int error = 0;
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_holder_mark_uninitialized(&freeze_gh);
	if (sdp->sd_freeze_gl &&
	    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
		if (!log_write_allowed) {
			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
						   LM_ST_SHARED, LM_FLAG_TRY |
						   LM_FLAG_NOEXP | GL_EXACT,
						   &freeze_gh);
			if (error == GLR_TRYFAILED)
				error = 0;
		} else {
			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
						   LM_ST_SHARED,
						   LM_FLAG_NOEXP | GL_EXACT,
						   &freeze_gh);
			if (error && !gfs2_withdrawn(sdp))
				return error;
		}
	}

	gfs2_flush_delete_work(sdp);
	if (!log_write_allowed && current == sdp->sd_quotad_process)
		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
	else if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;

	if (!log_write_allowed && current == sdp->sd_logd_process)
		fs_warn(sdp, "The logd daemon is withdrawing.\n");
	else if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event(sdp->sd_reserving_log_wait,
			   atomic_read(&sdp->sd_reserving_log) == 0);
		gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
				 sdp->sd_jdesc->jd_blocks);
	} else {
		wait_event_timeout(sdp->sd_reserving_log_wait,
				   atomic_read(&sdp->sd_reserving_log) == 0,
				   HZ * 5);
	}
	if (gfs2_holder_initialized(&freeze_gh))
		gfs2_glock_dq_uninit(&freeze_gh);

	gfs2_quota_cleanup(sdp);

	if (!log_write_allowed)
		sdp->sd_vfs->s_flags |= SB_RDONLY;

	return error;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
	WARN_ON(gfs2_withdrawing(sdp));

	/* At this point, we're through modifying the disk */

	/* Release stuff */

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	gfs2_delete_debugfs_file(sdp);
	/* Unmount the locking protocol */
	gfs2_lm_unmount(sdp);

	/* At this point, we're through participating in the lockspace */
	gfs2_sys_fs_del(sdp);
	free_sbd(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

void gfs2_freeze_func(struct work_struct *work)
{
	int error;
	struct gfs2_holder freeze_gh;
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;

	atomic_inc(&sb->s_active);
	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
				   LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
	if (error) {
		fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
		gfs2_assert_withdraw(sdp, 0);
	} else {
		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
		error = thaw_super(sb);
		if (error) {
			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
		gfs2_glock_dq_uninit(&freeze_gh);
	}
	deactivate_super(sb);
	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
	return;
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error = 0;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
		goto out;

	for (;;) {
		if (gfs2_withdrawn(sdp)) {
			error = -EINVAL;
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error)
			break;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}
	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_unfreeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return 0;
	}

	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_mutex);
	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
}

/**
 * statfs_slow_fill - fill in the sc structure for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
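 *
 * Up to 64 resource-group glocks are requested asynchronously at a time;
 * as each one is granted, its counts are folded into @sc and the glock
 * is dropped, keeping lock acquisition pipelined across the cluster.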
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry to stat
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
	    inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * When under memory pressure and an inode's link count has dropped
	 * to zero, defer deleting the inode to the delete workqueue. This
	 * avoids calling into DLM under memory pressure, which can deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_delete_work(gl, 0))
			gfs2_glock_queue_put(gl);
		return false;
	}

	return generic_drop_inode(inode);
}

static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int val;

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	val = sdp->sd_tune.gt_logd_secs;
	if (val != 30)
		seq_printf(s, ",commit=%d", val);
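	/*
	 * Only tunables that differ from the built-in defaults (30s for
	 * commit and statfs_quantum, 60s for quota_quantum) are reported;
	 * statfs_quantum=0 stands in for the statfs_slow mode.
	 */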
	val = sdp->sd_tune.gt_statfs_quantum;
	if (val != 30)
		seq_printf(s, ",statfs_quantum=%d", val);
	else if (sdp->sd_tune.gt_statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	val = sdp->sd_tune.gt_quota_quantum;
	if (val != 60)
		seq_printf(s, ",quota_quantum=%d", val);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually
 * @gl: The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock. Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}

static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we'll get the lock immediately.
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request.
	 * If they don't have the inode open, they'll evict the cached inode
	 * and release the lock.  Otherwise, if they poke the inode glock,
	 * we'll take this as an indication that they still need the iopen
	 * glock and that they'll take care of deleting the inode when
	 * they're done.  As a last resort, if another node keeps holding the
	 * iopen glock without showing any activity on the inode glock, we'll
	 * eventually time out.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon as
	 * possible. Without that flag, they'd be notified implicitly by the
	 * second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return false;

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return true;
}

/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder to use for the inode glock
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum dinode_demise evict_should_delete(struct inode *inode,
					      struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
		goto should_delete;
	}

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		return SHOULD_DEFER_EVICTION;

	/* Deletes should never happen under memory pressure anymore. */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return SHOULD_DEFER_EVICTION;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return SHOULD_DEFER_EVICTION;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return SHOULD_NOT_DELETE_DINODE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		ret = gfs2_inode_refresh(ip);
		if (ret)
			return SHOULD_NOT_DELETE_DINODE;
	}

	/*
	 * The inode may have been recreated in the meantime.
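	 * If so, i_nlink will be nonzero again and the dinode must not be
	 * deleted.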
	 */
	if (inode->i_nlink)
		return SHOULD_NOT_DELETE_DINODE;

should_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return SHOULD_NOT_DELETE_DINODE;
		}
	}
	return SHOULD_DELETE_DINODE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	/* We're about to clear the bitmap for the dinode, but as soon as we
	   do, gfs2_create_inode can create another inode at the same block
	   location and try to set gl_object again. We clear gl_object here so
	   that subsequent inode creates don't see an old gl_object. */
	glock_clear_object(ip->i_gl, ip);
	ret = gfs2_dinode_dealloc(ip);
	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
out:
	return ret;
}

/*
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read-only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
		clear_inode(inode);
		return;
	}

	if (inode->i_nlink || sb_rdonly(sb))
		goto out;

	gfs2_holder_mark_uninitialized(&gh);
	ret = evict_should_delete(inode, &gh);
	if (ret == SHOULD_DEFER_EVICTION)
		goto out;
	if (ret == SHOULD_DELETE_DINODE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh)) {
		glock_clear_object(ip->i_gl, ip);
		gfs2_glock_dq_uninit(&gh);
	}
	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_delete(ip, NULL);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_add_to_lru(ip->i_gl);
		gfs2_glock_put_eventually(ip->i_gl);
		ip->i_gl = NULL;
	}
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
			gfs2_glock_dq(&ip->i_iopen_gh);
		}
		gfs2_glock_hold(gl);
		gfs2_holder_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}

struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/* Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'.
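	 * Returns NULL if no local statfs inode exists for that journal id.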
	 */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze,
	.thaw_super		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};