// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
	gfs2_withdraw(gl->gl_name.ln_sbd);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
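		 *
		 * (log_flush_wait() below sleeps until sd_log_in_flight, the
		 * count of log buffers with I/O still outstanding, drops to
		 * zero.)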
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 * Returns: errno
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
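 * (DIO_METADATA in @flags requests invalidation of the cached metadata
 * pages; the WARN_ON_ONCE() below checks that assumption.)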
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
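	 * (smp_mb__before_atomic() ensures the writeback above is ordered
	 * before the clear_bit() below.)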
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	bool is_new = ip->i_inode.i_flags & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = mode;
	if (is_new) {
		ip->i_inode.i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
						   be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
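	/* i_height and i_depth are u8, so bounds-check the on-disk values
	 * before narrowing them. */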
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work.
	 * The freeze_work calls freeze_func, which tries to reacquire the
	 * freeze glock in SH, effectively waiting for the thaw on the node
	 * who holds it in EX. Once thawed, the work func acquires the freeze
	 * glock in SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize some head of the log stuff */
		if (!gfs2_withdrawn(sdp)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok - Check to see if it's ok to demote the freeze glock
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle.
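	 * Withdraw processing waits for GLF_FREEING to clear, so the
	 * wake_up_bit() below is what lets those waiters proceed.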
	 */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
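	 * (The control work runs gfs2_control_func() in lock_dlm.c, which
	 * checks the other journals and kicks off any needed recovery.)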
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};