/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
        fs_err(gl->gl_name.ln_sbd,
               "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
               "state 0x%lx\n",
               bh, (unsigned long long)bh->b_blocknr, bh->b_state,
               bh->b_page->mapping, bh->b_page->flags);
        fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
               gl->gl_name.ln_type, gl->gl_name.ln_number,
               gfs2_glock2aspace(gl));
        gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revokes to add to this transaction
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
                             unsigned int nr_revokes)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd, *tmp;
        struct buffer_head *bh;
        const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
                if (nr_revokes == 0)
                        break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
                                continue;
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
                nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
}

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_trans tr;

        memset(&tr, 0, sizeof(tr));
        INIT_LIST_HEAD(&tr.tr_buf);
        INIT_LIST_HEAD(&tr.tr_databuf);
        tr.tr_revokes = atomic_read(&gl->gl_ail_count);

        if (!tr.tr_revokes)
                return;

        /* A shortened, inline version of gfs2_trans_begin();
         * tr->alloced is not set since the transaction structure is
         * on the stack */
        tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
        tr.tr_ip = _RET_IP_;
        if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
                return;
        WARN_ON_ONCE(current->journal_info);
        current->journal_info = &tr;

        __gfs2_ail_flush(gl, false, tr.tr_revokes);

        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
        unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
        int ret;
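
        /*
         * Sizing note (derived from the expressions used here): each revoke
         * is a u64 block number, so the first log block, which carries a
         * struct gfs2_log_descriptor, holds
         * (bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64) revokes,
         * and every continuation block, which carries only a
         * struct gfs2_meta_header, holds
         * (bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64) more.
         */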
        if (!revokes)
                return;

        while (revokes > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

        ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
        __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd;
        int error;

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        if (rgd)
                gfs2_rgrp_brelse(rgd);
        spin_unlock(&gl->gl_lockref.lock);

        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return;
        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
        filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
        error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
        mapping_set_error(mapping, error);
        gfs2_ail_empty_gl(gl);

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        if (rgd)
                gfs2_free_clones(rgd);
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* invalidation flags
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gl->gl_object;

        if (rgd)
                gfs2_rgrp_brelse(rgd);

        WARN_ON_ONCE(!(flags & DIO_METADATA));
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

        if (rgd)
                rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error;

        if (ip && !S_ISREG(ip->i_inode.i_mode))
                ip = NULL;
        if (ip) {
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
                inode_dio_wait(&ip->i_inode);
        }
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return;

        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
        filemap_fdatawrite(metamapping);
        if (ip) {
                struct address_space *mapping = ip->i_inode.i_mapping;
                filemap_fdatawrite(mapping);
                error = filemap_fdatawait(mapping);
                mapping_set_error(mapping, error);
        }
        error = filemap_fdatawait(metamapping);
        mapping_set_error(metamapping, error);
        gfs2_ail_empty_gl(gl);
        /*
         * Writeback of the data mapping may cause the dirty flag to be set
         * so we have to clear it again here.
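         * The smp_mb__before_atomic() is needed because clear_bit() is a
         * non-value-returning atomic op and does not imply a memory
         * barrier on its own.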
         */
        smp_mb__before_atomic();
        clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* invalidation flags
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_inode *ip = gl->gl_object;

        gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

        if (flags & DIO_METADATA) {
                struct address_space *mapping = gfs2_glock2aspace(gl);
                truncate_inode_pages(mapping, 0);
                if (ip) {
                        set_bit(GIF_INVALID, &ip->i_flags);
                        forget_all_cached_acls(&ip->i_inode);
                        gfs2_dir_hash_inval(ip);
                }
        }

        if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
                gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
                gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
        }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_holder *gh;

        if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
                return 0;

        if (!list_empty(&gl->gl_holders)) {
                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
                if (gh->gh_list.next != &gl->gl_holders)
                        return 0;
        }

        return 1;
}

/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
        /*
         * We will need to review setting the nlink count here in the
         * light of the forthcoming ro bind mount work. This is a reminder
         * to do that.
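         *
         * Note how the test below implements the rule stated above: the
         * count is only updated when it differs from the incore value and
         * the incore value has not already reached zero, so a link count
         * of zero is never raised again.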
         */
        if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
                if (nlink == 0)
                        clear_nlink(inode);
                else
                        set_nlink(inode, nlink);
        }
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
        const struct gfs2_dinode *str = buf;
        struct timespec atime;
        u16 height, depth;

        if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
                goto corrupt;
        ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
        ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
        ip->i_inode.i_rdev = 0;
        switch (ip->i_inode.i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
                                           be32_to_cpu(str->di_minor));
                break;
        }

        i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
        i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
        gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
        i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
        gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
        atime.tv_sec = be64_to_cpu(str->di_atime);
        atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
        if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
                ip->i_inode.i_atime = atime;
        ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
        ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
        ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
        ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

        ip->i_goal = be64_to_cpu(str->di_goal_meta);
        ip->i_generation = be64_to_cpu(str->di_generation);

        ip->i_diskflags = be32_to_cpu(str->di_flags);
        ip->i_eattr = be64_to_cpu(str->di_eattr);
        /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
        gfs2_set_inode_flags(&ip->i_inode);
        height = be16_to_cpu(str->di_height);
        if (unlikely(height > GFS2_MAX_META_HEIGHT))
                goto corrupt;
        ip->i_height = (u8)height;

        depth = be16_to_cpu(str->di_depth);
        if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
                goto corrupt;
        ip->i_depth = (u8)depth;
        ip->i_entries = be32_to_cpu(str->di_entries);

        if (S_ISREG(ip->i_inode.i_mode))
                gfs2_set_aops(&ip->i_inode);

        return 0;
corrupt:
        gfs2_consist_inode(ip);
        return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        error = gfs2_dinode_in(ip, dibh->b_data);
        brelse(dibh);
        clear_bit(GIF_INVALID, &ip->i_flags);

        return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;

        if (!ip || (gh->gh_flags & GL_SKIP))
                return 0;

        if (test_bit(GIF_INVALID, &ip->i_flags)) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        return error;
        }

        if (gh->gh_state != LM_ST_DEFERRED)
                inode_dio_wait(&ip->i_inode);

        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_state == LM_ST_EXCLUSIVE)) {
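                /*
                 * A truncate on this inode was interrupted, so queue the
                 * inode on sd_trunc_list and wake gfs2_quotad to finish
                 * the job. Returning 1 (rather than 0 or a -errno) appears
                 * intended to tell the core glock code not to grant this
                 * holder yet, so it can be retried once the truncate has
                 * completed.
                 */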
                spin_lock(&sdp->sd_trunc_lock);
                if (list_empty(&ip->i_trunc_list))
                        list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                wake_up(&sdp->sd_quota_wait);
                return 1;
        }

        return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock whose inode is to be printed
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
        const struct gfs2_inode *ip = gl->gl_object;
        if (ip == NULL)
                return;
        gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
                       (unsigned long long)ip->i_no_formal_ino,
                       (unsigned long long)ip->i_no_addr,
                       IF2DT(ip->i_inode.i_mode), ip->i_flags,
                       (unsigned int)ip->i_diskflags,
                       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
        int error = 0;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (gl->gl_state == LM_ST_SHARED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
                        printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
                gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
        }
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_log_header_host head;
        int error;

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

                error = gfs2_find_jhead(sdp->sd_jdesc, &head);
                if (error)
                        gfs2_consist(sdp);
                if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
                        gfs2_consist(sdp);

                /* Initialize the head of the log */
                if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
                        sdp->sd_log_sequence = head.lh_sequence + 1;
                        gfs2_log_pointers_init(sdp, head.lh_blkno);
                }
        }
        return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
        return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl->gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
                return;

        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
                gl->gl_lockref.count++;
                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
                        gl->gl_lockref.count--;
        }
}

const struct gfs2_glock_operations gfs2_meta_glops = {
        .go_type = LM_TYPE_META,
};
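
/*
 * The tables below bind each glock type to its callbacks. Hooks a type
 * does not need are simply left unset (NULL); the core glock code checks
 * each pointer before calling through it.
 */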

const struct gfs2_glock_operations gfs2_inode_glops = {
        .go_sync = inode_go_sync,
        .go_inval = inode_go_inval,
        .go_demote_ok = inode_go_demote_ok,
        .go_lock = inode_go_lock,
        .go_dump = inode_go_dump,
        .go_type = LM_TYPE_INODE,
        .go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
        .go_sync = rgrp_go_sync,
        .go_inval = rgrp_go_inval,
        .go_lock = gfs2_rgrp_go_lock,
        .go_unlock = gfs2_rgrp_go_unlock,
        .go_dump = gfs2_rgrp_dump,
        .go_type = LM_TYPE_RGRP,
        .go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
        .go_sync = freeze_go_sync,
        .go_xmote_bh = freeze_go_xmote_bh,
        .go_demote_ok = freeze_go_demote_ok,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_type = LM_TYPE_IOPEN,
        .go_callback = iopen_go_callback,
        .go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
        .go_type = LM_TYPE_FLOCK,
        .go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
        .go_type = LM_TYPE_QUOTA,
        .go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
        .go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
        [LM_TYPE_META] = &gfs2_meta_glops,
        [LM_TYPE_INODE] = &gfs2_inode_glops,
        [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
        [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
        [LM_TYPE_FLOCK] = &gfs2_flock_glops,
        [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
        [LM_TYPE_QUOTA] = &gfs2_quota_glops,
        [LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
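
/*
 * Note: gfs2_glops_list maps an LM_TYPE_* value directly to its operations
 * table, e.g. gfs2_glops_list[LM_TYPE_INODE] yields &gfs2_inode_glops.
 */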