/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revokes to add
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin()
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = _RET_IP_;
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}
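/*
 * Note on the revoke accounting in gfs2_ail_flush() below: revokes are
 * written to the journal as arrays of u64 block numbers.  The first log
 * block of a revoke set carries a struct gfs2_log_descriptor header, so
 * it holds (sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64)
 * entries; each continuation block carries only a struct gfs2_meta_header
 * and holds (sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64)
 * entries.  The while loop grows max_revokes one continuation block at a
 * time until the reservation covers every buffer on the AIL list.
 */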
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_spin);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* invalidation flags
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}
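/*
 * Ordering note (an observation on the code in this file, not a spec):
 * both rgrp_go_sync() above and inode_go_sync() below call
 * gfs2_log_flush() before starting writeback of the address space, so
 * that journaled metadata (and any revokes) are stable on disk before
 * the in-place blocks are written, and both wait for writeback to
 * complete before emptying the glock's AIL list.
 */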
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* invalidation flags
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	/* Don't demote if more than one holder is queued */
	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}

/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say.  When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on-disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
			set_nlink(inode, nlink);
	}
}
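/**
 * gfs2_dinode_in - copy an on-disk dinode into an in-core inode
 * @ip: The GFS2 inode to populate
 * @buf: The buffer containing the on-disk (big-endian) dinode
 *
 * Converts each on-disk field to host endianness and sanity-checks the
 * block address, metadata tree height and directory depth, flagging a
 * consistency error if the dinode looks corrupt.
 *
 * Returns: 0 on success, -EIO on corruption
 */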
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	/* Only move atime forwards; the in-core copy may be newer than
	   the on-disk one */
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}
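/*
 * Note on the GFS2_DIF_TRUNC_IN_PROG case in inode_go_lock() below: if a
 * previous mount crashed part-way through a truncate, the dinode still
 * carries the trunc-in-progress flag.  Rather than completing the
 * truncate in the lock path, the inode is queued on sd_trunc_list and
 * the quota daemon is woken to finish the job; returning 1 instead of an
 * errno leaves the holder queued to be retried rather than failing it.
 */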
/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the holder (unused)
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}
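/*
 * The tables below bind each lock type to its operations.  Any callback
 * that is left unset is simply skipped by the glock core, so types such
 * as LM_TYPE_META get default behaviour only.
 */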
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};