/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

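/* Workqueue on which freeze_go_sync() below queues sd_freeze_work. */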
struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */
55 
56 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
57 			     unsigned int nr_revokes)
58 {
59 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
60 	struct list_head *head = &gl->gl_ail_list;
61 	struct gfs2_bufdata *bd, *tmp;
62 	struct buffer_head *bh;
63 	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
64 
65 	gfs2_log_lock(sdp);
66 	spin_lock(&sdp->sd_ail_lock);
67 	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
68 		if (nr_revokes == 0)
69 			break;
70 		bh = bd->bd_bh;
71 		if (bh->b_state & b_state) {
72 			if (fsync)
73 				continue;
74 			gfs2_ail_error(gl, bh);
75 		}
76 		gfs2_trans_add_revoke(sdp, bd);
77 		nr_revokes--;
78 	}
79 	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
80 	spin_unlock(&sdp->sd_ail_lock);
81 	gfs2_log_unlock(sdp);
82 }
83 
84 
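/**
 * gfs2_ail_empty_gl - revoke all remaining AIL buffers for a glock
 * @gl: the glock
 *
 * Builds a small transaction directly on the stack (a shortened form of
 * gfs2_trans_begin()) and issues a revoke for every buffer still on the
 * glock's AIL list, then flushes the log.
 */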
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin();
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = _RET_IP_;
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

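	/*
	 * One log descriptor block holds (sb_bsize - sizeof(struct
	 * gfs2_log_descriptor)) / sizeof(u64) revokes; each continuation
	 * block holds (sb_bsize - sizeof(struct gfs2_meta_header)) /
	 * sizeof(u64) more.  Grow max_revokes a block at a time until it
	 * covers everything currently on the AIL list.
	 */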
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until that I/O is
 * complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_lockref.lock);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

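	/*
	 * Resource group metadata lives in the per-superblock address space
	 * (sdp->sd_aspace); gl_vm.start and gl_vm.end delimit this rgrp's
	 * byte range within it, so only that range is written and waited on.
	 */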
	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

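/*
 * gfs2_glock2inode - safely dereference gl->gl_object as an inode
 *
 * GIF_GLOP_PENDING is set under gl_lockref.lock so the inode cannot go
 * away while a glock operation is still using it; the matching
 * gfs2_clear_glop_pending() below clears the bit and wakes any waiters.
 */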
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

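	/*
	 * Ordering matters here: flush the log so that all journaled
	 * metadata for this glock is safely on disk, then write back and
	 * wait on the metadata mapping and (for regular files) the data
	 * mapping, and finally empty the AIL so no buffers remain pinned
	 * to the glock.
	 */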
	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

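	/*
	 * Invalidating the rindex inode means the cached resource group
	 * index can no longer be trusted: flush the log and mark the
	 * in-core rindex as out of date so it is re-read on next use.
	 */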
	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

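/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: the incore inode
 * @buf: the buffer containing the on-disk dinode
 *
 * Byte-swaps the big-endian on-disk fields into @ip and its VFS inode,
 * and sanity-checks the block number, metadata tree height and
 * directory hash depth.
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */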
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

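	/*
	 * If a truncate was interrupted (GFS2_DIF_TRUNC_IN_PROG is still
	 * set on disk), hand the inode off to be finished: add it to
	 * sd_trunc_list and wake up the quota daemon, which also services
	 * that list.
	 */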
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu p:%lu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

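	/*
	 * If this node has a live journal when the freeze glock is being
	 * synced, freeze the local VFS, queue sd_freeze_work, and flush
	 * the log with the FREEZE flag so the journal is left clean.  A
	 * failed freeze_super() is treated as a withdraw.
	 */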
	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize the head of the log  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

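	/*
	 * A remote node asking us to drop a shared iopen glock typically
	 * means it has unlinked the inode.  Queue the delete work so this
	 * node can check whether the inode can now be deallocated; take a
	 * glock reference for the queued work and drop it again if the
	 * work was already queued.
	 */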
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

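/*
 * Per-glock-type operation tables.  gfs2_glops_list at the bottom maps
 * each LM_TYPE_* value to its operations for use by the glock core.
 */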
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};