// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
	gfs2_withdraw(gl->gl_name.ln_sbd);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revokes to issue
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

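	/*
	 * Under the log and AIL locks, walk the glock's AIL list backwards,
	 * queueing a revoke for each buffer until the nr_revokes limit is
	 * reached.  In the fsync case, buffers that are still dirty, pinned,
	 * or locked are skipped; otherwise such a buffer is an AIL error.
	 */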
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	int ret;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	INIT_LIST_HEAD(&tr.tr_ail1_list);
	INIT_LIST_HEAD(&tr.tr_ail2_list);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	/*
	 * A shortened, inline version of gfs2_trans_begin().
	 * tr->alloced is not set since the transaction structure is
	 * on the stack.
	 */
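	/* Reserve one block for a log header plus room for the revokes. */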
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
	tr.tr_ip = _RET_IP_;
	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
	if (ret < 0)
		return ret;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

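	/*
	 * The first revoke descriptor block holds max_revokes entries; each
	 * continuation block (a plain metadata header) holds a few more.
	 * Round max_revokes up in whole-block steps until it covers every
	 * buffer currently on the AIL list.
	 */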
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until the I/O is
 * complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	WARN_ON_ONCE(error);
	mapping_set_error(mapping, error);
	if (!error)
		error = gfs2_ail_empty_gl(gl);

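	/* gl_object may have been cleared; re-fetch the rgrp under the lock. */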
	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

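	/*
	 * Flush the journal before starting writeback so that the in-place
	 * metadata and data writes below never reach the disk ahead of their
	 * journal entries, then write back and wait on both mappings.
	 */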
	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, ret);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

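/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: The on-disk dinode data
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */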
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

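	/*
	 * An interrupted truncate is completed by the quotad thread, so put
	 * the inode on its trunc list and wake it up.
	 */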
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock protecting the inode
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_req == LM_ST_EXCLUSIVE && !gfs2_withdrawn(sdp)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

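	/*
	 * With the journal still live, invalidate its cached metadata,
	 * re-read the journal head, and make sure the log was left in a
	 * clean (unmounted) state before picking up the log sequence where
	 * it left off.
	 */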
	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!gfs2_withdrawn(sdp)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
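		/*
		 * Hold an extra glock reference for the queued delete work;
		 * drop it again if the work was already queued.
		 */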
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

645 
646 /**
647  * nondisk_go_callback - used to signal when a node did a withdraw
648  * @gl: the nondisk glock
649  * @remote: true if this came from a different cluster node
650  *
651  */
652 static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
653 {
654 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
655 
656 	/* Ignore the callback unless it's from another node, and it's the
657 	   live lock. */
658 	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
659 		return;
660 
661 	/* First order of business is to cancel the demote request. We don't
662 	 * really want to demote a nondisk glock. At best it's just to inform
663 	 * us of another node's withdraw. We'll keep it in SH mode. */
664 	clear_bit(GLF_DEMOTE, &gl->gl_flags);
665 	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
666 
667 	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
668 	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
669 	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
670 	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
671 		return;
672 
673 	/* We only care when a node wants us to unlock, because that means
674 	 * they want a journal recovered. */
675 	if (gl->gl_demote_state != LM_ST_UNLOCKED)
676 		return;
677 
678 	if (sdp->sd_args.ar_spectator) {
679 		fs_warn(sdp, "Spectator node cannot recover journals.\n");
680 		return;
681 	}
682 
683 	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
684 	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
685 	/*
686 	 * We can't call remote_withdraw directly here or gfs2_recover_journal
687 	 * because this is called from the glock unlock function and the
688 	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
689 	 * we were called from. So we queue it to the control work queue in
690 	 * lock_dlm.
691 	 */
692 	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
693 }
694 
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};