// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

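/* Workqueue onto which sd_freeze_work is queued; see freeze_go_callback(). */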
struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

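/**
 * gfs2_ail_error - complain about a buffer in an unexpected state on the AIL
 * @gl: the glock the buffer is attached to
 * @bh: the offending buffer
 *
 * Logs the state of the buffer and its glock, then marks the filesystem
 * for a delayed withdraw.
 */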
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
}

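/**
 * gfs2_ail_empty_gl - empty out a glock's AIL list by writing revokes
 * @gl: the glock
 *
 * Writes a revoke for every buffer on the glock's AIL list and flushes
 * the log. If the AIL list is already empty, there may still be revokes
 * queued or in flight at the superblock level, in which case the log is
 * flushed or waited for as needed before returning.
 *
 * Returns: errno
 */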
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret = 0;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
		goto flush;
	}
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				GFS2_LFC_AIL_EMPTY_GL);
	return ret;
}

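/**
 * gfs2_ail_flush - write revokes for a glock's AIL list and flush the log
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 */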
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 * Returns: errno
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawing_or_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 *
 * Returns: errno
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO flags; DIO_METADATA is expected to be set
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

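/**
 * gfs2_rgrp_go_dump - print information about a resource group glock
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */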
static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

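/**
 * gfs2_glock2inode - look up the inode attached to a glock
 * @gl: The glock
 *
 * If an inode is attached, GIF_GLOP_PENDING is set on it under
 * gl_lockref.lock; gfs2_clear_glop_pending() clears the flag and wakes
 * any waiters once the glock operation has completed.
 *
 * Returns: the inode, or NULL if none is attached
 */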
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

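/**
 * gfs2_glock2rgrp - look up the resource group attached to a glock
 * @gl: The glock
 *
 * Returns: the resource group, or NULL if none is attached
 */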
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

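/**
 * gfs2_clear_glop_pending - indicate that a glock operation has finished
 * @ip: the inode, as returned by gfs2_glock2inode() (may be NULL)
 *
 * Clears GIF_GLOP_PENDING and wakes up anyone waiting on that flag.
 */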
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_METADATA is set when the cached metadata is to be invalidated
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

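/**
 * gfs2_dinode_in - copy an on-disk dinode into an incore inode
 * @ip: The GFS2 inode
 * @buf: the buffer containing the on-disk dinode
 *
 * Validates the dinode against the incore inode and fills in the incore
 * fields, converting from the big-endian on-disk format.
 *
 * Returns: 0 on success, -EIO if the dinode is corrupt
 */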
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&inode->i_atime, &atime) < 0)
		inode->i_atime = atime;
	inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
			be32_to_cpu(str->di_ctime_nsec));

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip))
		goto corrupt;

	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	return gfs2_inode_refresh(ip);
}

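/**
 * inode_go_held - called each time an inode glock holder is granted
 * @gh: the glock holder
 *
 * Waits for in-flight direct I/O unless the lock is held in LM_ST_DEFERRED
 * mode, and resumes an interrupted truncate if the dinode has
 * GFS2_DIF_TRUNC_IN_PROG set and the glock is held exclusively.
 *
 * Returns: errno
 */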
static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	const struct inode *inode = &ip->i_inode;

	if (ip == NULL)
		return;

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(inode->i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode),
		       inode->i_data.nrpages);
}

/**
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */

static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
	    (gl->gl_state != LM_ST_SHARED &&
	     gl->gl_state != LM_ST_UNLOCKED) ||
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/*
	 * Try to get an active super block reference to prevent racing with
	 * unmount (see super_trylock_shared()). But note that unmount isn't
	 * the only place where a write lock on s_umount is taken, and we can
	 * fail here because of things like remount as well.
	 */
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 *
 * Returns: errno
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok - check if the freeze glock may be demoted
 * @gl: the glock
 *
 * Always returns 0, i.e. the freeze glock is never ok to demote.
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs) ||
	    test_bit(SDF_KILL, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal that a node has withdrawn
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	 * live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw or gfs2_recover_journal directly
	 * here, because this is called from the glock unlock function and
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_callback = freeze_go_callback,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

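/* The glock operations for each lock type, indexed by LM_TYPE_*. */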
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};