// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/sched/signal.h>

#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "recovery.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"

/**
 * gfs2_update_stats - Update time based stats
 * @s: The stats to update (local or global)
 * @index: The index inside @s
 * @sample: New data to include
 */
static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
				     s64 sample)
{
	/*
	 * @delta is the difference between the current rtt sample and the
	 * running average srtt. We add 1/8 of that to the srtt in order to
	 * update the current srtt estimate. The variance estimate is a bit
	 * more complicated. We subtract the current variance estimate from
	 * the abs value of the @delta and add 1/4 of that to the running
	 * total.  That's equivalent to 3/4 of the current variance
	 * estimate plus 1/4 of the abs of @delta.
	 *
	 * Note that the index points at the array entry containing the
	 * smoothed mean value, and the variance is always in the following
	 * entry
	 *
	 * Reference: TCP/IP Illustrated, vol 2, p. 831,832
	 * All times are in units of integer nanoseconds. Unlike the TCP/IP
	 * case, they are not scaled fixed point.
	 */

	s64 delta = sample - s->stats[index];
	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
}
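
/*
 * Worked example (illustrative numbers only, not taken from a real trace):
 * with a smoothed value s->stats[index] == 1000ns, a variance estimate
 * s->stats[index + 1] == 400ns and a new sample of 1800ns:
 *
 *	delta = 1800 - 1000 = 800
 *	mean:     1000 + (800 >> 3)         = 1100
 *	variance:  400 + ((800 - 400) >> 2) =  500
 *
 * i.e. the mean moves 1/8 of the way towards the sample and the variance
 * moves 1/4 of the way towards |delta|.
 */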

/**
 * gfs2_update_reply_times - Update locking statistics
 * @gl: The glock to update
 *
 * This assumes that gl->gl_dstamp has been set earlier.
 *
 * The rtt (lock round trip time) is an estimate of the time
 * taken to perform a dlm lock request. We update it on each
 * reply from the dlm.
 *
 * The blocking flag is set on the glock for all dlm requests
 * which may potentially block due to lock requests from other nodes.
 * DLM requests where the current lock state is exclusive, the
 * requested state is null (or unlocked) or where the TRY or
 * TRY_1CB flags are set are classified as non-blocking. All
 * other DLM requests are counted as (potentially) blocking.
 */
static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
			 GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
	s64 rtt;

	preempt_disable();
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
	preempt_enable();

	trace_gfs2_glock_lock_time(gl, rtt);
}

/**
 * gfs2_update_request_times - Update locking statistics
 * @gl: The glock to update
 *
 * The irt (lock inter-request times) measures the average time
 * between requests to the dlm. It is updated immediately before
 * each dlm call.
 */

static inline void gfs2_update_request_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	ktime_t dstamp;
	s64 irt;

	preempt_disable();
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
	preempt_enable();
}
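
/*
 * Illustrative example (hypothetical timing): if the previous dlm call for
 * this glock was made 5ms earlier, the sample fed into the GFS2_LKS_SIRT
 * slot is 5,000,000ns, smoothed by gfs2_update_stats() with the same
 * 1/8 (mean) and 1/4 (variance) weights described above.
 */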

static void gdlm_ast(void *arg)
{
	struct gfs2_glock *gl = arg;
	unsigned ret = gl->gl_state;

	/* If the glock is dead, we only react to a dlm_unlock() reply. */
	if (__lockref_is_dead(&gl->gl_lockref) &&
	    gl->gl_lksb.sb_status != -DLM_EUNLOCK)
		return;

	gfs2_update_reply_times(gl);
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
		if (gl->gl_ops->go_free)
			gl->gl_ops->go_free(gl);
		gfs2_glock_free(gl);
		return;
	case -DLM_ECANCEL: /* Cancel while getting lock */
		ret |= LM_OUT_CANCELED;
		goto out;
	case -EAGAIN: /* Try lock fails */
	case -EDEADLK: /* Deadlock detected */
		goto out;
	case -ETIMEDOUT: /* Canceled due to timeout */
		ret |= LM_OUT_ERROR;
		goto out;
	case 0: /* Success */
		break;
	default: /* Something unexpected */
		BUG();
	}

	ret = gl->gl_req;
	if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (gl->gl_req == LM_ST_SHARED)
			ret = LM_ST_DEFERRED;
		else if (gl->gl_req == LM_ST_DEFERRED)
			ret = LM_ST_SHARED;
		else
			BUG();
	}

	set_bit(GLF_INITIAL, &gl->gl_flags);
	gfs2_glock_complete(gl, ret);
	return;
out:
	if (!test_bit(GLF_INITIAL, &gl->gl_flags))
		gl->gl_lksb.sb_lkid = 0;
	gfs2_glock_complete(gl, ret);
}

static void gdlm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	if (__lockref_is_dead(&gl->gl_lockref))
		return;

	switch (mode) {
	case DLM_LOCK_EX:
		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
		break;
	case DLM_LOCK_CW:
		gfs2_glock_cb(gl, LM_ST_DEFERRED);
		break;
	case DLM_LOCK_PR:
		gfs2_glock_cb(gl, LM_ST_SHARED);
		break;
	default:
		fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
		BUG();
	}
}

/* convert gfs lock-state to dlm lock-mode */

static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	fs_err(sdp, "unknown LM state %d\n", lmstate);
	BUG();
	return -1;
}

static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
		      const int req)
{
	u32 lkf = 0;

	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
		else
			BUG();
	}

	if (gl->gl_lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;
		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
			lkf |= DLM_LKF_QUECVT;
	}

	return lkf;
}

static void gfs2_reverse_hex(char *c, u64 value)
{
	*c = '0';
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}
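
/*
 * Illustrative example (hypothetical values): gdlm_lock() below builds the
 * dlm resource name as a space-padded string with the lock type ending at
 * offset 7 and the lock number ending at offset 23, matching the "%8x%16x"
 * format used by sync_lock().  For ln_type == 0x2 and ln_number == 0x1234,
 * gfs2_reverse_hex() writes the digits backwards from those offsets,
 * producing:
 *
 *	"       2            1234"
 */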

static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	int req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";
	int error;

	req = make_mode(gl->gl_name.ln_sbd, req_state);
	lkf = make_flags(gl, flags, req);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	if (gl->gl_lksb.sb_lkid) {
		gfs2_update_request_times(gl);
	} else {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	}
	/*
	 * Submit the actual lock request.
	 */

again:
	error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
	if (error == -EBUSY) {
		msleep(20);
		goto again;
	}
	return error;
}

static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	BUG_ON(!__lockref_is_dead(&gl->gl_lockref));

	if (gl->gl_lksb.sb_lkid == 0) {
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_update_request_times(gl);

	/* don't want to call dlm if we've unmounted the lock protocol */
	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
		gfs2_glock_free(gl);
		return;
	}
	/* don't want to skip dlm_unlock writing the lvb when lock has one */

	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    !gl->gl_lksb.sb_lvbptr) {
		gfs2_glock_free_later(gl);
		return;
	}

again:
	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error == -EBUSY) {
		msleep(20);
		goto again;
	}

	if (error) {
		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
	}
}

static void gdlm_cancel(struct gfs2_glock *gl)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}

/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 *
 *  0. gfs2 checks for another cluster node withdraw, needing journal replay
 *  1. dlm_controld sees lockspace members change
 *  2. dlm_controld blocks dlm-kernel locking activity
 *  3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 *  4. dlm_controld starts and finishes its own user level recovery
 *  5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 *  6. dlm_recoverd notifies gfs2 of failed nodes (recover_slot)
 *  7. dlm_recoverd does its own lock recovery
 *  8. dlm_recoverd unblocks dlm-kernel locking activity
 *  9. dlm_recoverd notifies gfs2 when done (recover_done with new generation)
 * 10. gfs2_control updates control_lock lvb with new generation and jid bits
 * 11. gfs2_control enqueues journals for gfs2_recover to recover (maybe none)
 * 12. gfs2_recover dequeues and recovers journals of failed nodes
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 * 14. gfs2_control updates control_lock lvb jid bits for recovered journals
 * 15. gfs2_control unblocks normal locking when all journals are recovered
 *
 * - failures during recovery
 *
 * recover_prep() may set BLOCK_LOCKS (step 3) again before gfs2_control
 * clears BLOCK_LOCKS (step 15), e.g. another node fails while still
 * recovering for a prior failure.  gfs2_control needs a way to detect
 * this so it can leave BLOCK_LOCKS set in step 15.  This is managed using
 * the recover_block and recover_start values.
 *
 * recover_done() provides a new lockspace generation number each time it
 * is called (step 9).  This generation number is saved as recover_start.
 * When recover_prep() is called, it sets BLOCK_LOCKS and sets
 * recover_block = recover_start.  So, while recover_block is equal to
 * recover_start, BLOCK_LOCKS should remain set.  (recover_spin must
 * be held around the BLOCK_LOCKS/recover_block/recover_start logic.)
 *
 * - more specific gfs2 steps in sequence above
 *
 *  3. recover_prep sets BLOCK_LOCKS and sets recover_block = recover_start
 *  6. recover_slot records any failed jids (maybe none)
 *  9. recover_done sets recover_start = new generation number
 * 10. gfs2_control sets control_lock lvb = new gen + bits for failed jids
 * 12. gfs2_recover does journal recoveries for failed jids identified above
 * 14. gfs2_control clears control_lock lvb bits for recovered jids
 * 15. gfs2_control checks if recover_block == recover_start (step 3 occurred
 *     again) then do nothing, otherwise if recover_start > recover_block
 *     then clear BLOCK_LOCKS.
 *
 * - parallel recovery steps across all nodes
 *
 * All nodes attempt to update the control_lock lvb with the new generation
 * number and jid bits, but only the first to get the control_lock EX will
 * do so; others will see that it's already done (lvb already contains new
 * generation number.)
 *
 * . All nodes get the same recover_prep/recover_slot/recover_done callbacks
 * . All nodes attempt to set control_lock lvb gen + bits for the new gen
 * . One node gets control_lock first and writes the lvb, others see it's done
 * . All nodes attempt to recover jids for which they see control_lock bits set
 * . One node succeeds for a jid, and that one clears the jid bit in the lvb
 * . All nodes will eventually see all lvb bits clear and unblock locks
 *
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 *
 * 1. jid fails
 * 2. lvb bit set for step 1
 * 3. jid recovered for step 1
 * 4. jid taken again (new mount)
 * 5. jid fails (for step 4)
 * 6. lvb bit set for step 5 (will already be set)
 * 7. lvb bit cleared for step 3
 *
 * This is not a problem because the failure in step 5 does not
 * require recovery, because the mount in step 4 could not have
 * progressed far enough to unblock locks and access the fs.  The
 * control_mount() function waits for all recoveries to be complete
 * for the latest lockspace generation before ever unblocking locks
 * and returning.  The mount in step 4 waits until the recovery in
 * step 1 is done.
 *
 * - special case of first mounter: first node to mount the fs
 *
 * The first node to mount a gfs2 fs needs to check all the journals
 * and recover any that need recovery before other nodes are allowed
 * to mount the fs.  (Others may begin mounting, but they must wait
 * for the first mounter to be done before taking locks on the fs
 * or accessing the fs.)  This has two parts:
 *
 * 1. The mounted_lock tells a node it's the first to mount the fs.
 * Each node holds the mounted_lock in PR while it's mounted.
 * Each node tries to acquire the mounted_lock in EX when it mounts.
 * If a node is granted the mounted_lock EX it means there are no
 * other mounted nodes (no PR locks exist), and it is the first mounter.
 * The mounted_lock is demoted to PR when first recovery is done, so
 * others will fail to get an EX lock, but will get a PR lock.
 *
 * 2. The control_lock blocks others in control_mount() while the first
 * mounter is doing first mount recovery of all journals.
 * A mounting node needs to acquire control_lock in EX mode before
 * it can proceed.  The first mounter holds control_lock in EX while doing
 * the first mount recovery, blocking mounts from other nodes, then demotes
 * control_lock to NL when it's done (others_may_mount/first_done),
 * allowing other nodes to continue mounting.
 *
 * first mounter:
 * control_lock EX/NOQUEUE success
 * mounted_lock EX/NOQUEUE success (no other PR, so no other mounters)
 * set first=1
 * do first mounter recovery
 * mounted_lock EX->PR
 * control_lock EX->NL, write lvb generation
 *
 * other mounter:
 * control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 * mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR)
 * mounted_lock PR/NOQUEUE success
 * read lvb generation
 * control_lock EX->NL
 * set first=0
 *
 * - mount during recovery
 *
 * If a node mounts while others are doing recovery (not first mounter),
 * the mounting node will get its initial recover_done() callback without
 * having seen any previous failures/callbacks.
 *
 * It must wait for all recoveries preceding its mount to be finished
 * before it unblocks locks.  It does this by repeating the "other mounter"
 * steps above until the lvb generation number is >= its mount generation
 * number (from initial recover_done) and all lvb bits are clear.
 *
 * - control_lock lvb format
 *
 * 4 bytes generation number: the latest dlm lockspace generation number
 * from recover_done callback.  Indicates the jid bitmap has been updated
 * to reflect all slot failures through that generation.
 * 4 bytes unused.
 * GDLM_LVB_SIZE-8 bytes of jid bit map. If bit N is set, it indicates
 * that jid N needs recovery.
 */

#define JID_BITMAP_OFFSET 8 /* 4 byte generation number + 4 byte unused */
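
/*
 * Illustrative lvb layout (hypothetical values): with generation 7 and only
 * jid 2 needing recovery, the start of the control_lock lvb would look like:
 *
 *	bytes 0..3:  07 00 00 00   (generation, little endian)
 *	bytes 4..7:  00 00 00 00   (unused)
 *	bytes 8.. :  04 00 00 ...  (jid bitmap; bit 2 set, little-endian
 *	                            bit numbering as used by test_bit_le())
 */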

static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
			     char *lvb_bits)
{
	__le32 gen;
	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	memcpy(&gen, lvb_bits, sizeof(__le32));
	*lvb_gen = le32_to_cpu(gen);
}

static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
			      char *lvb_bits)
{
	__le32 gen;
	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
}

static int all_jid_bits_clear(char *lvb)
{
	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
			GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
}

static void sync_wait_cb(void *arg)
{
	struct lm_lockstruct *ls = arg;
	complete(&ls->ls_sync_wait);
}

static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
	if (error) {
		fs_err(sdp, "%s lkid %x error %d\n",
		       name, lksb->sb_lkid, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	if (lksb->sb_status != -DLM_EUNLOCK) {
		fs_err(sdp, "%s lkid %x status %d\n",
		       name, lksb->sb_lkid, lksb->sb_status);
		return -1;
	}
	return 0;
}

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
		     unsigned int num, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char strname[GDLM_STRNAME_BYTES];
	int error, status;

	memset(strname, 0, GDLM_STRNAME_BYTES);
	snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);

	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
			 strname, GDLM_STRNAME_BYTES - 1,
			 0, sync_wait_cb, ls, NULL);
	if (error) {
		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
		       name, lksb->sb_lkid, flags, mode, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	status = lksb->sb_status;

	if (status && status != -EAGAIN) {
		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
		       name, lksb->sb_lkid, flags, mode, status);
	}

	return status;
}

static int mounted_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
}

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
			 &ls->ls_mounted_lksb, "mounted_lock");
}

static int control_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
}

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
			 &ls->ls_control_lksb, "control_lock");
}

/**
 * remote_withdraw - react to a node withdrawing from the file system
 * @sdp: The superblock
 */
static void remote_withdraw(struct gfs2_sbd *sdp)
{
	struct gfs2_jdesc *jd;
	int ret = 0, count = 0;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
			continue;
		ret = gfs2_recover_journal(jd, true);
		if (ret)
			break;
		count++;
	}

	/* Report how many of the other nodes' journals were checked. */
	fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret);
}

static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	/* First check for other nodes that may have done a withdraw. */
	if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
		remote_withdraw(sdp);
		clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
		return;
	}

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession.  Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery.  So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred blocking locks
790e0c2a9aaSDavid Teigland 	 * again while working above)
791e0c2a9aaSDavid Teigland 	 */
792e0c2a9aaSDavid Teigland 
793e0c2a9aaSDavid Teigland 	spin_lock(&ls->ls_recover_spin);
794e0c2a9aaSDavid Teigland 	if (ls->ls_recover_block == block_gen &&
795e0c2a9aaSDavid Teigland 	    ls->ls_recover_start == start_gen) {
796e0c2a9aaSDavid Teigland 		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
797e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
798e0c2a9aaSDavid Teigland 		fs_info(sdp, "recover generation %u done\n", start_gen);
799e0c2a9aaSDavid Teigland 		gfs2_glock_thaw(sdp);
800e0c2a9aaSDavid Teigland 	} else {
801e0c2a9aaSDavid Teigland 		fs_info(sdp, "recover generation %u block2 %u %u\n",
802e0c2a9aaSDavid Teigland 			start_gen, block_gen, ls->ls_recover_block);
803e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
804e0c2a9aaSDavid Teigland 	}
805e0c2a9aaSDavid Teigland }
806e0c2a9aaSDavid Teigland 
control_mount(struct gfs2_sbd * sdp)807e0c2a9aaSDavid Teigland static int control_mount(struct gfs2_sbd *sdp)
808e0c2a9aaSDavid Teigland {
809e0c2a9aaSDavid Teigland 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
810e0c2a9aaSDavid Teigland 	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
811e0c2a9aaSDavid Teigland 	int mounted_mode;
812e0c2a9aaSDavid Teigland 	int retries = 0;
813e0c2a9aaSDavid Teigland 	int error;
814e0c2a9aaSDavid Teigland 
815e0c2a9aaSDavid Teigland 	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
816e0c2a9aaSDavid Teigland 	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
817e0c2a9aaSDavid Teigland 	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
818e0c2a9aaSDavid Teigland 	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
819e0c2a9aaSDavid Teigland 	init_completion(&ls->ls_sync_wait);
820e0c2a9aaSDavid Teigland 
821e0c2a9aaSDavid Teigland 	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
822e0c2a9aaSDavid Teigland 
823e0c2a9aaSDavid Teigland 	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
824e0c2a9aaSDavid Teigland 	if (error) {
825e0c2a9aaSDavid Teigland 		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
826e0c2a9aaSDavid Teigland 		return error;
827e0c2a9aaSDavid Teigland 	}
828e0c2a9aaSDavid Teigland 
829e0c2a9aaSDavid Teigland 	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
830e0c2a9aaSDavid Teigland 	if (error) {
831e0c2a9aaSDavid Teigland 		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
832e0c2a9aaSDavid Teigland 		control_unlock(sdp);
833e0c2a9aaSDavid Teigland 		return error;
834e0c2a9aaSDavid Teigland 	}
835e0c2a9aaSDavid Teigland 	mounted_mode = DLM_LOCK_NL;
836e0c2a9aaSDavid Teigland 
837e0c2a9aaSDavid Teigland restart:
838e0c2a9aaSDavid Teigland 	if (retries++ && signal_pending(current)) {
839e0c2a9aaSDavid Teigland 		error = -EINTR;
840e0c2a9aaSDavid Teigland 		goto fail;
841e0c2a9aaSDavid Teigland 	}
842e0c2a9aaSDavid Teigland 
843e0c2a9aaSDavid Teigland 	/*
844e0c2a9aaSDavid Teigland 	 * We always start with both locks in NL. control_lock is
845e0c2a9aaSDavid Teigland 	 * demoted to NL below so we don't need to do it here.
846e0c2a9aaSDavid Teigland 	 */
847e0c2a9aaSDavid Teigland 
848e0c2a9aaSDavid Teigland 	if (mounted_mode != DLM_LOCK_NL) {
849e0c2a9aaSDavid Teigland 		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
850f057f6cdSSteven Whitehouse 		if (error)
851e0c2a9aaSDavid Teigland 			goto fail;
852e0c2a9aaSDavid Teigland 		mounted_mode = DLM_LOCK_NL;
853e0c2a9aaSDavid Teigland 	}
854e0c2a9aaSDavid Teigland 
855e0c2a9aaSDavid Teigland 	/*
856e0c2a9aaSDavid Teigland 	 * Other nodes need to do some work in dlm recovery and gfs2_control
857e0c2a9aaSDavid Teigland 	 * before the recover_done and control_lock will be ready for us below.
858e0c2a9aaSDavid Teigland 	 * A delay here is not required but often avoids having to retry.
859e0c2a9aaSDavid Teigland 	 */
860e0c2a9aaSDavid Teigland 
861e0c2a9aaSDavid Teigland 	msleep_interruptible(500);
862e0c2a9aaSDavid Teigland 
863e0c2a9aaSDavid Teigland 	/*
864e0c2a9aaSDavid Teigland 	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
865e0c2a9aaSDavid Teigland 	 * control_lock lvb keeps track of any pending journal recoveries.
866e0c2a9aaSDavid Teigland 	 * mounted_lock indicates if any other nodes have the fs mounted.
867e0c2a9aaSDavid Teigland 	 */
868e0c2a9aaSDavid Teigland 
869e0c2a9aaSDavid Teigland 	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
870e0c2a9aaSDavid Teigland 	if (error == -EAGAIN) {
871e0c2a9aaSDavid Teigland 		goto restart;
872e0c2a9aaSDavid Teigland 	} else if (error) {
873e0c2a9aaSDavid Teigland 		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
874e0c2a9aaSDavid Teigland 		goto fail;
875e0c2a9aaSDavid Teigland 	}
876e0c2a9aaSDavid Teigland 
8774a772772SBob Peterson 	/**
8784a772772SBob Peterson 	 * If we're a spectator, we don't want to take the lock in EX because
8794a772772SBob Peterson 	 * we cannot do the first-mount responsibility it implies: recovery.
8804a772772SBob Peterson 	 */
8814a772772SBob Peterson 	if (sdp->sd_args.ar_spectator)
8824a772772SBob Peterson 		goto locks_done;
8834a772772SBob Peterson 
884e0c2a9aaSDavid Teigland 	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
885e0c2a9aaSDavid Teigland 	if (!error) {
886e0c2a9aaSDavid Teigland 		mounted_mode = DLM_LOCK_EX;
887e0c2a9aaSDavid Teigland 		goto locks_done;
888e0c2a9aaSDavid Teigland 	} else if (error != -EAGAIN) {
889e0c2a9aaSDavid Teigland 		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
890e0c2a9aaSDavid Teigland 		goto fail;
891e0c2a9aaSDavid Teigland 	}
892e0c2a9aaSDavid Teigland 
893e0c2a9aaSDavid Teigland 	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
894e0c2a9aaSDavid Teigland 	if (!error) {
895e0c2a9aaSDavid Teigland 		mounted_mode = DLM_LOCK_PR;
896e0c2a9aaSDavid Teigland 		goto locks_done;
897e0c2a9aaSDavid Teigland 	} else {
898e0c2a9aaSDavid Teigland 		/* not even -EAGAIN should happen here */
899e0c2a9aaSDavid Teigland 		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
900e0c2a9aaSDavid Teigland 		goto fail;
901e0c2a9aaSDavid Teigland 	}
902e0c2a9aaSDavid Teigland 
903e0c2a9aaSDavid Teigland locks_done:
904e0c2a9aaSDavid Teigland 	/*
905e0c2a9aaSDavid Teigland 	 * If we got both locks above in EX, then we're the first mounter.
906e0c2a9aaSDavid Teigland 	 * If not, then we need to wait for the control_lock lvb to be
907e0c2a9aaSDavid Teigland 	 * updated by other mounted nodes to reflect our mount generation.
908e0c2a9aaSDavid Teigland 	 *
909e0c2a9aaSDavid Teigland 	 * In simple first mounter cases, first mounter will see zero lvb_gen,
910e0c2a9aaSDavid Teigland 	 * but in cases where all existing nodes leave/fail before mounting
911e0c2a9aaSDavid Teigland 	 * nodes finish control_mount, then all nodes will be mounting and
912e0c2a9aaSDavid Teigland 	 * lvb_gen will be non-zero.
913e0c2a9aaSDavid Teigland 	 */
914e0c2a9aaSDavid Teigland 
91557c7310bSDavid Teigland 	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
916e0c2a9aaSDavid Teigland 
917e0c2a9aaSDavid Teigland 	if (lvb_gen == 0xFFFFFFFF) {
918e0c2a9aaSDavid Teigland 		/* special value to force mount attempts to fail */
919e0c2a9aaSDavid Teigland 		fs_err(sdp, "control_mount control_lock disabled\n");
920e0c2a9aaSDavid Teigland 		error = -EINVAL;
921e0c2a9aaSDavid Teigland 		goto fail;
922e0c2a9aaSDavid Teigland 	}
923e0c2a9aaSDavid Teigland 
924e0c2a9aaSDavid Teigland 	if (mounted_mode == DLM_LOCK_EX) {
925e0c2a9aaSDavid Teigland 		/* first mounter, keep both EX while doing first recovery */
926e0c2a9aaSDavid Teigland 		spin_lock(&ls->ls_recover_spin);
927e0c2a9aaSDavid Teigland 		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
928e0c2a9aaSDavid Teigland 		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
929e0c2a9aaSDavid Teigland 		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
930e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
931e0c2a9aaSDavid Teigland 		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
932e0c2a9aaSDavid Teigland 		return 0;
933e0c2a9aaSDavid Teigland 	}
934e0c2a9aaSDavid Teigland 
935e0c2a9aaSDavid Teigland 	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
936e0c2a9aaSDavid Teigland 	if (error)
937e0c2a9aaSDavid Teigland 		goto fail;
938e0c2a9aaSDavid Teigland 
939e0c2a9aaSDavid Teigland 	/*
940e0c2a9aaSDavid Teigland 	 * We are not first mounter, now we need to wait for the control_lock
941e0c2a9aaSDavid Teigland 	 * lvb generation to be >= the generation from our first recover_done
942e0c2a9aaSDavid Teigland 	 * and all lvb bits to be clear (no pending journal recoveries.)
943e0c2a9aaSDavid Teigland 	 */
944e0c2a9aaSDavid Teigland 
94557c7310bSDavid Teigland 	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
946e0c2a9aaSDavid Teigland 		/* journals need recovery, wait until all are clear */
947e0c2a9aaSDavid Teigland 		fs_info(sdp, "control_mount wait for journal recovery\n");
948e0c2a9aaSDavid Teigland 		goto restart;
949e0c2a9aaSDavid Teigland 	}
950e0c2a9aaSDavid Teigland 
951e0c2a9aaSDavid Teigland 	spin_lock(&ls->ls_recover_spin);
952e0c2a9aaSDavid Teigland 	block_gen = ls->ls_recover_block;
953e0c2a9aaSDavid Teigland 	start_gen = ls->ls_recover_start;
954e0c2a9aaSDavid Teigland 	mount_gen = ls->ls_recover_mount;
955e0c2a9aaSDavid Teigland 
956e0c2a9aaSDavid Teigland 	if (lvb_gen < mount_gen) {
957e0c2a9aaSDavid Teigland 		/* wait for mounted nodes to update control_lock lvb to our
958e0c2a9aaSDavid Teigland 		   generation, which might include new recovery bits set */
9594a772772SBob Peterson 		if (sdp->sd_args.ar_spectator) {
9604a772772SBob Peterson 			fs_info(sdp, "Recovery is required. Waiting for a "
9614a772772SBob Peterson 				"non-spectator to mount.\n");
9624a772772SBob Peterson 			msleep_interruptible(1000);
9634a772772SBob Peterson 		} else {
9644a772772SBob Peterson 			fs_info(sdp, "control_mount wait1 block %u start %u "
9654a772772SBob Peterson 				"mount %u lvb %u flags %lx\n", block_gen,
9664a772772SBob Peterson 				start_gen, mount_gen, lvb_gen,
9674a772772SBob Peterson 				ls->ls_recover_flags);
9684a772772SBob Peterson 		}
969e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
970e0c2a9aaSDavid Teigland 		goto restart;
971e0c2a9aaSDavid Teigland 	}
972e0c2a9aaSDavid Teigland 
973e0c2a9aaSDavid Teigland 	if (lvb_gen != start_gen) {
974e0c2a9aaSDavid Teigland 		/* wait for mounted nodes to update control_lock lvb to the
975e0c2a9aaSDavid Teigland 		   latest recovery generation */
976e0c2a9aaSDavid Teigland 		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
977e0c2a9aaSDavid Teigland 			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
978e0c2a9aaSDavid Teigland 			lvb_gen, ls->ls_recover_flags);
979e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
980e0c2a9aaSDavid Teigland 		goto restart;
981e0c2a9aaSDavid Teigland 	}
982e0c2a9aaSDavid Teigland 
983e0c2a9aaSDavid Teigland 	if (block_gen == start_gen) {
984e0c2a9aaSDavid Teigland 		/* dlm recovery in progress, wait for it to finish */
985e0c2a9aaSDavid Teigland 		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
986e0c2a9aaSDavid Teigland 			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
987e0c2a9aaSDavid Teigland 			lvb_gen, ls->ls_recover_flags);
988e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
989e0c2a9aaSDavid Teigland 		goto restart;
990e0c2a9aaSDavid Teigland 	}
991e0c2a9aaSDavid Teigland 
992e0c2a9aaSDavid Teigland 	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
993e0c2a9aaSDavid Teigland 	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
994e0c2a9aaSDavid Teigland 	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
995e0c2a9aaSDavid Teigland 	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
996e0c2a9aaSDavid Teigland 	spin_unlock(&ls->ls_recover_spin);
997e0c2a9aaSDavid Teigland 	return 0;
998e0c2a9aaSDavid Teigland 
999e0c2a9aaSDavid Teigland fail:
1000e0c2a9aaSDavid Teigland 	mounted_unlock(sdp);
1001e0c2a9aaSDavid Teigland 	control_unlock(sdp);
1002e0c2a9aaSDavid Teigland 	return error;
1003e0c2a9aaSDavid Teigland }
1004e0c2a9aaSDavid Teigland 
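/*
 * Example only (not part of this file): a standalone userspace sketch of the
 * control_lock lvb convention that control_mount() above relies on.  The lvb
 * carries a little-endian 32-bit generation counter at the start, with a
 * bitmap of journal ids needing recovery at a fixed offset behind it; the
 * size and offset below are assumed to correspond to GDLM_LVB_SIZE and
 * JID_BITMAP_OFFSET defined earlier in this file, and the helper names are
 * hypothetical.
 */
#if 0	/* illustrative userspace C, kept out of the build on purpose */
#include <stdint.h>

#define EX_LVB_SIZE		32	/* assumed GDLM_LVB_SIZE */
#define EX_JID_BITMAP_OFFSET	8	/* generation word + unused word */

/* decode the generation counter stored little-endian in the first bytes */
static uint32_t ex_lvb_read_gen(const unsigned char *lvb)
{
	return (uint32_t)lvb[0] | ((uint32_t)lvb[1] << 8) |
	       ((uint32_t)lvb[2] << 16) | ((uint32_t)lvb[3] << 24);
}

/* "no pending journal recoveries" means every bitmap byte is zero */
static int ex_all_jid_bits_clear(const unsigned char *lvb)
{
	int i;

	for (i = EX_JID_BITMAP_OFFSET; i < EX_LVB_SIZE; i++)
		if (lvb[i])
			return 0;
	return 1;
}
#endif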
1005e0c2a9aaSDavid Teigland static int control_first_done(struct gfs2_sbd *sdp)
1006e0c2a9aaSDavid Teigland {
1007e0c2a9aaSDavid Teigland 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
1008e0c2a9aaSDavid Teigland 	uint32_t start_gen, block_gen;
1009e0c2a9aaSDavid Teigland 	int error;
1010e0c2a9aaSDavid Teigland 
1011e0c2a9aaSDavid Teigland restart:
1012e0c2a9aaSDavid Teigland 	spin_lock(&ls->ls_recover_spin);
1013e0c2a9aaSDavid Teigland 	start_gen = ls->ls_recover_start;
1014e0c2a9aaSDavid Teigland 	block_gen = ls->ls_recover_block;
1015e0c2a9aaSDavid Teigland 
1016e0c2a9aaSDavid Teigland 	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
1017e0c2a9aaSDavid Teigland 	    !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
1018e0c2a9aaSDavid Teigland 	    !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
1019e0c2a9aaSDavid Teigland 		/* sanity check, should not happen */
1020e0c2a9aaSDavid Teigland 		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
1021e0c2a9aaSDavid Teigland 		       start_gen, block_gen, ls->ls_recover_flags);
1022e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
1023e0c2a9aaSDavid Teigland 		control_unlock(sdp);
1024e0c2a9aaSDavid Teigland 		return -1;
1025e0c2a9aaSDavid Teigland 	}
1026e0c2a9aaSDavid Teigland 
1027e0c2a9aaSDavid Teigland 	if (start_gen == block_gen) {
1028e0c2a9aaSDavid Teigland 		/*
1029e0c2a9aaSDavid Teigland 		 * Wait for the end of a dlm recovery cycle to switch from
1030e0c2a9aaSDavid Teigland 		 * first mounter recovery.  We can ignore any recover_slot
1031e0c2a9aaSDavid Teigland 		 * callbacks between the recover_prep and next recover_done
1032e0c2a9aaSDavid Teigland 		 * because we are still the first mounter and any failed nodes
1033e0c2a9aaSDavid Teigland 		 * have not fully mounted, so they don't need recovery.
1034e0c2a9aaSDavid Teigland 		 */
1035e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
1036e0c2a9aaSDavid Teigland 		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);
1037e0c2a9aaSDavid Teigland 
1038e0c2a9aaSDavid Teigland 		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
103974316201SNeilBrown 			    TASK_UNINTERRUPTIBLE);
1040e0c2a9aaSDavid Teigland 		goto restart;
1041e0c2a9aaSDavid Teigland 	}
1042e0c2a9aaSDavid Teigland 
1043e0c2a9aaSDavid Teigland 	clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
1044e0c2a9aaSDavid Teigland 	set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
1045e0c2a9aaSDavid Teigland 	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
1046e0c2a9aaSDavid Teigland 	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
1047e0c2a9aaSDavid Teigland 	spin_unlock(&ls->ls_recover_spin);
1048e0c2a9aaSDavid Teigland 
104957c7310bSDavid Teigland 	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
105057c7310bSDavid Teigland 	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
1051e0c2a9aaSDavid Teigland 
1052e0c2a9aaSDavid Teigland 	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
1053e0c2a9aaSDavid Teigland 	if (error)
1054e0c2a9aaSDavid Teigland 		fs_err(sdp, "control_first_done mounted PR error %d\n", error);
1055e0c2a9aaSDavid Teigland 
1056e0c2a9aaSDavid Teigland 	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
1057e0c2a9aaSDavid Teigland 	if (error)
1058e0c2a9aaSDavid Teigland 		fs_err(sdp, "control_first_done control NL error %d\n", error);
1059f057f6cdSSteven Whitehouse 
1060f057f6cdSSteven Whitehouse 	return error;
1061f057f6cdSSteven Whitehouse }
1062f057f6cdSSteven Whitehouse 
1063e0c2a9aaSDavid Teigland /*
1064e0c2a9aaSDavid Teigland  * Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
1065fe39dc98SZhang Jiaming  * to accommodate the largest slot number.  (NB dlm slot numbers start at 1,
1066e0c2a9aaSDavid Teigland  * gfs2 jids start at 0, so jid = slot - 1)
1067e0c2a9aaSDavid Teigland  */
1068e0c2a9aaSDavid Teigland 
1069e0c2a9aaSDavid Teigland #define RECOVER_SIZE_INC 16
1070e0c2a9aaSDavid Teigland 
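/*
 * Example only: with RECOVER_SIZE_INC == 16, dlm slots {1, 2, 7} map to gfs2
 * jids {0, 1, 6}, so max_jid == 6 and the arrays below grow from 0 to 16
 * entries; a later slot 20 (jid 19) would grow them to 32.  A standalone
 * sketch of the same rounding (function name is hypothetical):
 */
#if 0	/* illustrative userspace C, kept out of the build on purpose */
static unsigned int ex_recover_size(unsigned int old_size, int max_jid)
{
	unsigned int new_size = old_size;

	/* grow in steps of 16 until the largest jid fits in the arrays */
	while (new_size < (unsigned int)(max_jid + 1))
		new_size += 16;
	return new_size;
}
#endif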
1071e0c2a9aaSDavid Teigland static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
1072e0c2a9aaSDavid Teigland 			    int num_slots)
1073e0c2a9aaSDavid Teigland {
1074e0c2a9aaSDavid Teigland 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
1075e0c2a9aaSDavid Teigland 	uint32_t *submit = NULL;
1076e0c2a9aaSDavid Teigland 	uint32_t *result = NULL;
1077e0c2a9aaSDavid Teigland 	uint32_t old_size, new_size;
1078e0c2a9aaSDavid Teigland 	int i, max_jid;
1079e0c2a9aaSDavid Teigland 
108057c7310bSDavid Teigland 	if (!ls->ls_lvb_bits) {
108157c7310bSDavid Teigland 		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
108257c7310bSDavid Teigland 		if (!ls->ls_lvb_bits)
108357c7310bSDavid Teigland 			return -ENOMEM;
108457c7310bSDavid Teigland 	}
108557c7310bSDavid Teigland 
1086e0c2a9aaSDavid Teigland 	max_jid = 0;
1087e0c2a9aaSDavid Teigland 	for (i = 0; i < num_slots; i++) {
1088e0c2a9aaSDavid Teigland 		if (max_jid < slots[i].slot - 1)
1089e0c2a9aaSDavid Teigland 			max_jid = slots[i].slot - 1;
1090e0c2a9aaSDavid Teigland 	}
1091e0c2a9aaSDavid Teigland 
1092e0c2a9aaSDavid Teigland 	old_size = ls->ls_recover_size;
10938f0daef5SAndreas Gruenbacher 	new_size = old_size;
10948f0daef5SAndreas Gruenbacher 	while (new_size < max_jid + 1)
10958f0daef5SAndreas Gruenbacher 		new_size += RECOVER_SIZE_INC;
10968f0daef5SAndreas Gruenbacher 	if (new_size == old_size)
1097e0c2a9aaSDavid Teigland 		return 0;
1098e0c2a9aaSDavid Teigland 
10996ec43b18SFabian Frederick 	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
11006ec43b18SFabian Frederick 	result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1101e0c2a9aaSDavid Teigland 	if (!submit || !result) {
1102e0c2a9aaSDavid Teigland 		kfree(submit);
1103e0c2a9aaSDavid Teigland 		kfree(result);
1104e0c2a9aaSDavid Teigland 		return -ENOMEM;
1105e0c2a9aaSDavid Teigland 	}
1106e0c2a9aaSDavid Teigland 
1107e0c2a9aaSDavid Teigland 	spin_lock(&ls->ls_recover_spin);
1108e0c2a9aaSDavid Teigland 	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
1109e0c2a9aaSDavid Teigland 	memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
1110e0c2a9aaSDavid Teigland 	kfree(ls->ls_recover_submit);
1111e0c2a9aaSDavid Teigland 	kfree(ls->ls_recover_result);
1112e0c2a9aaSDavid Teigland 	ls->ls_recover_submit = submit;
1113e0c2a9aaSDavid Teigland 	ls->ls_recover_result = result;
1114e0c2a9aaSDavid Teigland 	ls->ls_recover_size = new_size;
1115e0c2a9aaSDavid Teigland 	spin_unlock(&ls->ls_recover_spin);
1116e0c2a9aaSDavid Teigland 	return 0;
1117e0c2a9aaSDavid Teigland }
1118e0c2a9aaSDavid Teigland 
1119e0c2a9aaSDavid Teigland static void free_recover_size(struct lm_lockstruct *ls)
1120e0c2a9aaSDavid Teigland {
112157c7310bSDavid Teigland 	kfree(ls->ls_lvb_bits);
1122e0c2a9aaSDavid Teigland 	kfree(ls->ls_recover_submit);
1123e0c2a9aaSDavid Teigland 	kfree(ls->ls_recover_result);
1124e0c2a9aaSDavid Teigland 	ls->ls_recover_submit = NULL;
1125e0c2a9aaSDavid Teigland 	ls->ls_recover_result = NULL;
1126e0c2a9aaSDavid Teigland 	ls->ls_recover_size = 0;
1127cc1dfa8bSThomas Tai 	ls->ls_lvb_bits = NULL;
1128e0c2a9aaSDavid Teigland }
1129e0c2a9aaSDavid Teigland 
1130e0c2a9aaSDavid Teigland /* dlm calls before it does lock recovery */
1131e0c2a9aaSDavid Teigland 
1132e0c2a9aaSDavid Teigland static void gdlm_recover_prep(void *arg)
1133e0c2a9aaSDavid Teigland {
1134e0c2a9aaSDavid Teigland 	struct gfs2_sbd *sdp = arg;
1135e0c2a9aaSDavid Teigland 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
1136e0c2a9aaSDavid Teigland 
1137d6b412c5SAndreas Gruenbacher 	if (gfs2_withdrawing_or_withdrawn(sdp)) {
113803678a99SBob Peterson 		fs_err(sdp, "recover_prep ignored due to withdraw.\n");
113903678a99SBob Peterson 		return;
114003678a99SBob Peterson 	}
1141e0c2a9aaSDavid Teigland 	spin_lock(&ls->ls_recover_spin);
1142e0c2a9aaSDavid Teigland 	ls->ls_recover_block = ls->ls_recover_start;
1143e0c2a9aaSDavid Teigland 	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
1144e0c2a9aaSDavid Teigland 
1145e0c2a9aaSDavid Teigland 	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
1146e0c2a9aaSDavid Teigland 	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
1147e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
1148e0c2a9aaSDavid Teigland 		return;
1149e0c2a9aaSDavid Teigland 	}
1150e0c2a9aaSDavid Teigland 	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
1151e0c2a9aaSDavid Teigland 	spin_unlock(&ls->ls_recover_spin);
1152e0c2a9aaSDavid Teigland }
1153e0c2a9aaSDavid Teigland 
1154e0c2a9aaSDavid Teigland /* dlm calls after recover_prep has been completed on all lockspace members;
1155e0c2a9aaSDavid Teigland    identifies slot/jid of failed member */
1156e0c2a9aaSDavid Teigland 
1157e0c2a9aaSDavid Teigland static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
1158e0c2a9aaSDavid Teigland {
1159e0c2a9aaSDavid Teigland 	struct gfs2_sbd *sdp = arg;
1160e0c2a9aaSDavid Teigland 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
1161e0c2a9aaSDavid Teigland 	int jid = slot->slot - 1;
1162e0c2a9aaSDavid Teigland 
1163d6b412c5SAndreas Gruenbacher 	if (gfs2_withdrawing_or_withdrawn(sdp)) {
116403678a99SBob Peterson 		fs_err(sdp, "recover_slot jid %d ignored due to withdraw.\n",
116503678a99SBob Peterson 		       jid);
116603678a99SBob Peterson 		return;
116703678a99SBob Peterson 	}
1168e0c2a9aaSDavid Teigland 	spin_lock(&ls->ls_recover_spin);
1169e0c2a9aaSDavid Teigland 	if (ls->ls_recover_size < jid + 1) {
1170af38816eSAndreas Gruenbacher 		fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
1171e0c2a9aaSDavid Teigland 		       jid, ls->ls_recover_block, ls->ls_recover_size);
1172e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
1173e0c2a9aaSDavid Teigland 		return;
1174e0c2a9aaSDavid Teigland 	}
1175e0c2a9aaSDavid Teigland 
1176e0c2a9aaSDavid Teigland 	if (ls->ls_recover_submit[jid]) {
1177ad781971SDavid Teigland 		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
1178e0c2a9aaSDavid Teigland 			jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
1179e0c2a9aaSDavid Teigland 	}
1180e0c2a9aaSDavid Teigland 	ls->ls_recover_submit[jid] = ls->ls_recover_block;
1181e0c2a9aaSDavid Teigland 	spin_unlock(&ls->ls_recover_spin);
1182e0c2a9aaSDavid Teigland }
1183e0c2a9aaSDavid Teigland 
1184e0c2a9aaSDavid Teigland /* dlm calls after recover_slot and after it completes lock recovery */
1185e0c2a9aaSDavid Teigland 
1186e0c2a9aaSDavid Teigland static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
1187e0c2a9aaSDavid Teigland 			      int our_slot, uint32_t generation)
1188e0c2a9aaSDavid Teigland {
1189e0c2a9aaSDavid Teigland 	struct gfs2_sbd *sdp = arg;
1190e0c2a9aaSDavid Teigland 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
1191e0c2a9aaSDavid Teigland 
1192d6b412c5SAndreas Gruenbacher 	if (gfs2_withdrawing_or_withdrawn(sdp)) {
119303678a99SBob Peterson 		fs_err(sdp, "recover_done ignored due to withdraw.\n");
119403678a99SBob Peterson 		return;
119503678a99SBob Peterson 	}
1196e0c2a9aaSDavid Teigland 	/* ensure the ls jid arrays are large enough */
1197e0c2a9aaSDavid Teigland 	set_recover_size(sdp, slots, num_slots);
1198e0c2a9aaSDavid Teigland 
1199e0c2a9aaSDavid Teigland 	spin_lock(&ls->ls_recover_spin);
1200e0c2a9aaSDavid Teigland 	ls->ls_recover_start = generation;
1201e0c2a9aaSDavid Teigland 
1202e0c2a9aaSDavid Teigland 	if (!ls->ls_recover_mount) {
1203e0c2a9aaSDavid Teigland 		ls->ls_recover_mount = generation;
1204e0c2a9aaSDavid Teigland 		ls->ls_jid = our_slot - 1;
1205e0c2a9aaSDavid Teigland 	}
1206e0c2a9aaSDavid Teigland 
1207e0c2a9aaSDavid Teigland 	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
1208e0c2a9aaSDavid Teigland 		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
1209e0c2a9aaSDavid Teigland 
1210e0c2a9aaSDavid Teigland 	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
12114e857c58SPeter Zijlstra 	smp_mb__after_atomic();
1212e0c2a9aaSDavid Teigland 	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
1213e0c2a9aaSDavid Teigland 	spin_unlock(&ls->ls_recover_spin);
1214e0c2a9aaSDavid Teigland }
1215e0c2a9aaSDavid Teigland 
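/*
 * Example only: DFL_DLM_RECOVERY is used as a plain wait/wake handshake.
 * gdlm_recover_prep() sets the bit, control_first_done() sleeps on it with
 * wait_on_bit(), and gdlm_recover_done() above releases the waiter with the
 * usual sequence:
 *
 *	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
 *
 * The barrier between clear_bit() and wake_up_bit() matters because
 * clear_bit() alone does not imply a memory barrier, so without it the
 * wake-up could be missed by a task about to sleep on the bit.
 */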
1216e0c2a9aaSDavid Teigland /* gfs2_recover thread has a journal recovery result */
1217e0c2a9aaSDavid Teigland 
1218e0c2a9aaSDavid Teigland static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
1219e0c2a9aaSDavid Teigland 				 unsigned int result)
1220e0c2a9aaSDavid Teigland {
1221e0c2a9aaSDavid Teigland 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
1222e0c2a9aaSDavid Teigland 
1223d6b412c5SAndreas Gruenbacher 	if (gfs2_withdrawing_or_withdrawn(sdp)) {
122403678a99SBob Peterson 		fs_err(sdp, "recovery_result jid %d ignored due to withdraw.\n",
122503678a99SBob Peterson 		       jid);
122603678a99SBob Peterson 		return;
122703678a99SBob Peterson 	}
1228e0c2a9aaSDavid Teigland 	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
1229e0c2a9aaSDavid Teigland 		return;
1230e0c2a9aaSDavid Teigland 
1231e0c2a9aaSDavid Teigland 	/* we don't care about the recovery of our own journal during mount */
1232e0c2a9aaSDavid Teigland 	if (jid == ls->ls_jid)
1233e0c2a9aaSDavid Teigland 		return;
1234e0c2a9aaSDavid Teigland 
1235e0c2a9aaSDavid Teigland 	spin_lock(&ls->ls_recover_spin);
1236e0c2a9aaSDavid Teigland 	if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
1237e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
1238e0c2a9aaSDavid Teigland 		return;
1239e0c2a9aaSDavid Teigland 	}
1240e0c2a9aaSDavid Teigland 	if (ls->ls_recover_size < jid + 1) {
1241af38816eSAndreas Gruenbacher 		fs_err(sdp, "recovery_result jid %d short size %d\n",
1242e0c2a9aaSDavid Teigland 		       jid, ls->ls_recover_size);
1243e0c2a9aaSDavid Teigland 		spin_unlock(&ls->ls_recover_spin);
1244e0c2a9aaSDavid Teigland 		return;
1245e0c2a9aaSDavid Teigland 	}
1246e0c2a9aaSDavid Teigland 
1247e0c2a9aaSDavid Teigland 	fs_info(sdp, "recover jid %d result %s\n", jid,
1248e0c2a9aaSDavid Teigland 		result == LM_RD_GAVEUP ? "busy" : "success");
1249e0c2a9aaSDavid Teigland 
1250e0c2a9aaSDavid Teigland 	ls->ls_recover_result[jid] = result;
1251e0c2a9aaSDavid Teigland 
1252e0c2a9aaSDavid Teigland 	/* GAVEUP means another node is recovering the journal; delay our
1253e0c2a9aaSDavid Teigland 	   next attempt to recover it, to give the other node a chance to
1254e0c2a9aaSDavid Teigland 	   finish before trying again */
1255e0c2a9aaSDavid Teigland 
1256e0c2a9aaSDavid Teigland 	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
1257e0c2a9aaSDavid Teigland 		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
1258e0c2a9aaSDavid Teigland 				   result == LM_RD_GAVEUP ? HZ : 0);
1259e0c2a9aaSDavid Teigland 	spin_unlock(&ls->ls_recover_spin);
1260e0c2a9aaSDavid Teigland }
1261e0c2a9aaSDavid Teigland 
126227c3b415SBob Peterson static const struct dlm_lockspace_ops gdlm_lockspace_ops = {
1263e0c2a9aaSDavid Teigland 	.recover_prep = gdlm_recover_prep,
1264e0c2a9aaSDavid Teigland 	.recover_slot = gdlm_recover_slot,
1265e0c2a9aaSDavid Teigland 	.recover_done = gdlm_recover_done,
1266e0c2a9aaSDavid Teigland };
1267e0c2a9aaSDavid Teigland 
1268e0c2a9aaSDavid Teigland static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
1269e0c2a9aaSDavid Teigland {
1270e0c2a9aaSDavid Teigland 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
1271e0c2a9aaSDavid Teigland 	char cluster[GFS2_LOCKNAME_LEN];
1272e0c2a9aaSDavid Teigland 	const char *fsname;
1273e0c2a9aaSDavid Teigland 	uint32_t flags;
1274e0c2a9aaSDavid Teigland 	int error, ops_result;
1275e0c2a9aaSDavid Teigland 
1276e0c2a9aaSDavid Teigland 	/*
1277e0c2a9aaSDavid Teigland 	 * initialize everything
1278e0c2a9aaSDavid Teigland 	 */
1279e0c2a9aaSDavid Teigland 
1280e0c2a9aaSDavid Teigland 	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
1281e0c2a9aaSDavid Teigland 	spin_lock_init(&ls->ls_recover_spin);
1282e0c2a9aaSDavid Teigland 	ls->ls_recover_flags = 0;
1283e0c2a9aaSDavid Teigland 	ls->ls_recover_mount = 0;
1284e0c2a9aaSDavid Teigland 	ls->ls_recover_start = 0;
1285e0c2a9aaSDavid Teigland 	ls->ls_recover_block = 0;
1286e0c2a9aaSDavid Teigland 	ls->ls_recover_size = 0;
1287e0c2a9aaSDavid Teigland 	ls->ls_recover_submit = NULL;
1288e0c2a9aaSDavid Teigland 	ls->ls_recover_result = NULL;
128957c7310bSDavid Teigland 	ls->ls_lvb_bits = NULL;
1290e0c2a9aaSDavid Teigland 
1291e0c2a9aaSDavid Teigland 	error = set_recover_size(sdp, NULL, 0);
1292e0c2a9aaSDavid Teigland 	if (error)
1293e0c2a9aaSDavid Teigland 		goto fail;
1294e0c2a9aaSDavid Teigland 
1295e0c2a9aaSDavid Teigland 	/*
1296e0c2a9aaSDavid Teigland 	 * prepare dlm_new_lockspace args
1297e0c2a9aaSDavid Teigland 	 */
1298e0c2a9aaSDavid Teigland 
1299e0c2a9aaSDavid Teigland 	fsname = strchr(table, ':');
1300e0c2a9aaSDavid Teigland 	if (!fsname) {
1301e0c2a9aaSDavid Teigland 		fs_info(sdp, "no fsname found\n");
1302e0c2a9aaSDavid Teigland 		error = -EINVAL;
1303e0c2a9aaSDavid Teigland 		goto fail_free;
1304e0c2a9aaSDavid Teigland 	}
1305e0c2a9aaSDavid Teigland 	memset(cluster, 0, sizeof(cluster));
1306e0c2a9aaSDavid Teigland 	memcpy(cluster, table, strlen(table) - strlen(fsname));
1307e0c2a9aaSDavid Teigland 	fsname++;
1308e0c2a9aaSDavid Teigland 
130912cda13cSAlexander Aring 	flags = DLM_LSFL_NEWEXCL;
1310e0c2a9aaSDavid Teigland 
1311e0c2a9aaSDavid Teigland 	/*
1312e0c2a9aaSDavid Teigland 	 * create/join lockspace
1313e0c2a9aaSDavid Teigland 	 */
1314e0c2a9aaSDavid Teigland 
1315e0c2a9aaSDavid Teigland 	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
1316e0c2a9aaSDavid Teigland 				  &gdlm_lockspace_ops, sdp, &ops_result,
1317e0c2a9aaSDavid Teigland 				  &ls->ls_dlm);
1318e0c2a9aaSDavid Teigland 	if (error) {
1319e0c2a9aaSDavid Teigland 		fs_err(sdp, "dlm_new_lockspace error %d\n", error);
1320e0c2a9aaSDavid Teigland 		goto fail_free;
1321e0c2a9aaSDavid Teigland 	}
1322e0c2a9aaSDavid Teigland 
1323e0c2a9aaSDavid Teigland 	if (ops_result < 0) {
1324e0c2a9aaSDavid Teigland 		/*
1325e0c2a9aaSDavid Teigland 		 * dlm does not support ops callbacks; the old
1326e0c2a9aaSDavid Teigland 		 * dlm_controld/gfs_controld are in use, so try without ops.
1327e0c2a9aaSDavid Teigland 		 */
1328e0c2a9aaSDavid Teigland 		fs_info(sdp, "dlm lockspace ops not used\n");
1329e0c2a9aaSDavid Teigland 		free_recover_size(ls);
1330e0c2a9aaSDavid Teigland 		set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
1331e0c2a9aaSDavid Teigland 		return 0;
1332e0c2a9aaSDavid Teigland 	}
1333e0c2a9aaSDavid Teigland 
1334e0c2a9aaSDavid Teigland 	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
1335e0c2a9aaSDavid Teigland 		fs_err(sdp, "dlm lockspace ops disallow jid preset\n");
1336e0c2a9aaSDavid Teigland 		error = -EINVAL;
1337e0c2a9aaSDavid Teigland 		goto fail_release;
1338e0c2a9aaSDavid Teigland 	}
1339e0c2a9aaSDavid Teigland 
1340e0c2a9aaSDavid Teigland 	/*
1341e0c2a9aaSDavid Teigland 	 * control_mount() uses control_lock to determine first mounter,
1342e0c2a9aaSDavid Teigland 	 * and for later mounts, waits for any recoveries to be cleared.
1343e0c2a9aaSDavid Teigland 	 */
1344e0c2a9aaSDavid Teigland 
1345e0c2a9aaSDavid Teigland 	error = control_mount(sdp);
1346e0c2a9aaSDavid Teigland 	if (error) {
1347e0c2a9aaSDavid Teigland 		fs_err(sdp, "mount control error %d\n", error);
1348e0c2a9aaSDavid Teigland 		goto fail_release;
1349e0c2a9aaSDavid Teigland 	}
1350e0c2a9aaSDavid Teigland 
1351e0c2a9aaSDavid Teigland 	ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
1352e0c2a9aaSDavid Teigland 	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
13534e857c58SPeter Zijlstra 	smp_mb__after_atomic();
1354e0c2a9aaSDavid Teigland 	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
1355e0c2a9aaSDavid Teigland 	return 0;
1356e0c2a9aaSDavid Teigland 
1357e0c2a9aaSDavid Teigland fail_release:
1358e0c2a9aaSDavid Teigland 	dlm_release_lockspace(ls->ls_dlm, 2);
1359e0c2a9aaSDavid Teigland fail_free:
1360e0c2a9aaSDavid Teigland 	free_recover_size(ls);
1361e0c2a9aaSDavid Teigland fail:
1362e0c2a9aaSDavid Teigland 	return error;
1363e0c2a9aaSDavid Teigland }
1364e0c2a9aaSDavid Teigland 
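/*
 * Example only: the lock table name passed to gdlm_mount() above has the
 * form "<cluster>:<fsname>"; everything before the first ':' becomes the
 * cluster name handed to dlm_new_lockspace() and everything after it is the
 * filesystem name.  A standalone sketch of that split (names hypothetical):
 */
#if 0	/* illustrative userspace C, kept out of the build on purpose */
#include <string.h>

static int ex_split_table(const char *table, char *cluster, size_t cluster_len)
{
	const char *fsname = strchr(table, ':');

	/* reject a table with no ':' or a cluster name that will not fit */
	if (!fsname || (size_t)(fsname - table) >= cluster_len)
		return -1;
	memset(cluster, 0, cluster_len);
	memcpy(cluster, table, fsname - table);
	return 0;		/* fsname + 1 points at the filesystem name */
}
#endif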
1365e0c2a9aaSDavid Teigland static void gdlm_first_done(struct gfs2_sbd *sdp)
1366e0c2a9aaSDavid Teigland {
1367e0c2a9aaSDavid Teigland 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
1368e0c2a9aaSDavid Teigland 	int error;
1369e0c2a9aaSDavid Teigland 
1370e0c2a9aaSDavid Teigland 	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
1371e0c2a9aaSDavid Teigland 		return;
1372e0c2a9aaSDavid Teigland 
1373e0c2a9aaSDavid Teigland 	error = control_first_done(sdp);
1374e0c2a9aaSDavid Teigland 	if (error)
1375e0c2a9aaSDavid Teigland 		fs_err(sdp, "mount first_done error %d\n", error);
1376e0c2a9aaSDavid Teigland }
1377e0c2a9aaSDavid Teigland 
1378f057f6cdSSteven Whitehouse static void gdlm_unmount(struct gfs2_sbd *sdp)
1379f057f6cdSSteven Whitehouse {
1380f057f6cdSSteven Whitehouse 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
1381f057f6cdSSteven Whitehouse 
1382e0c2a9aaSDavid Teigland 	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
1383e0c2a9aaSDavid Teigland 		goto release;
1384e0c2a9aaSDavid Teigland 
1385e0c2a9aaSDavid Teigland 	/* wait for gfs2_control_wq to be done with this mount */
1386e0c2a9aaSDavid Teigland 
1387e0c2a9aaSDavid Teigland 	spin_lock(&ls->ls_recover_spin);
1388e0c2a9aaSDavid Teigland 	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
1389e0c2a9aaSDavid Teigland 	spin_unlock(&ls->ls_recover_spin);
139043829731STejun Heo 	flush_delayed_work(&sdp->sd_control_work);
1391e0c2a9aaSDavid Teigland 
1392e0c2a9aaSDavid Teigland 	/* mounted_lock and control_lock will be purged in dlm recovery */
1393e0c2a9aaSDavid Teigland release:
1394f057f6cdSSteven Whitehouse 	if (ls->ls_dlm) {
1395f057f6cdSSteven Whitehouse 		dlm_release_lockspace(ls->ls_dlm, 2);
1396f057f6cdSSteven Whitehouse 		ls->ls_dlm = NULL;
1397f057f6cdSSteven Whitehouse 	}
1398e0c2a9aaSDavid Teigland 
1399e0c2a9aaSDavid Teigland 	free_recover_size(ls);
1400f057f6cdSSteven Whitehouse }
1401f057f6cdSSteven Whitehouse 
1402f057f6cdSSteven Whitehouse static const match_table_t dlm_tokens = {
1403f057f6cdSSteven Whitehouse 	{ Opt_jid, "jid=%d"},
1404f057f6cdSSteven Whitehouse 	{ Opt_id, "id=%d"},
1405f057f6cdSSteven Whitehouse 	{ Opt_first, "first=%d"},
1406f057f6cdSSteven Whitehouse 	{ Opt_nodir, "nodir=%d"},
1407f057f6cdSSteven Whitehouse 	{ Opt_err, NULL },
1408f057f6cdSSteven Whitehouse };
1409f057f6cdSSteven Whitehouse 
1410f057f6cdSSteven Whitehouse const struct lm_lockops gfs2_dlm_ops = {
1411f057f6cdSSteven Whitehouse 	.lm_proto_name = "lock_dlm",
1412f057f6cdSSteven Whitehouse 	.lm_mount = gdlm_mount,
1413e0c2a9aaSDavid Teigland 	.lm_first_done = gdlm_first_done,
1414e0c2a9aaSDavid Teigland 	.lm_recovery_result = gdlm_recovery_result,
1415f057f6cdSSteven Whitehouse 	.lm_unmount = gdlm_unmount,
1416f057f6cdSSteven Whitehouse 	.lm_put_lock = gdlm_put_lock,
1417f057f6cdSSteven Whitehouse 	.lm_lock = gdlm_lock,
1418f057f6cdSSteven Whitehouse 	.lm_cancel = gdlm_cancel,
1419f057f6cdSSteven Whitehouse 	.lm_tokens = &dlm_tokens,
1420f057f6cdSSteven Whitehouse };
1421f057f6cdSSteven Whitehouse 
1422