xref: /openbmc/linux/fs/dlm/recoverd.c (revision 64c70b1c)
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"


/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled. */

static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
	int error = -EINTR;

	spin_lock(&ls->ls_recover_lock);
	if (ls->ls_recover_seq == seq) {
		set_bit(LSFL_RUNNING, &ls->ls_flags);
		up_write(&ls->ls_in_recovery);
		error = 0;
	}
	spin_unlock(&ls->ls_recover_lock);
	return error;
}

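/* Run one complete recovery pass for the event described by rv: rebuild the
   root rsb list and our share of the resource directory, remaster rsb's and
   locks held by departed nodes when needed, then re-enable locking and replay
   requests that were queued while recovery ran.  The whole pass is serialized
   against dlm_recoverd_suspend() by ls_recoverd_active. */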
static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
	unsigned long start;
	int error, neg = 0;

	log_debug(ls, "recover %llx", (unsigned long long)rv->seq);

	mutex_lock(&ls->ls_recoverd_active);

	/*
	 * Suspending and resuming dlm_astd ensures that no lkb's from this ls
	 * will be processed by dlm_astd during recovery.
	 */

	dlm_astd_suspend();
	dlm_astd_resume();

	/*
	 * This list of root rsb's will be the basis of most of the recovery
	 * routines.
	 */

	dlm_create_root_list(ls);

	/*
	 * Free all the tossed rsb's so we don't have to recover them.
	 */

	dlm_clear_toss_list(ls);

	/*
	 * Add or remove nodes from the lockspace's ls_nodes list.
	 * Also waits for all nodes to complete dlm_recover_members.
	 */

	error = dlm_recover_members(ls, rv, &neg);
	if (error) {
		log_debug(ls, "recover_members failed %d", error);
		goto fail;
	}
	start = jiffies;

	/*
	 * Rebuild our own share of the directory by collecting from all other
	 * nodes their master rsb names that hash to us.
	 */

	error = dlm_recover_directory(ls);
	if (error) {
		log_debug(ls, "recover_directory failed %d", error);
		goto fail;
	}

	/*
	 * Wait for all nodes to complete directory rebuild.
	 */

	error = dlm_recover_directory_wait(ls);
	if (error) {
		log_debug(ls, "recover_directory_wait failed %d", error);
		goto fail;
	}

	/*
	 * We may have outstanding operations that are waiting for a reply from
	 * a failed node.  Mark these to be resent after recovery.  Unlock and
	 * cancel ops can just be completed.
	 */

	dlm_recover_waiters_pre(ls);

	error = dlm_recovery_stopped(ls);
	if (error)
		goto fail;

	if (neg || dlm_no_directory(ls)) {
		/*
		 * Clear lkb's for departed nodes.
		 */

		dlm_purge_locks(ls);

		/*
		 * Get new master nodeid's for rsb's that were mastered on
		 * departed nodes.
		 */

		error = dlm_recover_masters(ls);
		if (error) {
			log_debug(ls, "recover_masters failed %d", error);
			goto fail;
		}

		/*
		 * Send our locks on remastered rsb's to the new masters.
		 */

		error = dlm_recover_locks(ls);
		if (error) {
			log_debug(ls, "recover_locks failed %d", error);
			goto fail;
		}

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_debug(ls, "recover_locks_wait failed %d", error);
			goto fail;
		}

		/*
		 * Finalize state in master rsb's now that all locks can be
		 * checked.  This includes conversion resolution and lvb
		 * settings.
		 */

		dlm_recover_rsbs(ls);
	} else {
		/*
		 * Other lockspace members may be going through the "neg" steps
		 * while also adding us to the lockspace, in which case they'll
		 * be doing the recover_locks (RS_LOCKS) barrier.
		 */
		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_debug(ls, "recover_locks_wait failed %d", error);
			goto fail;
		}
	}

	dlm_release_root_list(ls);

	/*
	 * Purge directory-related requests that are saved in requestqueue.
	 * All dir requests from before recovery are invalid now due to the dir
	 * rebuild and will be resent by the requesting nodes.
	 */

	dlm_purge_requestqueue(ls);

	dlm_set_recover_status(ls, DLM_RS_DONE);
	error = dlm_recover_done_wait(ls);
	if (error) {
		log_debug(ls, "recover_done_wait failed %d", error);
		goto fail;
	}

	dlm_clear_members_gone(ls);

	dlm_adjust_timeouts(ls);

	error = enable_locking(ls, rv->seq);
	if (error) {
		log_debug(ls, "enable_locking failed %d", error);
		goto fail;
	}

	error = dlm_process_requestqueue(ls);
	if (error) {
		log_debug(ls, "process_requestqueue failed %d", error);
		goto fail;
	}

	error = dlm_recover_waiters_post(ls);
	if (error) {
		log_debug(ls, "recover_waiters_post failed %d", error);
		goto fail;
	}

	dlm_grant_after_purge(ls);

	dlm_astd_wake();

	log_debug(ls, "recover %llx done: %u ms",
		  (unsigned long long)rv->seq,
		  jiffies_to_msecs(jiffies - start));
	mutex_unlock(&ls->ls_recoverd_active);

	return 0;

 fail:
	dlm_release_root_list(ls);
	log_debug(ls, "recover %llx error %d",
		  (unsigned long long)rv->seq, error);
	mutex_unlock(&ls->ls_recoverd_active);
	return error;
}

/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */

static void do_ls_recovery(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL;

	spin_lock(&ls->ls_recover_lock);
	rv = ls->ls_recover_args;
	ls->ls_recover_args = NULL;
	if (rv && ls->ls_recover_seq == rv->seq)
		clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
	spin_unlock(&ls->ls_recover_lock);

	if (rv) {
		ls_recover(ls, rv);
		kfree(rv->nodeids);
		kfree(rv);
	}
}

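/* Per-lockspace recovery thread, created by dlm_recoverd_start().  It sleeps
   until dlm_recoverd_kick() sets LSFL_WORK, runs do_ls_recovery() for each
   kick, and exits when dlm_recoverd_stop() calls kthread_stop(). */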
static int dlm_recoverd(void *arg)
{
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(arg);
	if (!ls) {
		log_print("dlm_recoverd: no lockspace %p", arg);
		return -1;
	}

	while (!kthread_should_stop()) {
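		/*
		 * Sleep until dlm_recoverd_kick() sets LSFL_WORK.  The task
		 * state is set before testing the flag so that a wakeup
		 * arriving between the test and schedule() is not lost.
		 */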
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(LSFL_WORK, &ls->ls_flags))
			schedule();
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
			do_ls_recovery(ls);
	}

	dlm_put_lockspace(ls);
	return 0;
}

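/* Request a recovery pass: mark work pending and wake the recoverd thread.
   If the thread is already running a pass, the flag makes it run another
   pass before going back to sleep. */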
void dlm_recoverd_kick(struct dlm_ls *ls)
{
	set_bit(LSFL_WORK, &ls->ls_flags);
	wake_up_process(ls->ls_recoverd_task);
}

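/* Create the "dlm_recoverd" kernel thread for this lockspace.  Returns 0 on
   success or the kthread_run() error. */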
int dlm_recoverd_start(struct dlm_ls *ls)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		ls->ls_recoverd_task = p;
	return error;
}

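/* Stop the recovery thread; kthread_stop() does not return until
   dlm_recoverd() has exited its loop. */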
void dlm_recoverd_stop(struct dlm_ls *ls)
{
	kthread_stop(ls->ls_recoverd_task);
}

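/* dlm_recoverd_suspend() wakes anything sleeping on ls_wait_general and then
   takes ls_recoverd_active, which ls_recover() holds for an entire pass, so
   suspend does not return until any in-progress pass has finished and no new
   pass can start until dlm_recoverd_resume() releases the mutex. */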
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
	wake_up(&ls->ls_wait_general);
	mutex_lock(&ls->ls_recoverd_active);
}

void dlm_recoverd_resume(struct dlm_ls *ls)
{
	mutex_unlock(&ls->ls_recoverd_active);
}
