xref: /openbmc/linux/fs/ocfs2/dlm/dlmrecovery.c (revision 3213486f)
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

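/* Migration cookies tie together the (possibly many) network messages
 * needed to move a single lock resource.  dlm_send_one_lockres() grabs
 * a fresh cookie whenever a lockres holds more locks than fit in one
 * message; the counter wraps past ~0 back to 1, so a cookie of zero is
 * never handed out. */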
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot=0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
			"dlm_reco-%s", dlm->name);
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}



/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 *
 * the message-flow sketch below maps these steps onto this file's
 * functions.
 */

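/*
 * In terms of this file's functions, one pass of the above looks
 * roughly like this (a sketch of the message flow, not an exhaustive
 * call graph):
 *
 *	dlm_do_recovery()
 *	  dlm_pick_recovery_master()         negotiate the recovery master
 *	  dlm_remaster_locks()               then, on the new master:
 *	    dlm_request_all_locks()            ask each live node for its
 *	                                       locks on the dead node
 *	    [on each node, dlm_request_all_locks_worker() answers by
 *	     sending lockres data via dlm_send_one_lockres(), then
 *	     dlm_send_all_done_msg()]
 *	    dlm_mig_lockres_handler()          master absorbs the lock data
 *	    dlm_reco_data_done_handler()       ...and marks that node done
 *	    dlm_send_finalize_reco_message()   tell everyone to clean up
 *	  dlm_finish_local_lockres_recovery()  re-own the dead node's locks
 */
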
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_domain_fully_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if the node is no longer in the recovery map; its
 * recovery has completed (or it never needed recovering at all) */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}


void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_dead(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_recovered(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
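
/*
 * Illustrative caller pattern (a sketch, not code from this file): a
 * top-level API user would block out recovery before issuing a lock
 * call, e.g.
 *
 *	dlm_wait_for_recovery(dlm);
 *	status = dlmlock(dlm, mode, lksb, flags, name, ...);
 *
 * (dlmlock()'s actual signature lives in dlmapi.h; the arguments here
 * are placeholders.)
 */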
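/* Caller must hold dlm->spinlock.  Paired with dlm_end_recovery(),
 * which takes the spinlock itself and wakes dlm->reco.event waiters. */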
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
	       dlm->name, dlm->reco.dead_node);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
	wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	if (dlm->migrate_done) {
		mlog(0, "%s: no need to do recovery after migrating all "
		     "lock resources\n", dlm->name);
		spin_unlock(&dlm->spinlock);
		return 0;
	}

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	spin_unlock(&dlm->spinlock);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		spin_lock(&dlm->spinlock);
		__dlm_reset_recovery(dlm);
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

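/* This node has become the recovery master.  Three phases, all driven
 * from here: request lock state from every live node, wait until each
 * node reaches DATA_DONE (or dies), then broadcast the finalize
 * message and re-own the dead node's lock resources. */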
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "%s: Done requesting all lock info\n", dlm->name);

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* Set this flag on the recovery master to
			 * prevent recovery of another dead node from
			 * starting before this one is fully done;
			 * otherwise recovery could hang. */
			spin_lock(&dlm->spinlock);
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state,
			 * so just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm);

	return status;
}

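/* Build dlm->reco.node_data from a snapshot of the domain map, one
 * entry per live node.  The dead node must already have been removed
 * from the domain map by this point, hence the BUG_ON below. */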
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;
	int status;

	mlog(0, "\n");


	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
		  "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, &status);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
		     "to recover dead node %u\n", dlm->name, ret,
		     request_from, dead_node);
	else
		ret = status;
	// return from here, then
	// sleep until all received or error
	return ret;

}

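/* This handler runs in network-callback context, so it cannot send the
 * lock state itself; it queues dlm_request_all_locks_worker(), which
 * is allowed to sleep and do network I/O (see dlm_dispatch_work). */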
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack: returning -ENOMEM makes the recovery
		 * master retry after a short wait (see dlm_remaster_locks),
		 * giving our reco.dead_node a chance to catch up */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
		     "to recover dead node %u\n", dlm->name, ret, send_to,
		     dead_node);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
					  "recovery data!\n",
					  ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					/* Can't schedule DLM_UNLOCK_FREE_LOCK
					 * - do manually */
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
				  "doing recovery for node %u. sending it.\n",
				  dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
				  "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

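/* Walk all three lock queues.  This relies on granted, converting and
 * blocked being adjacent list_heads in struct dlm_lock_resource, so
 * 'queue++' steps from one queue to the next (the same layout trick
 * dlm_list_num_to_pointer() uses further down). */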
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				      struct dlm_migratable_lockres *mres,
				      u8 send_to,
				      struct dlm_lock_resource *res,
				      int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}

static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
		return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}

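/* A dummy lock (zero cookie, LKM_IVMODE everywhere, on the blocked
 * list) carries no lock state at all; it only tells the recipient that
 * this node holds a mastery reference on the lockres.
 * dlm_is_dummy_lock() below recognizes the pattern on the receiving
 * side. */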
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
	    ml->type == LKM_IVMODE &&
	    ml->convert_type == LKM_IVMODE &&
	    ml->highest_blocked == LKM_IVMODE &&
	    ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}

int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
			  "require more than one network packet to "
			  "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ?  "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}



/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (!dlm_joined(dlm)) {
		mlog(ML_ERROR, "Domain %s not joined! "
			  "lockres %.*s, master %u\n",
			  dlm->name, mres->lockname_len,
			  mres->lockname, mres->master);
		dlm_put(dlm);
		return -EINVAL;
	}

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
		  (mres->flags & DLM_MRES_RECOVERY) ?
		  "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	hash = dlm_lockid_hash(mres->lockname, mres->lockname_len);
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
			hash);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(0, "%s: node is attempting to migrate "
				"lockres %.*s, but marked as dropping "
				" ref!\n", dlm->name,
				mres->lockname_len, mres->lockname);
			ret = -EINVAL;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			dlm_lockres_put(res);
			goto leave;
		}

		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
					  mres->lockname_len,
					  mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->spinlock);
				dlm_lockres_put(res);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	} else {
		spin_unlock(&dlm->spinlock);
		/* need to allocate, just like if it was
		 * mastered here normally  */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. Taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have at least one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
			  "unknown owner.. will need to requery: "
			  "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);

	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
		mlog_errno(ret);
	}

	return ret;
}


1559 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1560 {
1561 	struct dlm_ctxt *dlm;
1562 	struct dlm_migratable_lockres *mres;
1563 	int ret = 0;
1564 	struct dlm_lock_resource *res;
1565 	u8 real_master;
1566 	u8 extra_ref;
1567 
1568 	dlm = item->dlm;
1569 	mres = (struct dlm_migratable_lockres *)data;
1570 
1571 	res = item->u.ml.lockres;
1572 	real_master = item->u.ml.real_master;
1573 	extra_ref = item->u.ml.extra_ref;
1574 
1575 	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1576 		/* this case is super-rare. only occurs if
1577 		 * node death happens during migration. */
1578 again:
1579 		ret = dlm_lockres_master_requery(dlm, res, &real_master);
1580 		if (ret < 0) {
1581 			mlog(0, "dlm_lockres_master_requery ret=%d\n",
1582 				  ret);
1583 			goto again;
1584 		}
1585 		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1586 			mlog(0, "lockres %.*s not claimed.  "
1587 				   "this node will take it.\n",
1588 				   res->lockname.len, res->lockname.name);
1589 		} else {
1590 			spin_lock(&res->spinlock);
1591 			dlm_lockres_drop_inflight_ref(dlm, res);
1592 			spin_unlock(&res->spinlock);
1593 			mlog(0, "master needs to respond to sender "
1594 				  "that node %u still owns %.*s\n",
1595 				  real_master, res->lockname.len,
1596 				  res->lockname.name);
1597 			/* cannot touch this lockres */
1598 			goto leave;
1599 		}
1600 	}
1601 
1602 	ret = dlm_process_recovery_data(dlm, res, mres);
1603 	if (ret < 0)
1604 		mlog(0, "dlm_process_recovery_data returned  %d\n", ret);
1605 	else
1606 		mlog(0, "dlm_process_recovery_data succeeded\n");
1607 
1608 	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1609 	                   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1610 		ret = dlm_finish_migration(dlm, res, mres->master);
1611 		if (ret < 0)
1612 			mlog_errno(ret);
1613 	}
1614 
1615 leave:
1616 	/* See comment in dlm_mig_lockres_handler() */
1617 	if (res) {
1618 		if (extra_ref)
1619 			dlm_lockres_put(res);
1620 		dlm_lockres_put(res);
1621 	}
1622 	kfree(data);
1623 }
1624 
1625 
1626 
1627 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1628 				      struct dlm_lock_resource *res,
1629 				      u8 *real_master)
1630 {
1631 	struct dlm_node_iter iter;
1632 	int nodenum;
1633 	int ret = 0;
1634 
1635 	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1636 
1637 	/* we only reach here if one of the two nodes in a
1638 	 * migration died while the migration was in progress.
1639 	 * at this point we need to requery the master.  we
1640 	 * know that the new_master got as far as creating
1641 	 * an mle on at least one node, but we do not know
1642 	 * if any nodes had actually cleared the mle and set
1643 	 * the master to the new_master.  the old master
1644 	 * is supposed to set the owner to UNKNOWN in the
1645 	 * event of a new_master death, so the only possible
1646 	 * responses that we can get from nodes here are
1647 	 * that the master is new_master, or that the master
1648 	 * is UNKNOWN.
1649 	 * if all nodes come back with UNKNOWN then we know
1650 	 * the lock needs remastering here.
1651 	 * if any node comes back with a valid master, check
1652 	 * to see if that master is the one that we are
1653 	 * recovering.  if so, then the new_master died and
1654 	 * we need to remaster this lock.  if not, then the
1655 	 * new_master survived and that node will respond to
1656 	 * other nodes about the owner.
1657 	 * if there is an owner, this node needs to dump this
1658 	 * lockres and alert the sender that this lockres
1659 	 * was rejected. */
1660 	spin_lock(&dlm->spinlock);
1661 	dlm_node_iter_init(dlm->domain_map, &iter);
1662 	spin_unlock(&dlm->spinlock);
1663 
1664 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1665 		/* do not send to self */
1666 		if (nodenum == dlm->node_num)
1667 			continue;
1668 		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1669 		if (ret < 0) {
1670 			mlog_errno(ret);
1671 			if (!dlm_is_host_down(ret))
1672 				BUG();
1673 			/* host is down, so answer for that node would be
1674 			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
1675 		}
1676 		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1677 			mlog(0, "lock master is %u\n", *real_master);
1678 			break;
1679 		}
1680 	}
1681 	return ret;
1682 }
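
/*
 * A minimal usage sketch (not part of the original file): how a caller
 * acts on the requery result, following the same pattern as the
 * recovery worker above.  Assumes dlm and res are valid and referenced;
 * the helper name is illustrative only.
 */
static inline int dlm_requery_owner_sketch(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res)
{
	u8 real_master;
	int ret = dlm_lockres_master_requery(dlm, res, &real_master);

	if (ret < 0)
		return ret;	/* caller should retry the requery */
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN)
		return 1;	/* unclaimed: this node may remaster it */
	return 0;		/* a live master exists: do not touch res */
}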
1683 
1684 
1685 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1686 			  u8 nodenum, u8 *real_master)
1687 {
1688 	int ret = -EINVAL;
1689 	struct dlm_master_requery req;
1690 	int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1691 
1692 	memset(&req, 0, sizeof(req));
1693 	req.node_idx = dlm->node_num;
1694 	req.namelen = res->lockname.len;
1695 	memcpy(req.name, res->lockname.name, res->lockname.len);
1696 
1697 resend:
1698 	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1699 				 &req, sizeof(req), nodenum, &status);
1700 	if (ret < 0)
1701 		mlog(ML_ERROR, "Error %d when sending message %u (key "
1702 		     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
1703 		     dlm->key, nodenum);
1704 	else if (status == -ENOMEM) {
1705 		mlog_errno(status);
1706 		msleep(50);
1707 		goto resend;
1708 	} else {
1709 		BUG_ON(status < 0);
1710 		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1711 		*real_master = (u8) (status & 0xff);
1712 		mlog(0, "node %u responded to master requery with %u\n",
1713 			  nodenum, *real_master);
1714 		ret = 0;
1715 	}
1716 	return ret;
1717 }
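
/*
 * Note on the wire convention above: the requery handler encodes its
 * answer in the o2net status itself rather than in a reply payload.  A
 * non-negative status is the owner's node number (or
 * DLM_LOCK_RES_OWNER_UNKNOWN), which is why the BUG_ON()s bound the
 * status to that range before it is truncated to a u8.
 */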
1718 
1719 
1720 /* this function cannot error, so unless the sending
1721  * or receiving of the message failed, the owner can
1722  * be trusted */
1723 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1724 			       void **ret_data)
1725 {
1726 	struct dlm_ctxt *dlm = data;
1727 	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1728 	struct dlm_lock_resource *res = NULL;
1729 	unsigned int hash;
1730 	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1731 	u32 flags = DLM_ASSERT_MASTER_REQUERY;
1732 	int dispatched = 0;
1733 
1734 	if (!dlm_grab(dlm)) {
1735 		/* since the domain has gone away on this
1736 		 * node, the proper response is UNKNOWN */
1737 		return master;
1738 	}
1739 
1740 	hash = dlm_lockid_hash(req->name, req->namelen);
1741 
1742 	spin_lock(&dlm->spinlock);
1743 	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1744 	if (res) {
1745 		spin_lock(&res->spinlock);
1746 		master = res->owner;
1747 		if (master == dlm->node_num) {
1748 			int ret = dlm_dispatch_assert_master(dlm, res,
1749 							     0, 0, flags);
1750 			if (ret < 0) {
1751 				mlog_errno(ret);
1752 				spin_unlock(&res->spinlock);
1753 				dlm_lockres_put(res);
1754 				spin_unlock(&dlm->spinlock);
1755 				dlm_put(dlm);
1756 				/* sender will take care of this and retry */
1757 				return ret;
1758 			} else {
1759 				dispatched = 1;
1760 				__dlm_lockres_grab_inflight_worker(dlm, res);
1761 				spin_unlock(&res->spinlock);
1762 			}
1763 		} else {
1764 			/* put, in case we are not the master */
1765 			spin_unlock(&res->spinlock);
1766 			dlm_lockres_put(res);
1767 		}
1768 	}
1769 	spin_unlock(&dlm->spinlock);
1770 
1771 	if (!dispatched)
1772 		dlm_put(dlm);
1773 	return master;
1774 }
1775 
1776 static inline struct list_head *
1777 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1778 {
1779 	struct list_head *ret;
1780 	BUG_ON(list_num < 0);
1781 	BUG_ON(list_num > 2);
1782 	ret = &(res->granted);
1783 	ret += list_num;
1784 	return ret;
1785 }
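
/*
 * The pointer arithmetic above relies on the granted, converting and
 * blocked list heads being laid out consecutively in
 * struct dlm_lock_resource (dlmcommon.h asks that this order be kept),
 * roughly:
 *
 *	struct dlm_lock_resource {
 *		...
 *		struct list_head granted;	(list_num 0)
 *		struct list_head converting;	(list_num 1)
 *		struct list_head blocked;	(list_num 2)
 *		...
 *	};
 *
 * so that &res->granted + list_num lands on the requested queue.
 */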
1786 /* TODO: do ast flush business
1787  * TODO: do MIGRATING and RECOVERING spinning
1788  */
1789 
1790 /*
1791  * NOTE about in-flight requests during migration:
1792  *
1793  * Before attempting the migrate, the master has marked the lockres as
1794  * MIGRATING and then flushed all of its pending ASTs.  So any in-flight
1795  * requests either got queued before the MIGRATING flag got set, in which
1796  * case the lock data will reflect the change and a return message is on
1797  * the way, or the request failed to get in before MIGRATING got set.  In
1798  * this case, the caller will be told to spin and wait for the MIGRATING
1799  * flag to be dropped, then recheck the master.
1800  * This holds true for the convert, cancel and unlock cases, and since lvb
1801  * updates are tied to these same messages, it applies to lvb updates as
1802  * well.  For the lock case, there is no way a lock can be on the master
1803  * queue and not be on the secondary queue, since the lock is always added
1804  * locally first.  This means that the new target node will never be sent
1805  * a lock that it doesn't already have on its list.
1806  * In total, this means that the local lock is correct and should not be
1807  * updated to match the one sent by the master.  Any messages sent back
1808  * from the master before the MIGRATING flag will bring the lock properly
1809  * up-to-date, and the change will be ordered properly for the waiter.
1810  * We will *not* attempt to modify the lock underneath the waiter.
1811  */
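
/*
 * A sketch of the in-flight timeline described above (illustrative
 * only, no actual symbols):
 *
 *	requester                       master
 *	---------                       ------
 *	convert msg ------------------> queued before MIGRATING is set:
 *	                                lock data updated, reply on the way
 *	                                MIGRATING set, pending ASTs flushed
 *	convert msg ------------------> refused: requester is told to spin
 *	                                until MIGRATING drops, then it
 *	                                rechecks who the master is
 */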
1812 
1813 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1814 				     struct dlm_lock_resource *res,
1815 				     struct dlm_migratable_lockres *mres)
1816 {
1817 	struct dlm_migratable_lock *ml;
1818 	struct list_head *queue, *iter;
1819 	struct list_head *tmpq = NULL;
1820 	struct dlm_lock *newlock = NULL;
1821 	struct dlm_lockstatus *lksb = NULL;
1822 	int ret = 0;
1823 	int i, j, bad;
1824 	struct dlm_lock *lock;
1825 	u8 from = O2NM_MAX_NODES;
1826 	__be64 c;
1827 
1828 	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1829 	for (i=0; i<mres->num_locks; i++) {
1830 		ml = &(mres->ml[i]);
1831 
1832 		if (dlm_is_dummy_lock(dlm, ml, &from)) {
1833 			/* placeholder, just need to set the refmap bit */
1834 			BUG_ON(mres->num_locks != 1);
1835 			mlog(0, "%s:%.*s: dummy lock for %u\n",
1836 			     dlm->name, mres->lockname_len, mres->lockname,
1837 			     from);
1838 			spin_lock(&res->spinlock);
1839 			dlm_lockres_set_refmap_bit(dlm, res, from);
1840 			spin_unlock(&res->spinlock);
1841 			break;
1842 		}
1843 		BUG_ON(ml->highest_blocked != LKM_IVMODE);
1844 		newlock = NULL;
1845 		lksb = NULL;
1846 
1847 		queue = dlm_list_num_to_pointer(res, ml->list);
1848 		tmpq = NULL;
1849 
1850 		/* if the lock is for the local node it needs to
1851 		 * be moved to the proper location within the queue.
1852 		 * do not allocate a new lock structure. */
1853 		if (ml->node == dlm->node_num) {
1854 			/* MIGRATION ONLY! */
1855 			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1856 
1857 			lock = NULL;
1858 			spin_lock(&res->spinlock);
1859 			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1860 				tmpq = dlm_list_idx_to_ptr(res, j);
1861 				list_for_each(iter, tmpq) {
1862 					lock = list_entry(iter,
1863 						  struct dlm_lock, list);
1864 					if (lock->ml.cookie == ml->cookie)
1865 						break;
1866 					lock = NULL;
1867 				}
1868 				if (lock)
1869 					break;
1870 			}
1871 
1872 			/* lock is always created locally first, and
1873 			 * destroyed locally last.  it must be on the list */
1874 			if (!lock) {
1875 				c = ml->cookie;
1876 				mlog(ML_ERROR, "Could not find local lock "
1877 					       "with cookie %u:%llu, node %u, "
1878 					       "list %u, flags 0x%x, type %d, "
1879 					       "conv %d, highest blocked %d\n",
1880 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
1881 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1882 				     ml->node, ml->list, ml->flags, ml->type,
1883 				     ml->convert_type, ml->highest_blocked);
1884 				__dlm_print_one_lock_resource(res);
1885 				BUG();
1886 			}
1887 
1888 			if (lock->ml.node != ml->node) {
1889 				c = lock->ml.cookie;
1890 				mlog(ML_ERROR, "Mismatched node# in lock "
1891 				     "cookie %u:%llu, name %.*s, node %u\n",
1892 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
1893 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1894 				     res->lockname.len, res->lockname.name,
1895 				     lock->ml.node);
1896 				c = ml->cookie;
1897 				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1898 				     "node %u, list %u, flags 0x%x, type %d, "
1899 				     "conv %d, highest blocked %d\n",
1900 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
1901 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1902 				     ml->node, ml->list, ml->flags, ml->type,
1903 				     ml->convert_type, ml->highest_blocked);
1904 				__dlm_print_one_lock_resource(res);
1905 				BUG();
1906 			}
1907 
1908 			if (tmpq != queue) {
1909 				c = ml->cookie;
1910 				mlog(0, "Lock cookie %u:%llu was on list %u "
1911 				     "instead of list %u for %.*s\n",
1912 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
1913 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1914 				     j, ml->list, res->lockname.len,
1915 				     res->lockname.name);
1916 				__dlm_print_one_lock_resource(res);
1917 				spin_unlock(&res->spinlock);
1918 				continue;
1919 			}
1920 
1921 			/* see NOTE above about why we do not update
1922 			 * to match the master here */
1923 
1924 			/* move the lock to its proper place */
1925 			/* do not alter lock refcount.  switching lists. */
1926 			list_move_tail(&lock->list, queue);
1927 			spin_unlock(&res->spinlock);
1928 
1929 			mlog(0, "just reordered a local lock!\n");
1930 			continue;
1931 		}
1932 
1933 		/* lock is for another node. */
1934 		newlock = dlm_new_lock(ml->type, ml->node,
1935 				       be64_to_cpu(ml->cookie), NULL);
1936 		if (!newlock) {
1937 			ret = -ENOMEM;
1938 			goto leave;
1939 		}
1940 		lksb = newlock->lksb;
1941 		dlm_lock_attach_lockres(newlock, res);
1942 
1943 		if (ml->convert_type != LKM_IVMODE) {
1944 			BUG_ON(queue != &res->converting);
1945 			newlock->ml.convert_type = ml->convert_type;
1946 		}
1947 		lksb->flags |= (ml->flags &
1948 				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1949 
1950 		if (ml->type == LKM_NLMODE)
1951 			goto skip_lvb;
1952 
1953 		/*
1954 		 * If the lock is in the blocked list it can't have a valid lvb,
1955 		 * so skip it
1956 		 */
1957 		if (ml->list == DLM_BLOCKED_LIST)
1958 			goto skip_lvb;
1959 
1960 		if (!dlm_lvb_is_empty(mres->lvb)) {
1961 			if (lksb->flags & DLM_LKSB_PUT_LVB) {
1962 				/* other node was trying to update
1963 				 * lvb when node died.  recreate the
1964 				 * lksb with the updated lvb. */
1965 				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1966 				/* the lock resource lvb update must happen
1967 				 * NOW, before the spinlock is dropped.
1968 				 * we no longer wait for the AST to update
1969 				 * the lvb. */
1970 				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1971 			} else {
1972 				/* otherwise, the node is sending its
1973 				 * most recent valid lvb info */
1974 				BUG_ON(ml->type != LKM_EXMODE &&
1975 				       ml->type != LKM_PRMODE);
1976 				if (!dlm_lvb_is_empty(res->lvb) &&
1977 				    (ml->type == LKM_EXMODE ||
1978 				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1979 					int i;
1980 					mlog(ML_ERROR, "%s:%.*s: received bad "
1981 					     "lvb! type=%d\n", dlm->name,
1982 					     res->lockname.len,
1983 					     res->lockname.name, ml->type);
1984 					printk("lockres lvb=[");
1985 					for (i=0; i<DLM_LVB_LEN; i++)
1986 						printk("%02x", res->lvb[i]);
1987 					printk("]\nmigrated lvb=[");
1988 					for (i=0; i<DLM_LVB_LEN; i++)
1989 						printk("%02x", mres->lvb[i]);
1990 					printk("]\n");
1991 					dlm_print_one_lock_resource(res);
1992 					BUG();
1993 				}
1994 				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1995 			}
1996 		}
1997 skip_lvb:
1998 
1999 		/* NOTE:
2000 		 * wrt lock queue ordering and recovery:
2001 		 *    1. order of locks on granted queue is
2002 		 *       meaningless.
2003 		 *    2. order of locks on converting queue is
2004 		 *       LOST with the node death.  sorry charlie.
2005 		 *    3. order of locks on the blocked queue is
2006 		 *       also LOST.
2007 		 * order of locks does not affect integrity, it
2008 		 * just means that a lock request may get pushed
2009 		 * back in line as a result of the node death.
2010 		 * also note that for a given node the lock order
2011 		 * for its secondary queue locks is preserved
2012 		 * relative to each other, but clearly *not*
2013 		 * preserved relative to locks from other nodes.
2014 		 */
2015 		bad = 0;
2016 		spin_lock(&res->spinlock);
2017 		list_for_each_entry(lock, queue, list) {
2018 			if (lock->ml.cookie == ml->cookie) {
2019 				c = lock->ml.cookie;
2020 				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
2021 				     "exists on this lockres!\n", dlm->name,
2022 				     res->lockname.len, res->lockname.name,
2023 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
2024 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));
2025 
2026 				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
2027 				     "node=%u, cookie=%u:%llu, queue=%d\n",
2028 				     ml->type, ml->convert_type, ml->node,
2029 				     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
2030 				     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
2031 				     ml->list);
2032 
2033 				__dlm_print_one_lock_resource(res);
2034 				bad = 1;
2035 				break;
2036 			}
2037 		}
2038 		if (!bad) {
2039 			dlm_lock_get(newlock);
2040 			if (mres->flags & DLM_MRES_RECOVERY &&
2041 					ml->list == DLM_CONVERTING_LIST &&
2042 					newlock->ml.type >
2043 					newlock->ml.convert_type) {
2044 				/* newlock is doing downconvert, add it to the
2045 				 * head of converting list */
2046 				list_add(&newlock->list, queue);
2047 			} else
2048 				list_add_tail(&newlock->list, queue);
2049 			mlog(0, "%s:%.*s: added lock for node %u, "
2050 			     "setting refmap bit\n", dlm->name,
2051 			     res->lockname.len, res->lockname.name, ml->node);
2052 			dlm_lockres_set_refmap_bit(dlm, res, ml->node);
2053 		}
2054 		spin_unlock(&res->spinlock);
2055 	}
2056 	mlog(0, "done running all the locks\n");
2057 
2058 leave:
2059 	/* balance the ref taken when the work was queued */
2060 	spin_lock(&res->spinlock);
2061 	dlm_lockres_drop_inflight_ref(dlm, res);
2062 	spin_unlock(&res->spinlock);
2063 
2064 	if (ret < 0)
2065 		mlog_errno(ret);
2066 
2067 	return ret;
2068 }
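
/*
 * Note on the "dummy lock" case at the top of this function: when the
 * sending node has no actual locks but still holds a reference on the
 * lockres, it ships a single placeholder lock so the new master can set
 * that node's refmap bit; num_locks is 1 and no real lock is created.
 */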
2069 
2070 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
2071 				       struct dlm_lock_resource *res)
2072 {
2073 	int i;
2074 	struct list_head *queue;
2075 	struct dlm_lock *lock, *next;
2076 
2077 	assert_spin_locked(&dlm->spinlock);
2078 	assert_spin_locked(&res->spinlock);
2079 	res->state |= DLM_LOCK_RES_RECOVERING;
2080 	if (!list_empty(&res->recovering)) {
2081 		mlog(0,
2082 		     "Recovering res %s:%.*s, is already on recovery list!\n",
2083 		     dlm->name, res->lockname.len, res->lockname.name);
2084 		list_del_init(&res->recovering);
2085 		dlm_lockres_put(res);
2086 	}
2087 	/* We need to hold a reference while on the recovery list */
2088 	dlm_lockres_get(res);
2089 	list_add_tail(&res->recovering, &dlm->reco.resources);
2090 
2091 	/* find any pending locks and put them back on proper list */
2092 	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2093 		queue = dlm_list_idx_to_ptr(res, i);
2094 		list_for_each_entry_safe(lock, next, queue, list) {
2095 			dlm_lock_get(lock);
2096 			if (lock->convert_pending) {
2097 				/* move converting lock back to granted */
2098 				mlog(0, "node died with convert pending "
2099 				     "on %.*s. move back to granted list.\n",
2100 				     res->lockname.len, res->lockname.name);
2101 				dlm_revert_pending_convert(res, lock);
2102 				lock->convert_pending = 0;
2103 			} else if (lock->lock_pending) {
2104 				/* remove pending lock requests completely */
2105 				BUG_ON(i != DLM_BLOCKED_LIST);
2106 				mlog(0, "node died with lock pending "
2107 				     "on %.*s. remove from blocked list and skip.\n",
2108 				     res->lockname.len, res->lockname.name);
2109 				/* lock will be floating until ref in
2110 				 * dlmlock_remote is freed after the network
2111 				 * call returns.  ok for it to not be on any
2112 				 * list since no ast can be called
2113 				 * (the master is dead). */
2114 				dlm_revert_pending_lock(res, lock);
2115 				lock->lock_pending = 0;
2116 			} else if (lock->unlock_pending) {
2117 				/* if an unlock was in progress, treat as
2118 				 * if this had completed successfully
2119 				 * before sending this lock state to the
2120 				 * new master.  note that the dlm_unlock
2121 				 * call is still responsible for calling
2122 				 * the unlockast.  that will happen after
2123 				 * the network call times out.  for now,
2124 				 * just move lists to prepare the new
2125 				 * recovery master.  */
2126 				BUG_ON(i != DLM_GRANTED_LIST);
2127 				mlog(0, "node died with unlock pending "
2128 				     "on %.*s. remove from blocked list and skip.\n",
2129 				     res->lockname.len, res->lockname.name);
2130 				dlm_commit_pending_unlock(res, lock);
2131 				lock->unlock_pending = 0;
2132 			} else if (lock->cancel_pending) {
2133 				/* if a cancel was in progress, treat as
2134 				 * if this had completed successfully
2135 				 * before sending this lock state to the
2136 				 * new master */
2137 				BUG_ON(i != DLM_CONVERTING_LIST);
2138 				mlog(0, "node died with cancel pending "
2139 				     "on %.*s. move back to granted list.\n",
2140 				     res->lockname.len, res->lockname.name);
2141 				dlm_commit_pending_cancel(res, lock);
2142 				lock->cancel_pending = 0;
2143 			}
2144 			dlm_lock_put(lock);
2145 		}
2146 	}
2147 }
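
/*
 * Summary of the pending-state dispositions above, for quick reference
 * (derived from the code, not additional behavior):
 *
 *	convert_pending -> dlm_revert_pending_convert(): back to granted
 *	lock_pending    -> dlm_revert_pending_lock(): off the blocked list
 *	unlock_pending  -> dlm_commit_pending_unlock(): treated as if the
 *	                   unlock had completed before the master died
 *	cancel_pending  -> dlm_commit_pending_cancel(): treated as if the
 *	                   cancel had completed; lock returns to granted
 */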
2148 
2149 
2150 
2151 /* removes all recovered locks from the recovery list.
2152  * sets the res->owner to the new master.
2153  * unsets the RECOVERY flag and wakes waiters. */
2154 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2155 					      u8 dead_node, u8 new_master)
2156 {
2157 	int i;
2158 	struct hlist_head *bucket;
2159 	struct dlm_lock_resource *res, *next;
2160 
2161 	assert_spin_locked(&dlm->spinlock);
2162 
2163 	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2164 		if (res->owner == dead_node) {
2165 			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2166 			     dlm->name, res->lockname.len, res->lockname.name,
2167 			     res->owner, new_master);
2168 			list_del_init(&res->recovering);
2169 			spin_lock(&res->spinlock);
2170 			/* new_master has our reference from
2171 			 * the lock state sent during recovery */
2172 			dlm_change_lockres_owner(dlm, res, new_master);
2173 			res->state &= ~DLM_LOCK_RES_RECOVERING;
2174 			if (__dlm_lockres_has_locks(res))
2175 				__dlm_dirty_lockres(dlm, res);
2176 			spin_unlock(&res->spinlock);
2177 			wake_up(&res->wq);
2178 			dlm_lockres_put(res);
2179 		}
2180 	}
2181 
2182 	/* this will become unnecessary eventually, but
2183 	 * for now we need to run the whole hash, clear
2184 	 * the RECOVERING state and set the owner
2185 	 * if necessary */
2186 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2187 		bucket = dlm_lockres_hash(dlm, i);
2188 		hlist_for_each_entry(res, bucket, hash_node) {
2189 			if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) {
2190 				spin_lock(&res->spinlock);
2191 				res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING;
2192 				spin_unlock(&res->spinlock);
2193 				wake_up(&res->wq);
2194 			}
2195 
2196 			if (!(res->state & DLM_LOCK_RES_RECOVERING))
2197 				continue;
2198 
2199 			if (res->owner != dead_node &&
2200 			    res->owner != dlm->node_num)
2201 				continue;
2202 
2203 			if (!list_empty(&res->recovering)) {
2204 				list_del_init(&res->recovering);
2205 				dlm_lockres_put(res);
2206 			}
2207 
2208 			/* new_master has our reference from
2209 			 * the lock state sent during recovery */
2210 			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2211 			     dlm->name, res->lockname.len, res->lockname.name,
2212 			     res->owner, new_master);
2213 			spin_lock(&res->spinlock);
2214 			dlm_change_lockres_owner(dlm, res, new_master);
2215 			res->state &= ~DLM_LOCK_RES_RECOVERING;
2216 			if (__dlm_lockres_has_locks(res))
2217 				__dlm_dirty_lockres(dlm, res);
2218 			spin_unlock(&res->spinlock);
2219 			wake_up(&res->wq);
2220 		}
2221 	}
2222 }
2223 
2224 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2225 {
2226 	if (local) {
2227 		if (lock->ml.type != LKM_EXMODE &&
2228 		    lock->ml.type != LKM_PRMODE)
2229 			return 1;
2230 	} else if (lock->ml.type == LKM_EXMODE)
2231 		return 1;
2232 	return 0;
2233 }
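
/*
 * Truth table for the helper above ("local" means we are inspecting our
 * own locks on a remotely mastered lockres; otherwise we are the master
 * inspecting the dead node's locks):
 *
 *	local lock, neither PR nor EX   -> invalidate (no valid lvb held)
 *	local lock, PR or EX            -> keep
 *	dead node's lock, EX            -> invalidate (last write may be lost)
 *	dead node's lock, anything else -> keep
 */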
2234 
2235 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2236 			       struct dlm_lock_resource *res, u8 dead_node)
2237 {
2238 	struct list_head *queue;
2239 	struct dlm_lock *lock;
2240 	int blank_lvb = 0, local = 0;
2241 	int i;
2242 	u8 search_node;
2243 
2244 	assert_spin_locked(&dlm->spinlock);
2245 	assert_spin_locked(&res->spinlock);
2246 
2247 	if (res->owner == dlm->node_num)
2248 		/* if this node owned the lockres, and if the dead node
2249 		 * had an EX when it died, blank out the lvb */
2250 		search_node = dead_node;
2251 	else {
2252 		/* if this is a secondary lockres, and we had no EX or PR
2253 		 * locks granted, we can no longer trust the lvb */
2254 		search_node = dlm->node_num;
2255 		local = 1;  /* check local state for valid lvb */
2256 	}
2257 
2258 	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2259 		queue = dlm_list_idx_to_ptr(res, i);
2260 		list_for_each_entry(lock, queue, list) {
2261 			if (lock->ml.node == search_node) {
2262 				if (dlm_lvb_needs_invalidation(lock, local)) {
2263 					/* zero the lksb lvb and lockres lvb */
2264 					blank_lvb = 1;
2265 					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2266 				}
2267 			}
2268 		}
2269 	}
2270 
2271 	if (blank_lvb) {
2272 		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2273 		     res->lockname.len, res->lockname.name, dead_node);
2274 		memset(res->lvb, 0, DLM_LVB_LEN);
2275 	}
2276 }
2277 
2278 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2279 				struct dlm_lock_resource *res, u8 dead_node)
2280 {
2281 	struct dlm_lock *lock, *next;
2282 	unsigned int freed = 0;
2283 
2284 	/* this node is the lockres master:
2285 	 * 1) remove any stale locks for the dead node
2286 	 * 2) if the dead node had an EX when it died, blank out the lvb
2287 	 */
2288 	assert_spin_locked(&dlm->spinlock);
2289 	assert_spin_locked(&res->spinlock);
2290 
2291 	/* We do two dlm_lock_put() calls: one for removing from the list, and
2292 	 * the other to force the DLM_UNLOCK_FREE_LOCK action so as to free the lock */
2293 
2294 	/* TODO: check pending_asts, pending_basts here */
2295 	list_for_each_entry_safe(lock, next, &res->granted, list) {
2296 		if (lock->ml.node == dead_node) {
2297 			list_del_init(&lock->list);
2298 			dlm_lock_put(lock);
2299 			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2300 			dlm_lock_put(lock);
2301 			freed++;
2302 		}
2303 	}
2304 	list_for_each_entry_safe(lock, next, &res->converting, list) {
2305 		if (lock->ml.node == dead_node) {
2306 			list_del_init(&lock->list);
2307 			dlm_lock_put(lock);
2308 			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2309 			dlm_lock_put(lock);
2310 			freed++;
2311 		}
2312 	}
2313 	list_for_each_entry_safe(lock, next, &res->blocked, list) {
2314 		if (lock->ml.node == dead_node) {
2315 			list_del_init(&lock->list);
2316 			dlm_lock_put(lock);
2317 			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2318 			dlm_lock_put(lock);
2319 			freed++;
2320 		}
2321 	}
2322 
2323 	if (freed) {
2324 		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2325 		     "dropping ref from lockres\n", dlm->name,
2326 		     res->lockname.len, res->lockname.name, freed, dead_node);
2327 		if(!test_bit(dead_node, res->refmap)) {
2328 			mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2329 			     "but ref was not set\n", dlm->name,
2330 			     res->lockname.len, res->lockname.name, freed, dead_node);
2331 			__dlm_print_one_lock_resource(res);
2332 		}
2333 		res->state |= DLM_LOCK_RES_RECOVERY_WAITING;
2334 		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2335 	} else if (test_bit(dead_node, res->refmap)) {
2336 		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2337 		     "no locks and had not purged before dying\n", dlm->name,
2338 		     res->lockname.len, res->lockname.name, dead_node);
2339 		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2340 	}
2341 
2342 	/* do not kick thread yet */
2343 	__dlm_dirty_lockres(dlm, res);
2344 }
2345 
2346 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2347 {
2348 	struct dlm_lock_resource *res;
2349 	int i;
2350 	struct hlist_head *bucket;
2351 	struct hlist_node *tmp;
2352 	struct dlm_lock *lock;
2353 
2354 
2355 	/* purge any stale mles */
2356 	dlm_clean_master_list(dlm, dead_node);
2357 
2358 	/*
2359 	 * now clean up all lock resources.  there are two rules:
2360 	 *
2361 	 * 1) if the dead node was the master, move the lockres
2362 	 *    to the recovering list.  set the RECOVERING flag.
2363 	 *    this lockres needs to be cleaned up before it can
2364 	 *    be used further.
2365 	 *
2366 	 * 2) if this node was the master, remove all locks from
2367 	 *    each of the lockres queues that were owned by the
2368 	 *    dead node.  once recovery finishes, the dlm thread
2369 	 *    can be kicked again to see if any ASTs or BASTs
2370 	 *    need to be fired as a result.
2371 	 */
2372 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2373 		bucket = dlm_lockres_hash(dlm, i);
2374 		hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
2375 			/* always prune any $RECOVERY entries for dead nodes,
2376 			 * otherwise hangs can occur during later recovery */
2377 			if (dlm_is_recovery_lock(res->lockname.name,
2378 						 res->lockname.len)) {
2379 				spin_lock(&res->spinlock);
2380 				list_for_each_entry(lock, &res->granted, list) {
2381 					if (lock->ml.node == dead_node) {
2382 						mlog(0, "AHA! there was "
2383 						     "a $RECOVERY lock for dead "
2384 						     "node %u (%s)!\n",
2385 						     dead_node, dlm->name);
2386 						list_del_init(&lock->list);
2387 						dlm_lock_put(lock);
2388 						/* Can't schedule
2389 						 * DLM_UNLOCK_FREE_LOCK
2390 						 * - do manually */
2391 						dlm_lock_put(lock);
2392 						break;
2393 					}
2394 				}
2395 
2396 				if ((res->owner == dead_node) &&
2397 							(res->state & DLM_LOCK_RES_DROPPING_REF)) {
2398 					dlm_lockres_get(res);
2399 					__dlm_do_purge_lockres(dlm, res);
2400 					spin_unlock(&res->spinlock);
2401 					wake_up(&res->wq);
2402 					dlm_lockres_put(res);
2403 					continue;
2404 				} else if (res->owner == dlm->node_num)
2405 					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2406 				spin_unlock(&res->spinlock);
2407 				continue;
2408 			}
2409 			spin_lock(&res->spinlock);
2410 			/* zero the lvb if necessary */
2411 			dlm_revalidate_lvb(dlm, res, dead_node);
2412 			if (res->owner == dead_node) {
2413 				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2414 					mlog(0, "%s:%.*s: owned by "
2415 						"dead node %u, this node was "
2416 						"dropping its ref when master died. "
2417 						"continue, purging the lockres.\n",
2418 						dlm->name, res->lockname.len,
2419 						res->lockname.name, dead_node);
2420 					dlm_lockres_get(res);
2421 					__dlm_do_purge_lockres(dlm, res);
2422 					spin_unlock(&res->spinlock);
2423 					wake_up(&res->wq);
2424 					dlm_lockres_put(res);
2425 					continue;
2426 				}
2427 				dlm_move_lockres_to_recovery_list(dlm, res);
2428 			} else if (res->owner == dlm->node_num) {
2429 				dlm_free_dead_locks(dlm, res, dead_node);
2430 				__dlm_lockres_calc_usage(dlm, res);
2431 			} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2432 				if (test_bit(dead_node, res->refmap)) {
2433 					mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2434 						"no locks and had not purged before dying\n",
2435 						dlm->name, res->lockname.len,
2436 						res->lockname.name, dead_node);
2437 					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2438 				}
2439 			}
2440 			spin_unlock(&res->spinlock);
2441 		}
2442 	}
2443 
2444 }
2445 
2446 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2447 {
2448 	assert_spin_locked(&dlm->spinlock);
2449 
2450 	if (dlm->reco.new_master == idx) {
2451 		mlog(0, "%s: recovery master %d just died\n",
2452 		     dlm->name, idx);
2453 		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2454 			/* finalize1 was reached, so it is safe to clear
2455 			 * the new_master and dead_node.  that recovery
2456 			 * is complete. */
2457 			mlog(0, "%s: dead master %d had reached "
2458 			     "finalize1 state, clearing\n", dlm->name, idx);
2459 			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2460 			__dlm_reset_recovery(dlm);
2461 		}
2462 	}
2463 
2464 	/* Clean up join state on node death. */
2465 	if (dlm->joining_node == idx) {
2466 		mlog(0, "Clearing join state for node %u\n", idx);
2467 		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2468 	}
2469 
2470 	/* check to see if the node is already considered dead */
2471 	if (!test_bit(idx, dlm->live_nodes_map)) {
2472 		mlog(0, "for domain %s, node %d is already dead. "
2473 		     "another node likely did recovery already.\n",
2474 		     dlm->name, idx);
2475 		return;
2476 	}
2477 
2478 	/* check to see if we do not care about this node */
2479 	if (!test_bit(idx, dlm->domain_map)) {
2480 		/* This also catches the case that we get a node down
2481 		 * but haven't joined the domain yet. */
2482 		mlog(0, "node %u already removed from domain!\n", idx);
2483 		return;
2484 	}
2485 
2486 	clear_bit(idx, dlm->live_nodes_map);
2487 
2488 	/* make sure local cleanup occurs before the heartbeat events */
2489 	if (!test_bit(idx, dlm->recovery_map))
2490 		dlm_do_local_recovery_cleanup(dlm, idx);
2491 
2492 	/* notify anything attached to the heartbeat events */
2493 	dlm_hb_event_notify_attached(dlm, idx, 0);
2494 
2495 	mlog(0, "node %u being removed from domain map!\n", idx);
2496 	clear_bit(idx, dlm->domain_map);
2497 	clear_bit(idx, dlm->exit_domain_map);
2498 	/* wake up migration waiters if a node goes down.
2499 	 * perhaps later we can genericize this for other waiters. */
2500 	wake_up(&dlm->migration_wq);
2501 
2502 	set_bit(idx, dlm->recovery_map);
2503 }
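
/*
 * The ordering in __dlm_hb_node_down() is deliberate: the live_nodes
 * bit is cleared first, local cleanup runs before any heartbeat
 * callbacks fire, and the recovery_map bit is set only at the very end
 * so the recovery thread always finds the local cleanup already done.
 */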
2504 
2505 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2506 {
2507 	struct dlm_ctxt *dlm = data;
2508 
2509 	if (!dlm_grab(dlm))
2510 		return;
2511 
2512 	/*
2513 	 * This will notify any dlm users that a node in our domain
2514 	 * went away without notifying us first.
2515 	 */
2516 	if (test_bit(idx, dlm->domain_map))
2517 		dlm_fire_domain_eviction_callbacks(dlm, idx);
2518 
2519 	spin_lock(&dlm->spinlock);
2520 	__dlm_hb_node_down(dlm, idx);
2521 	spin_unlock(&dlm->spinlock);
2522 
2523 	dlm_put(dlm);
2524 }
2525 
2526 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2527 {
2528 	struct dlm_ctxt *dlm = data;
2529 
2530 	if (!dlm_grab(dlm))
2531 		return;
2532 
2533 	spin_lock(&dlm->spinlock);
2534 	set_bit(idx, dlm->live_nodes_map);
2535 	/* do NOT notify mle attached to the heartbeat events.
2536 	 * new nodes are of no interest to mastery until they have joined. */
2537 	spin_unlock(&dlm->spinlock);
2538 
2539 	dlm_put(dlm);
2540 }
2541 
2542 static void dlm_reco_ast(void *astdata)
2543 {
2544 	struct dlm_ctxt *dlm = astdata;
2545 	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2546 	     dlm->node_num, dlm->name);
2547 }
2548 static void dlm_reco_bast(void *astdata, int blocked_type)
2549 {
2550 	struct dlm_ctxt *dlm = astdata;
2551 	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2552 	     dlm->node_num, dlm->name);
2553 }
2554 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2555 {
2556 	mlog(0, "unlockast for recovery lock fired!\n");
2557 }
2558 
2559 /*
2560  * dlm_pick_recovery_master will continually attempt to use
2561  * dlmlock() on the special "$RECOVERY" lockres with the
2562  * LKM_NOQUEUE flag to get an EX.  every thread that enters
2563  * this function on each node racing to become the recovery
2564  * master will not stop attempting this until either:
2565  * a) this node gets the EX (and becomes the recovery master),
2566  * or b) dlm->reco.new_master gets set to some nodenum
2567  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2568  * so each time a recovery master is needed, the entire cluster
2569  * will sync at this point.  if the new master dies, that will
2570  * be detected in dlm_do_recovery */
2571 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2572 {
2573 	enum dlm_status ret;
2574 	struct dlm_lockstatus lksb;
2575 	int status = -EINVAL;
2576 
2577 	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2578 	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2579 again:
2580 	memset(&lksb, 0, sizeof(lksb));
2581 
2582 	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2583 		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2584 		      dlm_reco_ast, dlm, dlm_reco_bast);
2585 
2586 	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2587 	     dlm->name, ret, lksb.status);
2588 
2589 	if (ret == DLM_NORMAL) {
2590 		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2591 		     dlm->name, dlm->node_num);
2592 
2593 		/* got the EX lock.  check to see if another node
2594 		 * just became the reco master */
2595 		if (dlm_reco_master_ready(dlm)) {
2596 			mlog(0, "%s: got reco EX lock, but %u will "
2597 			     "do the recovery\n", dlm->name,
2598 			     dlm->reco.new_master);
2599 			status = -EEXIST;
2600 		} else {
2601 			status = 0;
2602 
2603 			/* see if recovery was already finished elsewhere */
2604 			spin_lock(&dlm->spinlock);
2605 			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2606 				status = -EINVAL;
2607 				mlog(0, "%s: got reco EX lock, but "
2608 				     "node got recovered already\n", dlm->name);
2609 				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2610 					mlog(ML_ERROR, "%s: new master is %u "
2611 					     "but no dead node!\n",
2612 					     dlm->name, dlm->reco.new_master);
2613 					BUG();
2614 				}
2615 			}
2616 			spin_unlock(&dlm->spinlock);
2617 		}
2618 
2619 		/* if this node has actually become the recovery master,
2620 		 * set the master and send the messages to begin recovery */
2621 		if (!status) {
2622 			mlog(0, "%s: dead=%u, this=%u, sending "
2623 			     "begin_reco now\n", dlm->name,
2624 			     dlm->reco.dead_node, dlm->node_num);
2625 			status = dlm_send_begin_reco_message(dlm,
2626 				      dlm->reco.dead_node);
2627 			/* this always succeeds */
2628 			BUG_ON(status);
2629 
2630 			/* set the new_master to this node */
2631 			spin_lock(&dlm->spinlock);
2632 			dlm_set_reco_master(dlm, dlm->node_num);
2633 			spin_unlock(&dlm->spinlock);
2634 		}
2635 
2636 		/* recovery lock is a special case.  ast will not get fired,
2637 		 * so just go ahead and unlock it. */
2638 		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2639 		if (ret == DLM_DENIED) {
2640 			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2641 			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2642 		}
2643 		if (ret != DLM_NORMAL) {
2644 			/* this would really suck. this could only happen
2645 			 * if there was a network error during the unlock
2646 			 * because of node death.  this means the unlock
2647 			 * is actually "done" and the lock structure is
2648 			 * even freed.  we can continue, but only
2649 			 * because this specific lock name is special. */
2650 			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2651 		}
2652 	} else if (ret == DLM_NOTQUEUED) {
2653 		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2654 		     dlm->name, dlm->node_num);
2655 		/* another node is master. wait on
2656 		 * reco.new_master != O2NM_INVALID_NODE_NUM
2657 		 * for at most one second */
2658 		wait_event_timeout(dlm->dlm_reco_thread_wq,
2659 					 dlm_reco_master_ready(dlm),
2660 					 msecs_to_jiffies(1000));
2661 		if (!dlm_reco_master_ready(dlm)) {
2662 			mlog(0, "%s: reco master taking awhile\n",
2663 			     dlm->name);
2664 			goto again;
2665 		}
2666 		/* another node has informed this one that it is reco master */
2667 		mlog(0, "%s: reco master %u is ready to recover %u\n",
2668 		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2669 		status = -EEXIST;
2670 	} else if (ret == DLM_RECOVERING) {
2671 		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2672 		     dlm->name, dlm->node_num);
2673 		goto again;
2674 	} else {
2675 		struct dlm_lock_resource *res;
2676 
2677 		/* dlmlock returned something other than NOTQUEUED or NORMAL */
2678 		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2679 		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2680 		     dlm_errname(lksb.status));
2681 		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2682 					 DLM_RECOVERY_LOCK_NAME_LEN);
2683 		if (res) {
2684 			dlm_print_one_lock_resource(res);
2685 			dlm_lockres_put(res);
2686 		} else {
2687 			mlog(ML_ERROR, "recovery lock not found\n");
2688 		}
2689 		BUG();
2690 	}
2691 
2692 	return status;
2693 }
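
/*
 * Outcome summary for the dlmlock($RECOVERY) call above:
 *
 *	DLM_NORMAL     got the EX; unless another node already became the
 *	               reco master, send begin_reco and take over
 *	DLM_NOTQUEUED  another node holds the EX; wait up to a second for
 *	               reco.new_master to be announced, then recheck
 *	DLM_RECOVERING the lock master itself died; simply retry
 *	anything else  unexpected: dump the lockres and BUG()
 */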
2694 
2695 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2696 {
2697 	struct dlm_begin_reco br;
2698 	int ret = 0;
2699 	struct dlm_node_iter iter;
2700 	int nodenum;
2701 	int status;
2702 
2703 	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2704 
2705 	spin_lock(&dlm->spinlock);
2706 	dlm_node_iter_init(dlm->domain_map, &iter);
2707 	spin_unlock(&dlm->spinlock);
2708 
2709 	clear_bit(dead_node, iter.node_map);
2710 
2711 	memset(&br, 0, sizeof(br));
2712 	br.node_idx = dlm->node_num;
2713 	br.dead_node = dead_node;
2714 
2715 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2716 		ret = 0;
2717 		if (nodenum == dead_node) {
2718 			mlog(0, "not sending begin reco to dead node "
2719 				  "%u\n", dead_node);
2720 			continue;
2721 		}
2722 		if (nodenum == dlm->node_num) {
2723 			mlog(0, "not sending begin reco to self\n");
2724 			continue;
2725 		}
2726 retry:
2727 		ret = -EINVAL;
2728 		mlog(0, "attempting to send begin reco msg to %d\n",
2729 			  nodenum);
2730 		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2731 					 &br, sizeof(br), nodenum, &status);
2732 		/* negative status is handled ok by caller here */
2733 		if (ret >= 0)
2734 			ret = status;
2735 		if (dlm_is_host_down(ret)) {
2736 			/* node is down.  not involved in recovery
2737 			 * so just keep going */
2738 			mlog(ML_NOTICE, "%s: node %u was down when sending "
2739 			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2740 			ret = 0;
2741 		}
2742 
2743 		/*
2744 		 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2745 		 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2746 		 * We are handling both for compatibility reasons.
2747 		 */
2748 		if (ret == -EAGAIN || ret == EAGAIN) {
2749 			mlog(0, "%s: trying to start recovery of node "
2750 			     "%u, but node %u is waiting for last recovery "
2751 			     "to complete, backoff for a bit\n", dlm->name,
2752 			     dead_node, nodenum);
2753 			msleep(100);
2754 			goto retry;
2755 		}
2756 		if (ret < 0) {
2757 			struct dlm_lock_resource *res;
2758 
2759 			/* this is now a serious problem, possibly ENOMEM
2760 			 * in the network stack.  must retry */
2761 			mlog_errno(ret);
2762 			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2763 			     "returned %d\n", dlm->name, nodenum, ret);
2764 			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2765 						 DLM_RECOVERY_LOCK_NAME_LEN);
2766 			if (res) {
2767 				dlm_print_one_lock_resource(res);
2768 				dlm_lockres_put(res);
2769 			} else {
2770 				mlog(ML_ERROR, "recovery lock not found\n");
2771 			}
2772 			/* sleep for a bit in hopes that we can avoid
2773 			 * another ENOMEM */
2774 			msleep(100);
2775 			goto retry;
2776 		}
2777 	}
2778 
2779 	return ret;
2780 }
2781 
2782 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2783 			   void **ret_data)
2784 {
2785 	struct dlm_ctxt *dlm = data;
2786 	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2787 
2788 	/* ok to return 0, domain has gone away */
2789 	if (!dlm_grab(dlm))
2790 		return 0;
2791 
2792 	spin_lock(&dlm->spinlock);
2793 	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2794 		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2795 		     "but this node is in finalize state, waiting on finalize2\n",
2796 		     dlm->name, br->node_idx, br->dead_node,
2797 		     dlm->reco.dead_node, dlm->reco.new_master);
2798 		spin_unlock(&dlm->spinlock);
2799 		dlm_put(dlm);
2800 		return -EAGAIN;
2801 	}
2802 	spin_unlock(&dlm->spinlock);
2803 
2804 	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2805 	     dlm->name, br->node_idx, br->dead_node,
2806 	     dlm->reco.dead_node, dlm->reco.new_master);
2807 
2808 	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2809 
2810 	spin_lock(&dlm->spinlock);
2811 	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2812 		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2813 			mlog(0, "%s: new_master %u died, changing "
2814 			     "to %u\n", dlm->name, dlm->reco.new_master,
2815 			     br->node_idx);
2816 		} else {
2817 			mlog(0, "%s: new_master %u NOT DEAD, changing "
2818 			     "to %u\n", dlm->name, dlm->reco.new_master,
2819 			     br->node_idx);
2820 			/* may not have seen the new master as dead yet */
2821 		}
2822 	}
2823 	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2824 		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2825 		     "node %u changing it to %u\n", dlm->name,
2826 		     dlm->reco.dead_node, br->node_idx, br->dead_node);
2827 	}
2828 	dlm_set_reco_master(dlm, br->node_idx);
2829 	dlm_set_reco_dead_node(dlm, br->dead_node);
2830 	if (!test_bit(br->dead_node, dlm->recovery_map)) {
2831 		mlog(0, "recovery master %u sees %u as dead, but this "
2832 		     "node has not yet.  marking %u as dead\n",
2833 		     br->node_idx, br->dead_node, br->dead_node);
2834 		if (!test_bit(br->dead_node, dlm->domain_map) ||
2835 		    !test_bit(br->dead_node, dlm->live_nodes_map))
2836 			mlog(0, "%u not in domain/live_nodes map "
2837 			     "so setting it in reco map manually\n",
2838 			     br->dead_node);
2839 		/* force the recovery cleanup in __dlm_hb_node_down
2840 		 * both of these will be cleared in a moment */
2841 		set_bit(br->dead_node, dlm->domain_map);
2842 		set_bit(br->dead_node, dlm->live_nodes_map);
2843 		__dlm_hb_node_down(dlm, br->dead_node);
2844 	}
2845 	spin_unlock(&dlm->spinlock);
2846 
2847 	dlm_kick_recovery_thread(dlm);
2848 
2849 	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2850 	     dlm->name, br->node_idx, br->dead_node,
2851 	     dlm->reco.dead_node, dlm->reco.new_master);
2852 
2853 	dlm_put(dlm);
2854 	return 0;
2855 }
2856 
2857 #define DLM_FINALIZE_STAGE2  0x01
2858 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2859 {
2860 	int ret = 0;
2861 	struct dlm_finalize_reco fr;
2862 	struct dlm_node_iter iter;
2863 	int nodenum;
2864 	int status;
2865 	int stage = 1;
2866 
2867 	mlog(0, "finishing recovery for node %s:%u, "
2868 	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2869 
2870 	spin_lock(&dlm->spinlock);
2871 	dlm_node_iter_init(dlm->domain_map, &iter);
2872 	spin_unlock(&dlm->spinlock);
2873 
2874 stage2:
2875 	memset(&fr, 0, sizeof(fr));
2876 	fr.node_idx = dlm->node_num;
2877 	fr.dead_node = dlm->reco.dead_node;
2878 	if (stage == 2)
2879 		fr.flags |= DLM_FINALIZE_STAGE2;
2880 
2881 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2882 		if (nodenum == dlm->node_num)
2883 			continue;
2884 		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2885 					 &fr, sizeof(fr), nodenum, &status);
2886 		if (ret >= 0)
2887 			ret = status;
2888 		if (ret < 0) {
2889 			mlog(ML_ERROR, "Error %d when sending message %u (key "
2890 			     "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
2891 			     dlm->key, nodenum);
2892 			if (dlm_is_host_down(ret)) {
2893 				/* this has no effect on this recovery
2894 				 * session, so set the status to zero to
2895 				 * finish out the last recovery */
2896 				mlog(ML_ERROR, "node %u went down after this "
2897 				     "node finished recovery.\n", nodenum);
2898 				ret = 0;
2899 				continue;
2900 			}
2901 			break;
2902 		}
2903 	}
2904 	if (stage == 1) {
2905 		/* reset the node_iter back to the top and send finalize2 */
2906 		iter.curnode = -1;
2907 		stage = 2;
2908 		goto stage2;
2909 	}
2910 
2911 	return ret;
2912 }
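
/*
 * The two-stage finalize above pairs with dlm_finalize_reco_handler()
 * below: stage 1 has every node reassign ownership and set
 * DLM_RECO_STATE_FINALIZE; stage 2 clears that flag and resets the
 * recovery state.  The split is what lets __dlm_hb_node_down() treat a
 * reco master death after finalize1 as a completed recovery.
 */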
2913 
2914 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2915 			      void **ret_data)
2916 {
2917 	struct dlm_ctxt *dlm = data;
2918 	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2919 	int stage = 1;
2920 
2921 	/* ok to return 0, domain has gone away */
2922 	if (!dlm_grab(dlm))
2923 		return 0;
2924 
2925 	if (fr->flags & DLM_FINALIZE_STAGE2)
2926 		stage = 2;
2927 
2928 	mlog(0, "%s: node %u finalizing recovery stage%d of "
2929 	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2930 	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2931 
2932 	spin_lock(&dlm->spinlock);
2933 
2934 	if (dlm->reco.new_master != fr->node_idx) {
2935 		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2936 		     "%u is supposed to be the new master, dead=%u\n",
2937 		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
2938 		BUG();
2939 	}
2940 	if (dlm->reco.dead_node != fr->dead_node) {
2941 		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2942 		     "node %u, but node %u is supposed to be dead\n",
2943 		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2944 		BUG();
2945 	}
2946 
2947 	switch (stage) {
2948 		case 1:
2949 			dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2950 			if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2951 				mlog(ML_ERROR, "%s: received finalize1 from "
2952 				     "new master %u for dead node %u, but "
2953 				     "this node has already received it!\n",
2954 				     dlm->name, fr->node_idx, fr->dead_node);
2955 				dlm_print_reco_node_status(dlm);
2956 				BUG();
2957 			}
2958 			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2959 			spin_unlock(&dlm->spinlock);
2960 			break;
2961 		case 2:
2962 			if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2963 				mlog(ML_ERROR, "%s: received finalize2 from "
2964 				     "new master %u for dead node %u, but "
2965 				     "this node did not have finalize1!\n",
2966 				     dlm->name, fr->node_idx, fr->dead_node);
2967 				dlm_print_reco_node_status(dlm);
2968 				BUG();
2969 			}
2970 			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2971 			__dlm_reset_recovery(dlm);
2972 			spin_unlock(&dlm->spinlock);
2973 			dlm_kick_recovery_thread(dlm);
2974 			break;
2975 	}
2976 
2977 	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2978 	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2979 
2980 	dlm_put(dlm);
2981 	return 0;
2982 }
2983