/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmaster.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);

static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	if (dlm != mle->dlm)
		return 0;

	if (namelen != mle->mnamelen ||
	    memcmp(name, mle->mname, namelen) != 0)
		return 0;

	return 1;
}

static struct kmem_cache *dlm_lockres_cache;
static struct kmem_cache *dlm_lockname_cache;
static struct kmem_cache *dlm_mle_cache;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to);


static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);


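/*
 * classify send errors from o2net: anything matched below is taken to
 * mean the remote host is dead or its socket is gone, as opposed to a
 * local programming error.  callers in this file use it roughly like:
 *
 *	ret = o2net_send_message(...);
 *	if (ret < 0 && !dlm_is_host_down(ret))
 *		BUG();	(not a network error -- a local bug)
 */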
int dlm_is_host_down(int errno)
{
	switch (errno) {
		case -EBADF:
		case -ECONNREFUSED:
		case -ENOTCONN:
		case -ECONNRESET:
		case -EPIPE:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ETIMEDOUT:
		case -ECONNABORTED:
		case -ENETDOWN:
		case -ENETUNREACH:
		case -ENETRESET:
		case -ESHUTDOWN:
		case -ENOPROTOOPT:
		case -EINVAL:   /* if returned from our tcp code,
				   this means there is no socket */
			return 1;
	}
	return 0;
}


/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
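/*
 * in outline, the lifecycle implemented below is:
 *
 *	dlm_init_mle()              attaches via __dlm_mle_attach_hb_events()
 *	... master answer arrives ...
 *	dlm_mle_detach_hb_events()  detaches under dlm->spinlock
 *	dlm_mle_release()           detaches again harmlessly (the
 *	                            list_empty check makes detach idempotent)
 */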
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}

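/*
 * the "inuse" count, together with an extra kref, pins an mle across
 * code that drops and retakes the dlm locks, so the entry cannot be
 * freed out from under its user.  taking the reference requires both
 * dlm->spinlock and dlm->master_lock; dlm_put_mle_inuse() retakes
 * them itself.
 */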
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}


/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}

static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_HLIST_NODE(&mle->master_hash_node);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	BUG_ON(mle->type != DLM_MLE_BLOCK &&
	       mle->type != DLM_MLE_MASTER &&
	       mle->type != DLM_MLE_MIGRATION);

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->mleres = res;
		memcpy(mle->mname, res->lockname.name, res->lockname.len);
		mle->mnamelen = res->lockname.len;
		mle->mnamehash = res->lockname.hash;
	} else {
		BUG_ON(!name);
		mle->mleres = NULL;
		memcpy(mle->mname, name, namelen);
		mle->mnamelen = namelen;
		mle->mnamehash = dlm_lockid_hash(name, namelen);
	}

	atomic_inc(&dlm->mle_tot_count[mle->type]);
	atomic_inc(&dlm->mle_cur_count[mle->type]);

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	if (!hlist_unhashed(&mle->master_hash_node))
		hlist_del_init(&mle->master_hash_node);
}

void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	struct hlist_head *bucket;

	assert_spin_locked(&dlm->master_lock);

	bucket = dlm_master_hash(dlm, mle->mnamehash);
	hlist_add_head(&mle->master_hash_node, bucket);
}

/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;
	struct hlist_head *bucket;
	unsigned int hash;

	assert_spin_locked(&dlm->master_lock);

	hash = dlm_lockid_hash(name, namelen);
	bucket = dlm_master_hash(dlm, hash);
	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}


int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
	     mle->type);

	/* remove from list if not already */
	__dlm_unlink_mle(dlm, mle);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	atomic_dec(&dlm->mle_cur_count[mle->type]);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}


/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
					       DLM_LOCKID_NAME_MAX, 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	dlm_destroy_master_caches();
	return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
	if (dlm_lockname_cache) {
		kmem_cache_destroy(dlm_lockname_cache);
		dlm_lockname_cache = NULL;
	}

	if (dlm_lockres_cache) {
		kmem_cache_destroy(dlm_lockres_cache);
		dlm_lockres_cache = NULL;
	}
}

static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;

	res = container_of(kref, struct dlm_lock_resource, refs);
	dlm = res->dlm;

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	atomic_dec(&dlm->res_cur_count);

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     "  We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');

		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	kmem_cache_free(dlm_lockres_cache, res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}

static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	INIT_LIST_HEAD(&res->tracking);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;
	res->inflight_locks = 0;
	res->inflight_assert_workers = 0;

	res->dlm = dlm;

	kref_init(&res->refs);

	atomic_inc(&dlm->res_tot_count);
	atomic_inc(&dlm->res_cur_count);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	res->last_used = 0;

	spin_lock(&dlm->spinlock);
	list_add_tail(&res->tracking, &dlm->tracking_list);
	spin_unlock(&dlm->spinlock);

	memset(res->lvb, 0, DLM_LVB_LEN);
	memset(res->refmap, 0, sizeof(res->refmap));
}

struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
				   const char *name,
				   unsigned int namelen)
{
	struct dlm_lock_resource *res = NULL;

	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		goto error;

	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
	if (!res->lockname.name)
		goto error;

	dlm_init_lockres(dlm, res, name, namelen);
	return res;

error:
	if (res)
		kmem_cache_free(dlm_lockres_cache, res);
	return NULL;
}

void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, int bit)
{
	assert_spin_locked(&res->spinlock);

	mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));

	set_bit(bit, res->refmap);
}

void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res, int bit)
{
	assert_spin_locked(&res->spinlock);

	mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));

	clear_bit(bit, res->refmap);
}

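/*
 * the double-underscore variant below skips the lock assertion: it is
 * used when the lockres is brand new and not yet visible to others
 * (see dlm_get_lock_resource()); all other callers go through
 * dlm_lockres_grab_inflight_ref() with res->spinlock held.
 */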
static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res)
{
	res->inflight_locks++;

	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
	     res->lockname.len, res->lockname.name, res->inflight_locks,
	     __builtin_return_address(0));
}

void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	__dlm_lockres_grab_inflight_ref(dlm, res);
}

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);

	BUG_ON(res->inflight_locks == 0);

	res->inflight_locks--;

	mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
	     res->lockname.len, res->lockname.name, res->inflight_locks,
	     __builtin_return_address(0));

	wake_up(&res->wq);
}

void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
		struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	res->inflight_assert_workers++;
	mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
			dlm->name, res->lockname.len, res->lockname.name,
			res->inflight_assert_workers);
}

static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
		struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	BUG_ON(res->inflight_assert_workers == 0);
	res->inflight_assert_workers--;
	mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
			dlm->name, res->lockname.len, res->lockname.name,
			res->inflight_assert_workers);
}

static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
		struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_drop_inflight_worker(dlm, res);
	spin_unlock(&res->spinlock);
}

/*
 * look up a lock resource by name; it may already exist in the
 * hashtable.  the lockid is NUL terminated.
 *
 * if it is not found, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm master list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.  instead, wait around for that node
 * to assert_master (or die).
 */
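/*
 * as a reading aid, the flow below is roughly:
 *
 *	lookup:	found in the hash?  wait out mastery/migration/purge,
 *		grab an inflight ref and return it.
 *	else:	allocate a lockres and an mle, insert both, send
 *		DLM_MASTER_REQUEST_MSG to every node in mle->vote_map,
 *		then sit in dlm_wait_for_lock_mastery() until
 *		res->owner is known.
 */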
struct dlm_lock_resource *dlm_get_lock_resource(struct dlm_ctxt *dlm,
					  const char *lockid,
					  int namelen,
					  int flags)
{
	struct dlm_lock_resource *tmpres = NULL, *res = NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;

	BUG_ON(!lockid);

	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
	if (tmpres) {
		spin_unlock(&dlm->spinlock);
		spin_lock(&tmpres->spinlock);

		/*
		 * Right after dlm spinlock was released, dlm_thread could have
		 * purged the lockres. Check if lockres got unhashed. If so
		 * start over.
		 */
		if (hlist_unhashed(&tmpres->hash_node)) {
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Wait on the thread that is mastering the resource */
		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			__dlm_wait_on_lockres(tmpres);
			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Wait on the resource purge to complete before continuing */
		if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
			BUG_ON(tmpres->owner == dlm->node_num);
			__dlm_wait_on_lockres_flags(tmpres,
						    DLM_LOCK_RES_DROPPING_REF);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Grab inflight ref to pin the resource */
		dlm_lockres_grab_inflight_ref(dlm, tmpres);

		spin_unlock(&tmpres->spinlock);
		if (res) {
			spin_lock(&dlm->track_lock);
			if (!list_empty(&res->tracking))
				list_del_init(&res->tracking);
			else
				mlog(ML_ERROR, "Resource %.*s not "
						"on the Tracking list\n",
						res->lockname.len,
						res->lockname.name);
			spin_unlock(&dlm->track_lock);
			dlm_lockres_put(res);
		}
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);

	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE!  return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		dlm_lockres_grab_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}

	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		int mig;
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		}
		mig = (mle->type == DLM_MLE_MIGRATION);
		/* if there is a migration in progress, let the migration
		 * finish before continuing.  we can wait for the absence
		 * of the MIGRATION mle: either the migrate finished or
		 * one of the nodes died and the mle was cleaned up.
		 * if there is a BLOCK here, but it already has a master
		 * set, we are too late.  the master does not have a ref
		 * for us in the refmap.  detach the mle and drop it.
		 * either way, go back to the top and start over. */
		if (mig || mle->master != O2NM_MAX_NODES) {
			BUG_ON(mig && mle->master == dlm->node_num);
			/* we arrived too late.  the master does not
			 * have a ref for us. retry. */
			mlog(0, "%s:%.*s: late on %s\n",
			     dlm->name, namelen, lockid,
			     mig ? "MIGRATION" : "BLOCK");
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			if (!mig)
				dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			/* this is lame, but we can't wait on either
			 * the mle or lockres waitqueue here */
			if (mig)
				msleep(100);
			goto lookup;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);

		/* still holding the dlm spinlock, check the recovery map
		 * to see if there are any nodes that still need to be
		 * considered.  these will not appear in the mle nodemap
		 * but they might own this lockres.  wait on them. */
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(0, "%s: res %.*s, At least one node (%d) "
			     "to recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		}
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);

	/* since this lockres is new it does not require the spinlock */
	__dlm_lockres_grab_inflight_ref(dlm, res);

	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

redo_request:
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(0, "%s: Recovery map is not empty, but must "
			     "master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				    "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(0, "%s: res %.*s, At least one node (%d) "
			     "to recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);

		if (wait_on_recovery)
			dlm_wait_for_node_recovery(dlm, bit, 10000);
	}

	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s: res %.*s, Requests only up to %u but "
			     "master is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		wait_on_recovery = 1;
		mlog(0, "%s: res %.*s, Node map changed, redo the master "
		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
			     "dlm_wait_for_lock_mastery, blocked = %d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			dlm_print_one_mle(mle);
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
	     res->lockname.name, res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}


#define DLM_MASTERY_TIMEOUT_MS   5000

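/*
 * returns 0 once mastery is settled (res->owner is valid), or a
 * negative error when the node map changed mid-vote, in which case the
 * caller loops back and resends its master requests.  the timeout only
 * bounds each individual wait on mle->wq; the recheck loop itself is
 * unbounded.
 */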
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		if (res->owner != dlm->node_num) {
			ret = dlm_do_master_request(res, mle, res->owner);
			if (ret < 0) {
				/* give recovery a chance to run */
				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
				msleep(500);
				goto recheck;
			}
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	} else {
		if (!voting_done) {
			mlog(0, "map not changed and voting not done "
			     "for %s:%.*s\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		}
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				/* ref was grabbed in get_lock_resource
				 * will be dropped in dlmlock_master */
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		/*
		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			atomic_read(&mle->mle_refs.refcount),
			res->lockname.len, res->lockname.name);
		*/
		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
			     res->lockname.len, res->lockname.name);
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;   /* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
		ret = 0;
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	/* mastery reference obtained either during
	 * assert_master_handler or in get_lock_resource */
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}

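/*
 * helper for dlm_restart_lock_mastery(): diff_bm is the XOR of the two
 * bitmaps, so the iterator visits exactly the nodes whose state
 * changed.  a set bit in orig_bm means the node went away (NODE_DOWN);
 * otherwise it came up (NODE_UP).
 */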
struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}


static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
						       O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node == lowest) {
					mlog(0, "expected master %u died"
					    " while this node was blocked "
					    "waiting on it!\n", node);
					lowest = find_next_bit(mle->maybe_map,
							O2NM_MAX_NODES,
							lowest+1);
					if (lowest < O2NM_MAX_NODES) {
						mlog(0, "%s:%.*s:still "
						     "blocked. waiting on %u "
						     "now\n", dlm->name,
						     res->lockname.len,
						     res->lockname.name,
						     lowest);
					} else {
						/* mle is an MLE_BLOCK, but
						 * there is now nothing left to
						 * block on.  we need to return
						 * all the way back out and try
						 * again with an MLE_MASTER.
						 * dlm_do_local_recovery_cleanup
						 * has already run, so the mle
						 * refcount is ok */
						mlog(0, "%s:%.*s: no "
						     "longer blocking. try to "
						     "master this here\n",
						     dlm->name,
						     res->lockname.len,
						     res->lockname.name);
						mle->type = DLM_MLE_MASTER;
						mle->mleres = res;
					}
				}
			}

			/* now blank out everything, as if we had never
			 * contacted anyone */
			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
			memset(mle->response_map, 0, sizeof(mle->response_map));
			/* reset the vote_map to the current node_map */
			memcpy(mle->vote_map, mle->node_map,
			       sizeof(mle->node_map));
			/* put myself into the maybe map */
			if (mle->type != DLM_MLE_BLOCK)
				set_bit(dlm->node_num, mle->maybe_map);
		}
		ret = -EAGAIN;
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}


/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 */
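/*
 * the responses handled below mean, roughly:
 *	YES    - "to" is (or is becoming) the master and now holds a
 *	         reference for this node
 *	NO     - "to" is not the master
 *	MAYBE  - "to" is still voting too; remembered in maybe_map
 *	ERROR  - transient failure on "to"; the request is resent
 */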

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response = 0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	request.namelen = (u8)mle->mnamelen;
	memcpy(request.name, mle->mname, request.namelen);

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0) {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}

	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
	switch (response) {
		case DLM_MASTER_RESP_YES:
			set_bit(to, mle->response_map);
			mlog(0, "node %u is the master, response=YES\n", to);
			mlog(0, "%s:%.*s: master node %u now knows I have a "
			     "reference\n", dlm->name, res->lockname.len,
			     res->lockname.name, to);
			mle->master = to;
			break;
		case DLM_MASTER_RESP_NO:
			mlog(0, "node %u not master, response=NO\n", to);
			set_bit(to, mle->response_map);
			break;
		case DLM_MASTER_RESP_MAYBE:
			mlog(0, "node %u not master, response=MAYBE\n", to);
			set_bit(to, mle->response_map);
			set_bit(to, mle->maybe_map);
			break;
		case DLM_MASTER_RESP_ERROR:
			mlog(0, "node %u hit an error, resending\n", to);
			resend = 1;
			response = 0;
			break;
		default:
			mlog(ML_ERROR, "bad response! %u\n", response);
			BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}

/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
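/*
 * in short: a YES response implies this node owns (or is about to own)
 * the lockres, so the requester's bit is set in the refmap and an
 * assert_master is dispatched to clean up any stray mles created on
 * the other nodes along the way.
 */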
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;
	int dispatched = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);

		/*
		 * Right after dlm spinlock was released, dlm_thread could have
		 * purged the lockres. Check if lockres got unhashed. If so
		 * start over.
		 */
		if (hlist_unhashed(&res->hash_node)) {
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);
			goto way_up_top;
		}

		if (res->state & (DLM_LOCK_RES_RECOVERING|
				  DLM_LOCK_RES_MIGRATING)) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered/migrated\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
			spin_unlock(&res->spinlock);
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
				dlm_lockres_set_refmap_bit(dlm, res,
							   request->node_idx);
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}

	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:
	/*
	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
	 * The reference is released by dlm_assert_master_worker() under
	 * the call to dlm_dispatch_assert_master().  If
	 * dlm_assert_master_worker() isn't called, we drop it here.
	 */
	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
			     dlm->node_num, res->lockname.len, res->lockname.name);
		spin_lock(&res->spinlock);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);
		} else {
			dispatched = 1;
			__dlm_lockres_grab_inflight_worker(dlm, res);
			spin_unlock(&res->spinlock);
		}
	} else {
		if (res)
			dlm_lockres_put(res);
	}

	if (!dispatched)
		dlm_put(dlm);
	return response;
}

/*
 * DLM_ASSERT_MASTER_MSG
 */


/*
 * NOTE: this can be used for debugging; a node could
 * periodically run this over all locks it owns
 * and re-assert mastery across the cluster...
 */
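/*
 * asserts mastery of res to every node in nodemap.  a node may answer
 * with DLM_ASSERT_RESPONSE_REASSERT (it saw enough to warrant another
 * pass) and/or DLM_ASSERT_RESPONSE_MASTERY_REF (it holds a reference,
 * so its refmap bit is set here).  DLM_LOCK_RES_SETREF_INPROG is held
 * across the whole exchange.
 */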
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;
	const char *lockname = res->lockname.name;
	unsigned int namelen = res->lockname.len;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	spin_lock(&res->spinlock);
	res->state |= DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);

again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", tmpret,
			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(0, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
			r = 0;
		} else if (r < 0) {
			/* ok, something is horribly messed up.  kill thyself. */
			mlog(ML_ERROR, "during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
			mlog(ML_ERROR, "%.*s: very strange, "
			     "master MLE but no lockres on %u\n",
			     namelen, lockname, to);
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
			mlog(0, "%.*s: node %u created mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
			mlog(0, "%.*s: node %u has a reference to this "
			     "lockres, set the bit in the refmap\n",
			     namelen, lockname, to);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, to);
			spin_unlock(&res->spinlock);
		}
	}

	if (reassert)
		goto again;

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	return ret;
}
1787 
1788 /*
1789  * locks that can be taken here:
1790  * dlm->spinlock
1791  * res->spinlock
1792  * mle->spinlock
1793  * dlm->master_list
1794  *
1795  * if possible, TRIM THIS DOWN!!!
1796  */
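/*
 * Concretely, as implemented below: dlm->spinlock is taken first and held
 * across most of the handler, while dlm->master_lock, res->spinlock and
 * mle->spinlock are each taken (and dropped) nested inside it, never the
 * other way around.
 */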
1797 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1798 			      void **ret_data)
1799 {
1800 	struct dlm_ctxt *dlm = data;
1801 	struct dlm_master_list_entry *mle = NULL;
1802 	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1803 	struct dlm_lock_resource *res = NULL;
1804 	char *name;
1805 	unsigned int namelen, hash;
1806 	u32 flags;
1807 	int master_request = 0, have_lockres_ref = 0;
1808 	int ret = 0;
1809 
1810 	if (!dlm_grab(dlm))
1811 		return 0;
1812 
1813 	name = assert->name;
1814 	namelen = assert->namelen;
1815 	hash = dlm_lockid_hash(name, namelen);
1816 	flags = be32_to_cpu(assert->flags);
1817 
1818 	if (namelen > DLM_LOCKID_NAME_MAX) {
1819 		mlog(ML_ERROR, "Invalid name length!\n");
1820 		goto done;
1821 	}
1822 
1823 	spin_lock(&dlm->spinlock);
1824 
1825 	if (flags)
1826 		mlog(0, "assert_master with flags: %u\n", flags);
1827 
1828 	/* find the MLE */
1829 	spin_lock(&dlm->master_lock);
1830 	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1831 		/* not an error, could be master just re-asserting */
1832 		mlog(0, "just got an assert_master from %u, but no "
1833 		     "MLE for it! (%.*s)\n", assert->node_idx,
1834 		     namelen, name);
1835 	} else {
1836 		int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1837 		if (bit >= O2NM_MAX_NODES) {
1838 			/* not necessarily an error, though less likely.
1839 			 * could be master just re-asserting. */
1840 			mlog(0, "no bits set in the maybe_map, but %u "
1841 			     "is asserting! (%.*s)\n", assert->node_idx,
1842 			     namelen, name);
1843 		} else if (bit != assert->node_idx) {
1844 			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1845 				mlog(0, "master %u was found, %u should "
1846 				     "back off\n", assert->node_idx, bit);
1847 			} else {
1848 				/* with the fix for bug 569, a higher node
1849 				 * number winning the mastery will respond
1850 				 * YES to mastery requests, but this node
1851 				 * had no way of knowing.  let it pass. */
1852 				mlog(0, "%u is the lowest node, "
1853 				     "%u is asserting. (%.*s)  %u must "
1854 				     "have begun after %u won.\n", bit,
1855 				     assert->node_idx, namelen, name, bit,
1856 				     assert->node_idx);
1857 			}
1858 		}
1859 		if (mle->type == DLM_MLE_MIGRATION) {
1860 			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1861 				mlog(0, "%s:%.*s: got cleanup assert"
1862 				     " from %u for migration\n",
1863 				     dlm->name, namelen, name,
1864 				     assert->node_idx);
1865 			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1866 				mlog(0, "%s:%.*s: got unrelated assert"
1867 				     " from %u for migration, ignoring\n",
1868 				     dlm->name, namelen, name,
1869 				     assert->node_idx);
1870 				__dlm_put_mle(mle);
1871 				spin_unlock(&dlm->master_lock);
1872 				spin_unlock(&dlm->spinlock);
1873 				goto done;
1874 			}
1875 		}
1876 	}
1877 	spin_unlock(&dlm->master_lock);
1878 
1879 	/* ok everything checks out with the MLE
1880 	 * now check to see if there is a lockres */
1881 	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1882 	if (res) {
1883 		spin_lock(&res->spinlock);
1884 		if (res->state & DLM_LOCK_RES_RECOVERING)  {
1885 			mlog(ML_ERROR, "%u asserting but %.*s is "
1886 			     "RECOVERING!\n", assert->node_idx, namelen, name);
1887 			goto kill;
1888 		}
1889 		if (!mle) {
1890 			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1891 			    res->owner != assert->node_idx) {
1892 				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1893 				     "but current owner is %u! (%.*s)\n",
1894 				     assert->node_idx, res->owner, namelen,
1895 				     name);
1896 				__dlm_print_one_lock_resource(res);
1897 				BUG();
1898 			}
1899 		} else if (mle->type != DLM_MLE_MIGRATION) {
1900 			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1901 				/* owner is just re-asserting */
1902 				if (res->owner == assert->node_idx) {
1903 					mlog(0, "owner %u re-asserting on "
1904 					     "lock %.*s\n", assert->node_idx,
1905 					     namelen, name);
1906 					goto ok;
1907 				}
1908 				mlog(ML_ERROR, "got assert_master from "
1909 				     "node %u, but %u is the owner! "
1910 				     "(%.*s)\n", assert->node_idx,
1911 				     res->owner, namelen, name);
1912 				goto kill;
1913 			}
1914 			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1915 				mlog(ML_ERROR, "got assert from %u, but lock "
1916 				     "with no owner should be "
1917 				     "in-progress! (%.*s)\n",
1918 				     assert->node_idx,
1919 				     namelen, name);
1920 				goto kill;
1921 			}
1922 		} else /* mle->type == DLM_MLE_MIGRATION */ {
1923 			/* should only be getting an assert from new master */
1924 			if (assert->node_idx != mle->new_master) {
1925 				mlog(ML_ERROR, "got assert from %u, but "
1926 				     "new master is %u, and old master "
1927 				     "was %u (%.*s)\n",
1928 				     assert->node_idx, mle->new_master,
1929 				     mle->master, namelen, name);
1930 				goto kill;
1931 			}
1932 
1933 		}
1934 ok:
1935 		spin_unlock(&res->spinlock);
1936 	}
1937 
1938 	// mlog(0, "woo!  got an assert_master from node %u!\n",
1939 	// 	     assert->node_idx);
1940 	if (mle) {
1941 		int extra_ref = 0;
1942 		int nn = -1;
1943 		int rr, err = 0;
1944 
1945 		spin_lock(&mle->spinlock);
1946 		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1947 			extra_ref = 1;
1948 		else {
1949 			/* MASTER mle: if any bits set in the response map
1950 			 * then the calling node needs to re-assert to clear
1951 			 * up nodes that this node contacted */
1952 			while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
1953 						    nn+1)) < O2NM_MAX_NODES) {
1954 				if (nn != dlm->node_num && nn != assert->node_idx) {
1955 					master_request = 1;
1956 					break;
1957 				}
1958 			}
1959 		}
1960 		mle->master = assert->node_idx;
1961 		atomic_set(&mle->woken, 1);
1962 		wake_up(&mle->wq);
1963 		spin_unlock(&mle->spinlock);
1964 
1965 		if (res) {
1966 			int wake = 0;
1967 			spin_lock(&res->spinlock);
1968 			if (mle->type == DLM_MLE_MIGRATION) {
1969 				mlog(0, "finishing off migration of lockres %.*s, "
1970 				     "from %u to %u\n",
1971 				     res->lockname.len, res->lockname.name,
1972 				     dlm->node_num, mle->new_master);
1973 				res->state &= ~DLM_LOCK_RES_MIGRATING;
1974 				wake = 1;
1975 				dlm_change_lockres_owner(dlm, res, mle->new_master);
1976 				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1977 			} else {
1978 				dlm_change_lockres_owner(dlm, res, mle->master);
1979 			}
1980 			spin_unlock(&res->spinlock);
1981 			have_lockres_ref = 1;
1982 			if (wake)
1983 				wake_up(&res->wq);
1984 		}
1985 
1986 		/* master is known, detach if not already detached.
1987 		 * ensures that only one assert_master call will happen
1988 		 * on this mle. */
1989 		spin_lock(&dlm->master_lock);
1990 
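		/* sanity check the refcount: the mle hash list always holds
		 * one ref, ->inuse holds another, and BLOCK/MIGRATION mles
		 * carry the extra request ref balanced below; anything less
		 * means a ref was already dropped that should not have been */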
1991 		rr = atomic_read(&mle->mle_refs.refcount);
1992 		if (mle->inuse > 0) {
1993 			if (extra_ref && rr < 3)
1994 				err = 1;
1995 			else if (!extra_ref && rr < 2)
1996 				err = 1;
1997 		} else {
1998 			if (extra_ref && rr < 2)
1999 				err = 1;
2000 			else if (!extra_ref && rr < 1)
2001 				err = 1;
2002 		}
2003 		if (err) {
2004 			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
2005 			     "that will mess up this node, refs=%d, extra=%d, "
2006 			     "inuse=%d\n", dlm->name, namelen, name,
2007 			     assert->node_idx, rr, extra_ref, mle->inuse);
2008 			dlm_print_one_mle(mle);
2009 		}
2010 		__dlm_unlink_mle(dlm, mle);
2011 		__dlm_mle_detach_hb_events(dlm, mle);
2012 		__dlm_put_mle(mle);
2013 		if (extra_ref) {
2014 			/* the assert master message now balances the extra
2015 			 * ref given by the master / migration request message.
2016 			 * if this is the last put, it will be removed
2017 			 * from the list. */
2018 			__dlm_put_mle(mle);
2019 		}
2020 		spin_unlock(&dlm->master_lock);
2021 	} else if (res) {
2022 		if (res->owner != assert->node_idx) {
2023 			mlog(0, "assert_master from %u, but current "
2024 			     "owner is %u (%.*s), no mle\n", assert->node_idx,
2025 			     res->owner, namelen, name);
2026 		}
2027 	}
2028 	spin_unlock(&dlm->spinlock);
2029 
2030 done:
2031 	ret = 0;
2032 	if (res) {
2033 		spin_lock(&res->spinlock);
2034 		res->state |= DLM_LOCK_RES_SETREF_INPROG;
2035 		spin_unlock(&res->spinlock);
2036 		*ret_data = (void *)res;
2037 	}
2038 	dlm_put(dlm);
2039 	if (master_request) {
2040 		mlog(0, "need to tell master to reassert\n");
2041 		/* positive. negative would shoot down the node. */
2042 		ret |= DLM_ASSERT_RESPONSE_REASSERT;
2043 		if (!have_lockres_ref) {
2044 			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
2045 			     "mle present here for %s:%.*s, but no lockres!\n",
2046 			     assert->node_idx, dlm->name, namelen, name);
2047 		}
2048 	}
2049 	if (have_lockres_ref) {
2050 		/* let the master know we have a reference to the lockres */
2051 		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2052 		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2053 		     dlm->name, namelen, name, assert->node_idx);
2054 	}
2055 	return ret;
2056 
2057 kill:
2058 	/* kill the caller! */
2059 	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
2060 	     "and killing the other node now!  This node is OK and can continue.\n");
2061 	__dlm_print_one_lock_resource(res);
2062 	spin_unlock(&res->spinlock);
2063 	spin_lock(&dlm->master_lock);
2064 	if (mle)
2065 		__dlm_put_mle(mle);
2066 	spin_unlock(&dlm->master_lock);
2067 	spin_unlock(&dlm->spinlock);
2068 	*ret_data = (void *)res;
2069 	dlm_put(dlm);
2070 	return -EINVAL;
2071 }
2072 
2073 void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2074 {
2075 	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2076 
2077 	if (ret_data) {
2078 		spin_lock(&res->spinlock);
2079 		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2080 		spin_unlock(&res->spinlock);
2081 		wake_up(&res->wq);
2082 		dlm_lockres_put(res);
2083 	}
2084 	return;
2085 }
2086 
2087 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2088 			       struct dlm_lock_resource *res,
2089 			       int ignore_higher, u8 request_from, u32 flags)
2090 {
2091 	struct dlm_work_item *item;
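	/* GFP_ATOMIC: this can be called from handler paths that hold
	 * spinlocks, so the allocation must not sleep */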
2092 	item = kzalloc(sizeof(*item), GFP_ATOMIC);
2093 	if (!item)
2094 		return -ENOMEM;
2095 
2096 
2097 	/* queue up work for dlm_assert_master_worker */
2098 	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2099 	item->u.am.lockres = res; /* already have a ref */
2100 	/* can optionally ignore node numbers higher than this node */
2101 	item->u.am.ignore_higher = ignore_higher;
2102 	item->u.am.request_from = request_from;
2103 	item->u.am.flags = flags;
2104 
2105 	if (ignore_higher)
2106 		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2107 		     res->lockname.name);
2108 
2109 	spin_lock(&dlm->work_lock);
2110 	list_add_tail(&item->list, &dlm->work_list);
2111 	spin_unlock(&dlm->work_lock);
2112 
2113 	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2114 	return 0;
2115 }
2116 
2117 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2118 {
2119 	struct dlm_ctxt *dlm = data;
2120 	int ret = 0;
2121 	struct dlm_lock_resource *res;
2122 	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2123 	int ignore_higher;
2124 	int bit;
2125 	u8 request_from;
2126 	u32 flags;
2127 
2128 	dlm = item->dlm;
2129 	res = item->u.am.lockres;
2130 	ignore_higher = item->u.am.ignore_higher;
2131 	request_from = item->u.am.request_from;
2132 	flags = item->u.am.flags;
2133 
2134 	spin_lock(&dlm->spinlock);
2135 	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2136 	spin_unlock(&dlm->spinlock);
2137 
2138 	clear_bit(dlm->node_num, nodemap);
2139 	if (ignore_higher) {
2140 		/* if this is just to clear up mles for nodes below
2141 		 * this node, do not send the message to the original
2142 		 * caller or any node number higher than this */
2143 		clear_bit(request_from, nodemap);
2144 		bit = dlm->node_num;
2145 		while (1) {
2146 			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2147 					    bit+1);
2148 			if (bit >= O2NM_MAX_NODES)
2149 				break;
2150 			clear_bit(bit, nodemap);
2151 		}
2152 	}
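	/* e.g. with node_num 5 and request_from 2 in a domain of 0-7, the
	 * nodemap is now trimmed down to nodes 0, 1, 3 and 4 */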
2153 
2154 	/*
2155 	 * If we're migrating this lock to someone else, we are no
2156 	 * longer allowed to assert our own mastery.  OTOH, we need to
2157 	 * prevent migration from starting while we're still asserting
2158 	 * our dominance.  The reserved ast delays migration.
2159 	 */
2160 	spin_lock(&res->spinlock);
2161 	if (res->state & DLM_LOCK_RES_MIGRATING) {
2162 		mlog(0, "Someone asked us to assert mastery, but we're "
2163 		     "in the middle of migration.  Skipping assert, "
2164 		     "the new master will handle that.\n");
2165 		spin_unlock(&res->spinlock);
2166 		goto put;
2167 	} else
2168 		__dlm_lockres_reserve_ast(res);
2169 	spin_unlock(&res->spinlock);
2170 
2171 	/* this call now finishes out the nodemap
2172 	 * even if one or more nodes die */
2173 	mlog(0, "worker about to master %.*s here, this=%u\n",
2174 		     res->lockname.len, res->lockname.name, dlm->node_num);
2175 	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2176 	if (ret < 0) {
2177 		/* no need to restart, we are done */
2178 		if (!dlm_is_host_down(ret))
2179 			mlog_errno(ret);
2180 	}
2181 
2182 	/* Ok, we've asserted ourselves.  Let's let migration start. */
2183 	dlm_lockres_release_ast(dlm, res);
2184 
2185 put:
2186 	dlm_lockres_drop_inflight_worker(dlm, res);
2187 
2188 	dlm_lockres_put(res);
2189 
2190 	mlog(0, "finished with dlm_assert_master_worker\n");
2191 }
2192 
2193 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2194  * We cannot wait for node recovery to complete to begin mastering this
2195  * lockres because this lockres is used to kick off recovery! ;-)
2196  * So, do a pre-check on all living nodes to see if any of those nodes
2197  * think that $RECOVERY is currently mastered by a dead node.  If so,
2198  * we wait a short time to allow that node to get notified by its own
2199  * heartbeat stack, then check again.  All $RECOVERY lock resources
2200  * mastered by dead nodes are purged when the heartbeat callback is
2201  * fired, so we know for sure that it is safe to continue once
2202  * the query returns a live node or no node.  */
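/*
 * Illustrative use of the -EAGAIN return (a sketch; the real caller is
 * dlm_get_lock_resource(), earlier in this file, which waits and retries
 * in its mastery loop):
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN)
 *		msleep(100);	(give heartbeat time to deliver the death)
 */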
2203 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2204 				       struct dlm_lock_resource *res)
2205 {
2206 	struct dlm_node_iter iter;
2207 	int nodenum;
2208 	int ret = 0;
2209 	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2210 
2211 	spin_lock(&dlm->spinlock);
2212 	dlm_node_iter_init(dlm->domain_map, &iter);
2213 	spin_unlock(&dlm->spinlock);
2214 
2215 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2216 		/* do not send to self */
2217 		if (nodenum == dlm->node_num)
2218 			continue;
2219 		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2220 		if (ret < 0) {
2221 			mlog_errno(ret);
2222 			if (!dlm_is_host_down(ret))
2223 				BUG();
2224 			/* host is down, so answer for that node would be
2225 			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2226 			ret = 0;
2227 		}
2228 
2229 		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2230 			/* check to see if this master is in the recovery map */
2231 			spin_lock(&dlm->spinlock);
2232 			if (test_bit(master, dlm->recovery_map)) {
2233 				mlog(ML_NOTICE, "%s: node %u has not seen "
2234 				     "node %u go down yet, and thinks the "
2235 				     "dead node is mastering the recovery "
2236 				     "lock.  must wait.\n", dlm->name,
2237 				     nodenum, master);
2238 				ret = -EAGAIN;
2239 			}
2240 			spin_unlock(&dlm->spinlock);
2241 			mlog(0, "%s: reco lock master is %u\n", dlm->name,
2242 			     master);
2243 			break;
2244 		}
2245 	}
2246 	return ret;
2247 }
2248 
2249 /*
2250  * DLM_DEREF_LOCKRES_MSG
2251  */
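/*
 * Protocol overview, as implemented below: when a non-owner node purges
 * its last reference to a lockres, it sends DLM_DEREF_LOCKRES_MSG to the
 * owner.  The owner either clears the sender's refmap bit inline and
 * replies DLM_DEREF_RESPONSE_DONE, or, if an assert_master is still in
 * flight (SETREF_INPROG), queues dlm_deref_lockres_worker() and replies
 * DLM_DEREF_RESPONSE_INPROG; the worker later sends DLM_DEREF_LOCKRES_DONE
 * back so the sender can finally purge the resource.
 */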
2252 
2253 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2254 {
2255 	struct dlm_deref_lockres deref;
2256 	int ret = 0, r;
2257 	const char *lockname;
2258 	unsigned int namelen;
2259 
2260 	lockname = res->lockname.name;
2261 	namelen = res->lockname.len;
2262 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2263 
2264 	memset(&deref, 0, sizeof(deref));
2265 	deref.node_idx = dlm->node_num;
2266 	deref.namelen = namelen;
2267 	memcpy(deref.name, lockname, namelen);
2268 
2269 	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2270 				 &deref, sizeof(deref), res->owner, &r);
2271 	if (ret < 0)
2272 		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2273 		     dlm->name, namelen, lockname, ret, res->owner);
2274 	else if (r < 0) {
2275 		/* BAD.  other node says I did not have a ref. */
2276 		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2277 		     dlm->name, namelen, lockname, res->owner, r);
2278 		dlm_print_one_lock_resource(res);
2279 		if (r == -ENOMEM)
2280 			BUG();
2281 	} else
2282 		ret = r;
2283 
2284 	return ret;
2285 }
2286 
2287 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2288 			      void **ret_data)
2289 {
2290 	struct dlm_ctxt *dlm = data;
2291 	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2292 	struct dlm_lock_resource *res = NULL;
2293 	char *name;
2294 	unsigned int namelen;
2295 	int ret = -EINVAL;
2296 	u8 node;
2297 	unsigned int hash;
2298 	struct dlm_work_item *item;
2299 	int cleared = 0;
2300 	int dispatch = 0;
2301 
2302 	if (!dlm_grab(dlm))
2303 		return 0;
2304 
2305 	name = deref->name;
2306 	namelen = deref->namelen;
2307 	node = deref->node_idx;
2308 
2309 	if (namelen > DLM_LOCKID_NAME_MAX) {
2310 		mlog(ML_ERROR, "Invalid name length!\n");
2311 		goto done;
2312 	}
2313 	if (deref->node_idx >= O2NM_MAX_NODES) {
2314 		mlog(ML_ERROR, "Invalid node number: %u\n", node);
2315 		goto done;
2316 	}
2317 
2318 	hash = dlm_lockid_hash(name, namelen);
2319 
2320 	spin_lock(&dlm->spinlock);
2321 	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2322 	if (!res) {
2323 		spin_unlock(&dlm->spinlock);
2324 		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2325 		     dlm->name, namelen, name);
2326 		goto done;
2327 	}
2328 	spin_unlock(&dlm->spinlock);
2329 
2330 	spin_lock(&res->spinlock);
2331 	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2332 		dispatch = 1;
2333 	else {
2334 		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2335 		if (test_bit(node, res->refmap)) {
2336 			dlm_lockres_clear_refmap_bit(dlm, res, node);
2337 			cleared = 1;
2338 		}
2339 	}
2340 	spin_unlock(&res->spinlock);
2341 
2342 	if (!dispatch) {
2343 		if (cleared)
2344 			dlm_lockres_calc_usage(dlm, res);
2345 		else {
2346 			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2347 			     "but it is already dropped!\n", dlm->name,
2348 			     res->lockname.len, res->lockname.name, node);
2349 			dlm_print_one_lock_resource(res);
2350 		}
2351 		ret = DLM_DEREF_RESPONSE_DONE;
2352 		goto done;
2353 	}
2354 
2355 	item = kzalloc(sizeof(*item), GFP_NOFS);
2356 	if (!item) {
2357 		ret = -ENOMEM;
2358 		mlog_errno(ret);
2359 		goto done;
2360 	}
2361 
2362 	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2363 	item->u.dl.deref_res = res;
2364 	item->u.dl.deref_node = node;
2365 
2366 	spin_lock(&dlm->work_lock);
2367 	list_add_tail(&item->list, &dlm->work_list);
2368 	spin_unlock(&dlm->work_lock);
2369 
2370 	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2371 	return DLM_DEREF_RESPONSE_INPROG;
2372 
2373 done:
2374 	if (res)
2375 		dlm_lockres_put(res);
2376 	dlm_put(dlm);
2377 
2378 	return ret;
2379 }
2380 
2381 int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
2382 			      void **ret_data)
2383 {
2384 	struct dlm_ctxt *dlm = data;
2385 	struct dlm_deref_lockres_done *deref
2386 			= (struct dlm_deref_lockres_done *)msg->buf;
2387 	struct dlm_lock_resource *res = NULL;
2388 	char *name;
2389 	unsigned int namelen;
2390 	int ret = -EINVAL;
2391 	u8 node;
2392 	unsigned int hash;
2393 
2394 	if (!dlm_grab(dlm))
2395 		return 0;
2396 
2397 	name = deref->name;
2398 	namelen = deref->namelen;
2399 	node = deref->node_idx;
2400 
2401 	if (namelen > DLM_LOCKID_NAME_MAX) {
2402 		mlog(ML_ERROR, "Invalid name length!\n");
2403 		goto done;
2404 	}
2405 	if (deref->node_idx >= O2NM_MAX_NODES) {
2406 		mlog(ML_ERROR, "Invalid node number: %u\n", node);
2407 		goto done;
2408 	}
2409 
2410 	hash = dlm_lockid_hash(name, namelen);
2411 
2412 	spin_lock(&dlm->spinlock);
2413 	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2414 	if (!res) {
2415 		spin_unlock(&dlm->spinlock);
2416 		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2417 		     dlm->name, namelen, name);
2418 		goto done;
2419 	}
2420 
2421 	spin_lock(&res->spinlock);
2422 	if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) {
2423 		spin_unlock(&res->spinlock);
2424 		spin_unlock(&dlm->spinlock);
2425 		mlog(ML_NOTICE, "%s:%.*s: node %u sent deref done "
2426 			"but it is already dereferenced!\n", dlm->name,
2427 			res->lockname.len, res->lockname.name, node);
2428 		ret = 0;
2429 		goto done;
2430 	}
2431 
2432 	__dlm_do_purge_lockres(dlm, res);
2433 	spin_unlock(&res->spinlock);
2434 	wake_up(&res->wq);
2435 
2436 	spin_unlock(&dlm->spinlock);
2437 
2438 	ret = 0;
2439 done:
2440 	if (res)
2441 		dlm_lockres_put(res);
2442 	dlm_put(dlm);
2443 	return ret;
2444 }
2445 
2446 static void dlm_drop_lockres_ref_done(struct dlm_ctxt *dlm,
2447 		struct dlm_lock_resource *res, u8 node)
2448 {
2449 	struct dlm_deref_lockres_done deref;
2450 	int ret = 0, r;
2451 	const char *lockname;
2452 	unsigned int namelen;
2453 
2454 	lockname = res->lockname.name;
2455 	namelen = res->lockname.len;
2456 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2457 
2458 	memset(&deref, 0, sizeof(deref));
2459 	deref.node_idx = dlm->node_num;
2460 	deref.namelen = namelen;
2461 	memcpy(deref.name, lockname, namelen);
2462 
2463 	ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key,
2464 				 &deref, sizeof(deref), node, &r);
2465 	if (ret < 0) {
2466 		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE "
2467 				" to node %u\n", dlm->name, namelen,
2468 				lockname, ret, node);
2469 	} else if (r < 0) {
2470 		/* ignore the error */
2471 		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2472 		     dlm->name, namelen, lockname, node, r);
2473 		dlm_print_one_lock_resource(res);
2474 	}
2475 }
2476 
2477 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2478 {
2479 	struct dlm_ctxt *dlm;
2480 	struct dlm_lock_resource *res;
2481 	u8 node;
2482 	u8 cleared = 0;
2483 
2484 	dlm = item->dlm;
2485 	res = item->u.dl.deref_res;
2486 	node = item->u.dl.deref_node;
2487 
2488 	spin_lock(&res->spinlock);
2489 	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2490 	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2491 	if (test_bit(node, res->refmap)) {
2492 		dlm_lockres_clear_refmap_bit(dlm, res, node);
2493 		cleared = 1;
2494 	}
2495 	spin_unlock(&res->spinlock);
2496 
2497 	dlm_drop_lockres_ref_done(dlm, res, node);
2498 
2499 	if (cleared) {
2500 		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2501 		     dlm->name, res->lockname.len, res->lockname.name, node);
2502 		dlm_lockres_calc_usage(dlm, res);
2503 	} else {
2504 		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2505 		     "but it is already dropped!\n", dlm->name,
2506 		     res->lockname.len, res->lockname.name, node);
2507 		dlm_print_one_lock_resource(res);
2508 	}
2509 
2510 	dlm_lockres_put(res);
2511 }
2512 
2513 /*
2514  * A migrateable resource is one that:
2515  * 1. is locally mastered, and
2516  * 2. has zero local locks, and
2517  * 3. has one or more non-local locks, or one or more references.
2518  * Returns 1 if yes, 0 if not.
2519  */
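/*
 * For example: a lockres we master that holds a single granted lock from
 * node 3 and no local locks is migrateable; the same lockres with even one
 * local lock on any queue is not.
 */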
2520 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2521 				      struct dlm_lock_resource *res)
2522 {
2523 	enum dlm_lockres_list idx;
2524 	int nonlocal = 0, node_ref;
2525 	struct list_head *queue;
2526 	struct dlm_lock *lock;
2527 	u64 cookie;
2528 
2529 	assert_spin_locked(&res->spinlock);
2530 
2531 	/* delay migration when the lockres is in MIGRATING state */
2532 	if (res->state & DLM_LOCK_RES_MIGRATING)
2533 		return 0;
2534 
2535 	/* delay migration when the lockres is in RECOVERING or RECOVERY_WAITING state */
2536 	if (res->state & (DLM_LOCK_RES_RECOVERING|
2537 			DLM_LOCK_RES_RECOVERY_WAITING))
2538 		return 0;
2539 
2540 	if (res->owner != dlm->node_num)
2541 		return 0;
2542 
2543 	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2544 		queue = dlm_list_idx_to_ptr(res, idx);
2545 		list_for_each_entry(lock, queue, list) {
2546 			if (lock->ml.node != dlm->node_num) {
2547 				nonlocal++;
2548 				continue;
2549 			}
2550 			cookie = be64_to_cpu(lock->ml.cookie);
2551 			mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
2552 			     "%s list\n", dlm->name, res->lockname.len,
2553 			     res->lockname.name,
2554 			     dlm_get_lock_cookie_node(cookie),
2555 			     dlm_get_lock_cookie_seq(cookie),
2556 			     dlm_list_in_text(idx));
2557 			return 0;
2558 		}
2559 	}
2560 
2561 	if (!nonlocal) {
2562 		node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2563 		if (node_ref >= O2NM_MAX_NODES)
2564 			return 0;
2565 	}
2566 
2567 	mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
2568 	     res->lockname.name);
2569 
2570 	return 1;
2571 }
2572 
2573 /*
2574  * DLM_MIGRATE_LOCKRES
2575  */
2576 
2577 
2578 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2579 			       struct dlm_lock_resource *res, u8 target)
2580 {
2581 	struct dlm_master_list_entry *mle = NULL;
2582 	struct dlm_master_list_entry *oldmle = NULL;
2583 	struct dlm_migratable_lockres *mres = NULL;
2584 	int ret = 0;
2585 	const char *name;
2586 	unsigned int namelen;
2587 	int mle_added = 0;
2588 	int wake = 0;
2589 
2590 	if (!dlm_grab(dlm))
2591 		return -EINVAL;
2592 
2593 	BUG_ON(target == O2NM_MAX_NODES);
2594 
2595 	name = res->lockname.name;
2596 	namelen = res->lockname.len;
2597 
2598 	mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
2599 	     target);
2600 
2601 	/* preallocate up front. if this fails, abort */
2602 	ret = -ENOMEM;
2603 	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2604 	if (!mres) {
2605 		mlog_errno(ret);
2606 		goto leave;
2607 	}
2608 
2609 	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2610 	if (!mle) {
2611 		mlog_errno(ret);
2612 		goto leave;
2613 	}
2614 	ret = 0;
2615 
2616 	/*
2617 	 * clear any existing master requests and
2618 	 * add the migration mle to the list
2619 	 */
2620 	spin_lock(&dlm->spinlock);
2621 	spin_lock(&dlm->master_lock);
2622 	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2623 				    namelen, target, dlm->node_num);
2624 	/* get an extra reference on the mle.
2625 	 * otherwise the assert_master from the new
2626 	 * master will destroy this.
2627 	 */
2628 	dlm_get_mle_inuse(mle);
2629 	spin_unlock(&dlm->master_lock);
2630 	spin_unlock(&dlm->spinlock);
2631 
2632 	if (ret == -EEXIST) {
2633 		mlog(0, "another process is already migrating it\n");
2634 		goto fail;
2635 	}
2636 	mle_added = 1;
2637 
2638 	/*
2639 	 * set the MIGRATING flag and flush asts
2640 	 * if we fail after this we need to re-dirty the lockres
2641 	 */
2642 	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2643 		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2644 		     "the target went down.\n", res->lockname.len,
2645 		     res->lockname.name, target);
2646 		spin_lock(&res->spinlock);
2647 		res->state &= ~DLM_LOCK_RES_MIGRATING;
2648 		wake = 1;
2649 		spin_unlock(&res->spinlock);
2650 		ret = -EINVAL;
2651 	}
2652 
2653 fail:
2654 	if (ret != -EEXIST && oldmle) {
2655 		/* master is known, detach if not already detached */
2656 		dlm_mle_detach_hb_events(dlm, oldmle);
2657 		dlm_put_mle(oldmle);
2658 	}
2659 
2660 	if (ret < 0) {
2661 		if (mle_added) {
2662 			dlm_mle_detach_hb_events(dlm, mle);
2663 			dlm_put_mle(mle);
2664 			dlm_put_mle_inuse(mle);
2665 		} else if (mle) {
2666 			kmem_cache_free(dlm_mle_cache, mle);
2667 			mle = NULL;
2668 		}
2669 		goto leave;
2670 	}
2671 
2672 	/*
2673 	 * at this point, we have a migration target, an mle
2674 	 * in the master list, and the MIGRATING flag set on
2675 	 * the lockres
2676 	 */
2677 
2678 	/* now that remote nodes are spinning on the MIGRATING flag,
2679 	 * ensure that all assert_master work is flushed. */
2680 	flush_workqueue(dlm->dlm_worker);
2681 
2682 	/* notify new node and send all lock state */
2683 	/* call send_one_lockres with migration flag.
2684 	 * this serves as notice to the target node that a
2685 	 * migration is starting. */
2686 	ret = dlm_send_one_lockres(dlm, res, mres, target,
2687 				   DLM_MRES_MIGRATION);
2688 
2689 	if (ret < 0) {
2690 		mlog(0, "migration to node %u failed with %d\n",
2691 		     target, ret);
2692 		/* migration failed, detach and clean up mle */
2693 		dlm_mle_detach_hb_events(dlm, mle);
2694 		dlm_put_mle(mle);
2695 		dlm_put_mle_inuse(mle);
2696 		spin_lock(&res->spinlock);
2697 		res->state &= ~DLM_LOCK_RES_MIGRATING;
2698 		wake = 1;
2699 		spin_unlock(&res->spinlock);
2700 		if (dlm_is_host_down(ret))
2701 			dlm_wait_for_node_death(dlm, target,
2702 						DLM_NODE_DEATH_WAIT_MAX);
2703 		goto leave;
2704 	}
2705 
2706 	/* at this point, the target sends a message to all nodes,
2707 	 * (using dlm_do_migrate_request).  this node is skipped since
2708 	 * we had to put an mle in the list to begin the process.  this
2709 	 * node now waits for target to do an assert master.  this node
2710 	 * will be the last one notified, ensuring that the migration
2711 	 * is complete everywhere.  if the target dies while this is
2712 	 * going on, some nodes could potentially see the target as the
2713 	 * master, so it is important that my recovery finds the migration
2714 	 * mle and sets the master to UNKNOWN. */
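	/*
	 * In message terms the full dance is, roughly:
	 *   1. this node -> target: DLM_MIG_LOCKRES_MSG (the lock state sent above)
	 *   2. target -> all other nodes: DLM_MIGRATE_REQUEST_MSG
	 *   3. target -> everyone: DLM_ASSERT_MASTER_MSG, with this node last
	 * Step 3 is what the wait loop below is waiting for.
	 */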
2715 
2716 
2717 	/* wait for new node to assert master */
2718 	while (1) {
2719 		ret = wait_event_interruptible_timeout(mle->wq,
2720 					(atomic_read(&mle->woken) == 1),
2721 					msecs_to_jiffies(5000));
2722 
2723 		if (ret >= 0) {
2724 			if (atomic_read(&mle->woken) == 1 ||
2725 			    res->owner == target)
2726 				break;
2727 
2728 			mlog(0, "%s:%.*s: timed out during migration\n",
2729 			     dlm->name, res->lockname.len, res->lockname.name);
2730 			/* avoid hang during shutdown when migrating lockres
2731 			 * to a node which also goes down */
2732 			if (dlm_is_node_dead(dlm, target)) {
2733 				mlog(0, "%s:%.*s: expected migration "
2734 				     "target %u is no longer up, restarting\n",
2735 				     dlm->name, res->lockname.len,
2736 				     res->lockname.name, target);
2737 				ret = -EINVAL;
2738 				/* migration failed, detach and clean up mle */
2739 				dlm_mle_detach_hb_events(dlm, mle);
2740 				dlm_put_mle(mle);
2741 				dlm_put_mle_inuse(mle);
2742 				spin_lock(&res->spinlock);
2743 				res->state &= ~DLM_LOCK_RES_MIGRATING;
2744 				wake = 1;
2745 				spin_unlock(&res->spinlock);
2746 				goto leave;
2747 			}
2748 		} else
2749 			mlog(0, "%s:%.*s: caught signal during migration\n",
2750 			     dlm->name, res->lockname.len, res->lockname.name);
2751 	}
2752 
2753 	/* all done, set the owner, clear the flag */
2754 	spin_lock(&res->spinlock);
2755 	dlm_set_lockres_owner(dlm, res, target);
2756 	res->state &= ~DLM_LOCK_RES_MIGRATING;
2757 	dlm_remove_nonlocal_locks(dlm, res);
2758 	spin_unlock(&res->spinlock);
2759 	wake_up(&res->wq);
2760 
2761 	/* master is known, detach if not already detached */
2762 	dlm_mle_detach_hb_events(dlm, mle);
2763 	dlm_put_mle_inuse(mle);
2764 	ret = 0;
2765 
2766 	dlm_lockres_calc_usage(dlm, res);
2767 
2768 leave:
2769 	/* re-dirty the lockres if we failed */
2770 	if (ret < 0)
2771 		dlm_kick_thread(dlm, res);
2772 
2773 	/* wake up waiters if the MIGRATING flag got set
2774 	 * but migration failed */
2775 	if (wake)
2776 		wake_up(&res->wq);
2777 
2778 	if (mres)
2779 		free_page((unsigned long)mres);
2780 
2781 	dlm_put(dlm);
2782 
2783 	mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
2784 	     name, target, ret);
2785 	return ret;
2786 }
2787 
2788 #define DLM_MIGRATION_RETRY_MS  100
2789 
2790 /*
2791  * Should be called only after beginning the domain leave process.
2792  * There should not be any remaining locks on nonlocal lock resources,
2793  * and there should be no local locks left on locally mastered resources.
2794  *
2795  * Called with the dlm spinlock held, may drop it to do migration, but
2796  * will re-acquire before exit.
2797  *
2798  * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2799  */
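/*
 * A sketch of the expected caller pattern (illustrative; the real
 * consumer, dlm_migrate_all_locks() in dlmdomain.c, does the equivalent
 * rescan of its hash bucket):
 *
 *	spin_lock(&dlm->spinlock);
 *	for each res in the bucket:
 *		if (dlm_empty_lockres(dlm, res))
 *			restart the bucket scan (the spinlock was dropped,
 *			so the bucket may have changed under us);
 *	spin_unlock(&dlm->spinlock);
 */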
2800 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2801 {
2802 	int ret;
2803 	int lock_dropped = 0;
2804 	u8 target = O2NM_MAX_NODES;
2805 
2806 	assert_spin_locked(&dlm->spinlock);
2807 
2808 	spin_lock(&res->spinlock);
2809 	if (dlm_is_lockres_migrateable(dlm, res))
2810 		target = dlm_pick_migration_target(dlm, res);
2811 	spin_unlock(&res->spinlock);
2812 
2813 	if (target == O2NM_MAX_NODES)
2814 		goto leave;
2815 
2816 	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2817 	spin_unlock(&dlm->spinlock);
2818 	lock_dropped = 1;
2819 	ret = dlm_migrate_lockres(dlm, res, target);
2820 	if (ret)
2821 		mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2822 		     dlm->name, res->lockname.len, res->lockname.name,
2823 		     target, ret);
2824 	spin_lock(&dlm->spinlock);
2825 leave:
2826 	return lock_dropped;
2827 }
2828 
2829 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2830 {
2831 	int ret;
2832 	spin_lock(&dlm->ast_lock);
2833 	spin_lock(&lock->spinlock);
2834 	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2835 	spin_unlock(&lock->spinlock);
2836 	spin_unlock(&dlm->ast_lock);
2837 	return ret;
2838 }
2839 
2840 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2841 				     struct dlm_lock_resource *res,
2842 				     u8 mig_target)
2843 {
2844 	int can_proceed;
2845 	spin_lock(&res->spinlock);
2846 	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2847 	spin_unlock(&res->spinlock);
2848 
2849 	/* target has died, so make the caller break out of the
2850 	 * wait_event, but caller must recheck the domain_map */
2851 	spin_lock(&dlm->spinlock);
2852 	if (!test_bit(mig_target, dlm->domain_map))
2853 		can_proceed = 1;
2854 	spin_unlock(&dlm->spinlock);
2855 	return can_proceed;
2856 }
2857 
2858 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2859 				struct dlm_lock_resource *res)
2860 {
2861 	int ret;
2862 	spin_lock(&res->spinlock);
2863 	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2864 	spin_unlock(&res->spinlock);
2865 	return ret;
2866 }
2867 
2868 
2869 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2870 				       struct dlm_lock_resource *res,
2871 				       u8 target)
2872 {
2873 	int ret = 0;
2874 
2875 	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2876 	       res->lockname.len, res->lockname.name, dlm->node_num,
2877 	       target);
2878 	/* need to set MIGRATING flag on lockres.  this is done by
2879 	 * ensuring that all asts have been flushed for this lockres. */
2880 	spin_lock(&res->spinlock);
2881 	BUG_ON(res->migration_pending);
2882 	res->migration_pending = 1;
2883 	/* strategy is to reserve an extra ast then release
2884 	 * it below, letting the release do all of the work */
2885 	__dlm_lockres_reserve_ast(res);
2886 	spin_unlock(&res->spinlock);
2887 
2888 	/* now flush all the pending asts */
2889 	dlm_kick_thread(dlm, res);
2890 	/* before waiting on DIRTY, block processes which may
2891 	 * try to dirty the lockres before MIGRATING is set */
2892 	spin_lock(&res->spinlock);
2893 	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2894 	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2895 	spin_unlock(&res->spinlock);
2896 	/* now wait on any pending asts and the DIRTY state */
2897 	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2898 	dlm_lockres_release_ast(dlm, res);
2899 
2900 	mlog(0, "about to wait on migration_wq, dirty=%s\n",
2901 	       res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2902 	/* if the extra ref we just put was the final one, this
2903 	 * will pass thru immediately.  otherwise, we need to wait
2904 	 * for the last ast to finish. */
2905 again:
2906 	ret = wait_event_interruptible_timeout(dlm->migration_wq,
2907 		   dlm_migration_can_proceed(dlm, res, target),
2908 		   msecs_to_jiffies(1000));
2909 	if (ret < 0) {
2910 		mlog(0, "woken again: migrating? %s, dead? %s\n",
2911 		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2912 		       test_bit(target, dlm->domain_map) ? "no":"yes");
2913 	} else {
2914 		mlog(0, "all is well: migrating? %s, dead? %s\n",
2915 		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2916 		       test_bit(target, dlm->domain_map) ? "no":"yes");
2917 	}
2918 	if (!dlm_migration_can_proceed(dlm, res, target)) {
2919 		mlog(0, "trying again...\n");
2920 		goto again;
2921 	}
2922 
2923 	ret = 0;
2924 	/* did the target go down or die? */
2925 	spin_lock(&dlm->spinlock);
2926 	if (!test_bit(target, dlm->domain_map)) {
2927 		mlog(ML_ERROR, "aha. migration target %u just went down\n",
2928 		     target);
2929 		ret = -EHOSTDOWN;
2930 	}
2931 	spin_unlock(&dlm->spinlock);
2932 
2933 	/*
2934 	 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
2935 	 * another try; otherwise, we are sure the MIGRATING state is there,
2936 	 * drop the unneeded state which blocked threads trying to DIRTY
2937 	 */
2938 	spin_lock(&res->spinlock);
2939 	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2940 	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2941 	if (!ret)
2942 		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2943 	else
2944 		res->migration_pending = 0;
2945 	spin_unlock(&res->spinlock);
2946 
2947 	/*
2948 	 * at this point:
2949 	 *
2950 	 *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
2951 	 *   o there are no pending asts on this lockres
2952 	 *   o all processes trying to reserve an ast on this
2953 	 *     lockres must wait for the MIGRATING flag to clear
2954 	 */
2955 	return ret;
2956 }
2957 
2958 /* last step in the migration process.
2959  * original master calls this to free all of the dlm_lock
2960  * structures that used to be for other nodes. */
2961 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2962 				      struct dlm_lock_resource *res)
2963 {
2964 	struct list_head *queue = &res->granted;
2965 	int i, bit;
2966 	struct dlm_lock *lock, *next;
2967 
2968 	assert_spin_locked(&res->spinlock);
2969 
2970 	BUG_ON(res->owner == dlm->node_num);
2971 
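	/* walk granted, converting and blocked in turn; the queue++ below
	 * relies on those three list heads being adjacent in struct
	 * dlm_lock_resource */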
2972 	for (i = 0; i < 3; i++) {
2973 		list_for_each_entry_safe(lock, next, queue, list) {
2974 			if (lock->ml.node != dlm->node_num) {
2975 				mlog(0, "putting lock for node %u\n",
2976 				     lock->ml.node);
2977 				/* be extra careful */
2978 				BUG_ON(!list_empty(&lock->ast_list));
2979 				BUG_ON(!list_empty(&lock->bast_list));
2980 				BUG_ON(lock->ast_pending);
2981 				BUG_ON(lock->bast_pending);
2982 				dlm_lockres_clear_refmap_bit(dlm, res,
2983 							     lock->ml.node);
2984 				list_del_init(&lock->list);
2985 				dlm_lock_put(lock);
2986 				/* In a normal unlock, we would have added a
2987 				 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2988 				dlm_lock_put(lock);
2989 			}
2990 		}
2991 		queue++;
2992 	}
2993 	bit = 0;
2994 	while (1) {
2995 		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2996 		if (bit >= O2NM_MAX_NODES)
2997 			break;
2998 		/* do not clear the local node reference, if there is a
2999 		 * process holding this, let it drop the ref itself */
3000 		if (bit != dlm->node_num) {
3001 			mlog(0, "%s:%.*s: node %u had a ref to this "
3002 			     "migrating lockres, clearing\n", dlm->name,
3003 			     res->lockname.len, res->lockname.name, bit);
3004 			dlm_lockres_clear_refmap_bit(dlm, res, bit);
3005 		}
3006 		bit++;
3007 	}
3008 }
3009 
3010 /*
3011  * Pick a node to migrate the lock resource to. This function selects a
3012  * potential target based first on the locks and then on refmap. It skips
3013  * nodes that are in the process of exiting the domain.
3014  */
3015 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
3016 				    struct dlm_lock_resource *res)
3017 {
3018 	enum dlm_lockres_list idx;
3019 	struct list_head *queue = &res->granted;
3020 	struct dlm_lock *lock;
3021 	int noderef;
3022 	u8 nodenum = O2NM_MAX_NODES;
3023 
3024 	assert_spin_locked(&dlm->spinlock);
3025 	assert_spin_locked(&res->spinlock);
3026 
3027 	/* Go through all the locks */
3028 	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
3029 		queue = dlm_list_idx_to_ptr(res, idx);
3030 		list_for_each_entry(lock, queue, list) {
3031 			if (lock->ml.node == dlm->node_num)
3032 				continue;
3033 			if (test_bit(lock->ml.node, dlm->exit_domain_map))
3034 				continue;
3035 			nodenum = lock->ml.node;
3036 			goto bail;
3037 		}
3038 	}
3039 
3040 	/* Go thru the refmap */
3041 	noderef = -1;
3042 	while (1) {
3043 		noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
3044 					noderef + 1);
3045 		if (noderef >= O2NM_MAX_NODES)
3046 			break;
3047 		if (noderef == dlm->node_num)
3048 			continue;
3049 		if (test_bit(noderef, dlm->exit_domain_map))
3050 			continue;
3051 		nodenum = noderef;
3052 		goto bail;
3053 	}
3054 
3055 bail:
3056 	return nodenum;
3057 }
3058 
3059 /* this is called by the new master once all lockres
3060  * data has been received */
3061 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
3062 				  struct dlm_lock_resource *res,
3063 				  u8 master, u8 new_master,
3064 				  struct dlm_node_iter *iter)
3065 {
3066 	struct dlm_migrate_request migrate;
3067 	int ret, skip, status = 0;
3068 	int nodenum;
3069 
3070 	memset(&migrate, 0, sizeof(migrate));
3071 	migrate.namelen = res->lockname.len;
3072 	memcpy(migrate.name, res->lockname.name, migrate.namelen);
3073 	migrate.new_master = new_master;
3074 	migrate.master = master;
3075 
3076 	ret = 0;
3077 
3078 	/* send message to all nodes, except the master and myself */
3079 	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
3080 		if (nodenum == master ||
3081 		    nodenum == new_master)
3082 			continue;
3083 
3084 		/* We could race exit domain. If exited, skip. */
3085 		spin_lock(&dlm->spinlock);
3086 		skip = (!test_bit(nodenum, dlm->domain_map));
3087 		spin_unlock(&dlm->spinlock);
3088 		if (skip) {
3089 			clear_bit(nodenum, iter->node_map);
3090 			continue;
3091 		}
3092 
3093 		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
3094 					 &migrate, sizeof(migrate), nodenum,
3095 					 &status);
3096 		if (ret < 0) {
3097 			mlog(ML_ERROR, "%s: res %.*s, Error %d send "
3098 			     "MIGRATE_REQUEST to node %u\n", dlm->name,
3099 			     migrate.namelen, migrate.name, ret, nodenum);
3100 			if (!dlm_is_host_down(ret)) {
3101 				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
3102 				BUG();
3103 			}
3104 			clear_bit(nodenum, iter->node_map);
3105 			ret = 0;
3106 		} else if (status < 0) {
3107 			mlog(0, "migrate request (node %u) returned %d!\n",
3108 			     nodenum, status);
3109 			ret = status;
3110 		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3111 			/* during the migration request we short-circuited
3112 			 * the mastery of the lockres.  make sure we have
3113 			 * a mastery ref for nodenum */
3114 			mlog(0, "%s:%.*s: need ref for node %u\n",
3115 			     dlm->name, res->lockname.len, res->lockname.name,
3116 			     nodenum);
3117 			spin_lock(&res->spinlock);
3118 			dlm_lockres_set_refmap_bit(dlm, res, nodenum);
3119 			spin_unlock(&res->spinlock);
3120 		}
3121 	}
3122 
3123 	if (ret < 0)
3124 		mlog_errno(ret);
3125 
3126 	mlog(0, "returning ret=%d\n", ret);
3127 	return ret;
3128 }
3129 
3130 
3131 /* if there is an existing mle for this lockres, we now know who the master is.
3132  * (the one who sent us *this* message) we can clear it up right away.
3133  * since the process that put the mle on the list still has a reference to it,
3134  * we can unhash it now, set the master and wake the process.  as a result,
3135  * we will have no mle in the list to start with.  now we can add an mle for
3136  * the migration and this should be the only one found for those scanning the
3137  * list.  */
3138 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3139 				void **ret_data)
3140 {
3141 	struct dlm_ctxt *dlm = data;
3142 	struct dlm_lock_resource *res = NULL;
3143 	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3144 	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3145 	const char *name;
3146 	unsigned int namelen, hash;
3147 	int ret = 0;
3148 
3149 	if (!dlm_grab(dlm))
3150 		return 0;
3151 
3152 	name = migrate->name;
3153 	namelen = migrate->namelen;
3154 	hash = dlm_lockid_hash(name, namelen);
3155 
3156 	/* preallocate.. if this fails, abort */
3157 	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3158 
3159 	if (!mle) {
3160 		ret = -ENOMEM;
3161 		goto leave;
3162 	}
3163 
3164 	/* check for pre-existing lock */
3165 	spin_lock(&dlm->spinlock);
3166 	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3167 	if (res) {
3168 		spin_lock(&res->spinlock);
3169 		if (res->state & DLM_LOCK_RES_RECOVERING) {
3170 			/* if all is working ok, this can only mean that we got
3171 			 * a migrate request from a node that we now see as
3172 			 * dead.  what can we do here?  drop it to the floor? */
3173 			spin_unlock(&res->spinlock);
3174 			mlog(ML_ERROR, "Got a migrate request, but the "
3175 			     "lockres is marked as recovering!");
3176 			kmem_cache_free(dlm_mle_cache, mle);
3177 			ret = -EINVAL; /* need a better solution */
3178 			goto unlock;
3179 		}
3180 		res->state |= DLM_LOCK_RES_MIGRATING;
3181 		spin_unlock(&res->spinlock);
3182 	}
3183 
3184 	spin_lock(&dlm->master_lock);
3185 	/* ignore status.  only nonzero status would BUG. */
3186 	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3187 				    name, namelen,
3188 				    migrate->new_master,
3189 				    migrate->master);
3190 
3191 	if (ret < 0)
3192 		kmem_cache_free(dlm_mle_cache, mle);
3193 
3194 	spin_unlock(&dlm->master_lock);
3195 unlock:
3196 	spin_unlock(&dlm->spinlock);
3197 
3198 	if (oldmle) {
3199 		/* master is known, detach if not already detached */
3200 		dlm_mle_detach_hb_events(dlm, oldmle);
3201 		dlm_put_mle(oldmle);
3202 	}
3203 
3204 	if (res)
3205 		dlm_lockres_put(res);
3206 leave:
3207 	dlm_put(dlm);
3208 	return ret;
3209 }
3210 
3211 /* must be holding dlm->spinlock and dlm->master_lock
3212  * when adding a migration mle, we can clear any other mles
3213  * in the master list because we know with certainty that
3214  * the master is "master".  so we remove any old mle from
3215  * the list after setting its master field, and then add
3216  * the new migration mle.  this way we can hold with the rule
3217  * of having only one mle for a given lock name at all times. */
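/*
 * Three cases fall out of the lookup below: an existing MIGRATION mle that
 * we created ourselves means another local process won the race (-EEXIST);
 * an existing MIGRATION mle from another node means two nodes are trying
 * to migrate the same lockres, which should be impossible (BUG); any other
 * mle type is resolved in place (master set, mle unlinked) before the new
 * migration mle is inserted.
 */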
3218 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3219 				 struct dlm_lock_resource *res,
3220 				 struct dlm_master_list_entry *mle,
3221 				 struct dlm_master_list_entry **oldmle,
3222 				 const char *name, unsigned int namelen,
3223 				 u8 new_master, u8 master)
3224 {
3225 	int found;
3226 	int ret = 0;
3227 
3228 	*oldmle = NULL;
3229 
3230 	assert_spin_locked(&dlm->spinlock);
3231 	assert_spin_locked(&dlm->master_lock);
3232 
3233 	/* caller is responsible for any ref taken here on oldmle */
3234 	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3235 	if (found) {
3236 		struct dlm_master_list_entry *tmp = *oldmle;
3237 		spin_lock(&tmp->spinlock);
3238 		if (tmp->type == DLM_MLE_MIGRATION) {
3239 			if (master == dlm->node_num) {
3240 				/* ah another process raced me to it */
3241 				mlog(0, "tried to migrate %.*s, but some "
3242 				     "process beat me to it\n",
3243 				     namelen, name);
3244 				spin_unlock(&tmp->spinlock);
3245 				return -EEXIST;
3246 			} else {
3247 				/* bad.  2 NODES are trying to migrate! */
3248 				mlog(ML_ERROR, "migration error mle: "
3249 				     "master=%u new_master=%u // request: "
3250 				     "master=%u new_master=%u // "
3251 				     "lockres=%.*s\n",
3252 				     tmp->master, tmp->new_master,
3253 				     master, new_master,
3254 				     namelen, name);
3255 				BUG();
3256 			}
3257 		} else {
3258 			/* this is essentially what assert_master does */
3259 			tmp->master = master;
3260 			atomic_set(&tmp->woken, 1);
3261 			wake_up(&tmp->wq);
3262 			/* remove it so that only one mle will be found */
3263 			__dlm_unlink_mle(dlm, tmp);
3264 			__dlm_mle_detach_hb_events(dlm, tmp);
3265 			if (tmp->type == DLM_MLE_MASTER) {
3266 				ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3267 				mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3268 						"telling master to get ref "
3269 						"for cleared out mle during "
3270 						"migration\n", dlm->name,
3271 						namelen, name, master,
3272 						new_master);
3273 			}
3274 		}
3275 		spin_unlock(&tmp->spinlock);
3276 	}
3277 
3278 	/* now add a migration mle to the tail of the list */
3279 	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3280 	mle->new_master = new_master;
3281 	/* the new master will be sending an assert master for this.
3282 	 * at that point we will get the refmap reference */
3283 	mle->master = master;
3284 	/* do this for consistency with other mle types */
3285 	set_bit(new_master, mle->maybe_map);
3286 	__dlm_insert_mle(dlm, mle);
3287 
3288 	return ret;
3289 }
3290 
3291 /*
3292  * Sets the owner of the lockres, associated to the mle, to UNKNOWN
3293  */
3294 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3295 					struct dlm_master_list_entry *mle)
3296 {
3297 	struct dlm_lock_resource *res;
3298 
3299 	/* Find the lockres associated to the mle and set its owner to UNK */
3300 	res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3301 				   mle->mnamehash);
3302 	if (res) {
3303 		spin_unlock(&dlm->master_lock);
3304 
3305 		/* move lockres onto recovery list */
3306 		spin_lock(&res->spinlock);
3307 		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3308 		dlm_move_lockres_to_recovery_list(dlm, res);
3309 		spin_unlock(&res->spinlock);
3310 		dlm_lockres_put(res);
3311 
3312 		/* about to get rid of mle, detach from heartbeat */
3313 		__dlm_mle_detach_hb_events(dlm, mle);
3314 
3315 		/* dump the mle */
3316 		spin_lock(&dlm->master_lock);
3317 		__dlm_put_mle(mle);
3318 		spin_unlock(&dlm->master_lock);
3319 	}
3320 
3321 	return res;
3322 }
3323 
3324 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3325 				    struct dlm_master_list_entry *mle)
3326 {
3327 	__dlm_mle_detach_hb_events(dlm, mle);
3328 
3329 	spin_lock(&mle->spinlock);
3330 	__dlm_unlink_mle(dlm, mle);
3331 	atomic_set(&mle->woken, 1);
3332 	spin_unlock(&mle->spinlock);
3333 
3334 	wake_up(&mle->wq);
3335 }
3336 
3337 static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3338 				struct dlm_master_list_entry *mle, u8 dead_node)
3339 {
3340 	int bit;
3341 
3342 	BUG_ON(mle->type != DLM_MLE_BLOCK);
3343 
3344 	spin_lock(&mle->spinlock);
3345 	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3346 	if (bit != dead_node) {
3347 		mlog(0, "mle found, but dead node %u would not have been "
3348 		     "master\n", dead_node);
3349 		spin_unlock(&mle->spinlock);
3350 	} else {
3351 		/* Must drop the refcount by one since the assert_master will
3352 		 * never arrive. This may result in the mle being unlinked and
3353 		 * freed, but there may still be a process waiting in the
3354 		 * dlmlock path which is fine. */
3355 		mlog(0, "node %u was expected master\n", dead_node);
3356 		atomic_set(&mle->woken, 1);
3357 		spin_unlock(&mle->spinlock);
3358 		wake_up(&mle->wq);
3359 
3360 		/* Do not need events any longer, so detach from heartbeat */
3361 		__dlm_mle_detach_hb_events(dlm, mle);
3362 		__dlm_put_mle(mle);
3363 	}
3364 }
3365 
3366 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3367 {
3368 	struct dlm_master_list_entry *mle;
3369 	struct dlm_lock_resource *res;
3370 	struct hlist_head *bucket;
3371 	struct hlist_node *tmp;
3372 	unsigned int i;
3373 
3374 	mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3375 top:
3376 	assert_spin_locked(&dlm->spinlock);
3377 
3378 	/* clean the master list */
3379 	spin_lock(&dlm->master_lock);
3380 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3381 		bucket = dlm_master_hash(dlm, i);
3382 		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3383 			BUG_ON(mle->type != DLM_MLE_BLOCK &&
3384 			       mle->type != DLM_MLE_MASTER &&
3385 			       mle->type != DLM_MLE_MIGRATION);
3386 
3387 			/* MASTER mles are initiated locally. The waiting
3388 			 * process will notice the node map change shortly.
3389 			 * Let that happen as normal. */
3390 			if (mle->type == DLM_MLE_MASTER)
3391 				continue;
3392 
3393 			/* BLOCK mles are initiated by other nodes. Need to
3394 			 * clean up if the dead node would have been the
3395 			 * master. */
3396 			if (mle->type == DLM_MLE_BLOCK) {
3397 				dlm_clean_block_mle(dlm, mle, dead_node);
3398 				continue;
3399 			}
3400 
3401 			/* Everything else is a MIGRATION mle */
3402 
3403 			/* The rule for MIGRATION mles is that the master
3404 			 * becomes UNKNOWN if *either* the original or the new
3405 			 * master dies. All UNKNOWN lockres' are sent to
3406 			 * whichever node becomes the recovery master. The new
3407 			 * master is responsible for determining if there is
3408 			 * still a master for this lockres, or if it needs to
3409 			 * take over mastery. Either way, this node should
3410 			 * expect another message to resolve this. */
3411 
3412 			if (mle->master != dead_node &&
3413 			    mle->new_master != dead_node)
3414 				continue;
3415 
3416 			if (mle->new_master == dead_node && mle->inuse) {
3417 				mlog(ML_NOTICE, "%s: target %u died during "
3418 						"migration from %u, the MLE is "
3419 						"still in use, ignore it!\n",
3420 						dlm->name, dead_node,
3421 						mle->master);
3422 				continue;
3423 			}
3424 
3425 			/* If we have reached this point, this mle needs to be
3426 			 * removed from the list and freed. */
3427 			dlm_clean_migration_mle(dlm, mle);
3428 
3429 			mlog(0, "%s: node %u died during migration from "
3430 			     "%u to %u!\n", dlm->name, dead_node, mle->master,
3431 			     mle->new_master);
3432 
3433 			/* If we find a lockres associated with the mle, we've
3434 			 * hit this rare case that messes up our lock ordering.
3435 			 * If so, we need to drop the master lock so that we can
3436 			 * take the lockres lock, meaning that we will have to
3437 			 * restart from the head of list. */
3438 			res = dlm_reset_mleres_owner(dlm, mle);
3439 			if (res)
3440 				/* restart */
3441 				goto top;
3442 
3443 			/* This may be the last reference */
3444 			__dlm_put_mle(mle);
3445 		}
3446 	}
3447 	spin_unlock(&dlm->master_lock);
3448 }
3449 
3450 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3451 			 u8 old_master)
3452 {
3453 	struct dlm_node_iter iter;
3454 	int ret = 0;
3455 
3456 	spin_lock(&dlm->spinlock);
3457 	dlm_node_iter_init(dlm->domain_map, &iter);
3458 	clear_bit(old_master, iter.node_map);
3459 	clear_bit(dlm->node_num, iter.node_map);
3460 	spin_unlock(&dlm->spinlock);
3461 
3462 	/* ownership of the lockres is changing.  account for the
3463 	 * mastery reference here since old_master will briefly have
3464 	 * a reference after the migration completes */
3465 	spin_lock(&res->spinlock);
3466 	dlm_lockres_set_refmap_bit(dlm, res, old_master);
3467 	spin_unlock(&res->spinlock);
3468 
3469 	mlog(0, "now time to do a migrate request to other nodes\n");
3470 	ret = dlm_do_migrate_request(dlm, res, old_master,
3471 				     dlm->node_num, &iter);
3472 	if (ret < 0) {
3473 		mlog_errno(ret);
3474 		goto leave;
3475 	}
3476 
3477 	mlog(0, "doing assert master of %.*s to all except the original node\n",
3478 	     res->lockname.len, res->lockname.name);
3479 	/* this call now finishes out the nodemap
3480 	 * even if one or more nodes die */
3481 	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3482 				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3483 	if (ret < 0) {
3484 		/* no longer need to retry.  all living nodes contacted. */
3485 		mlog_errno(ret);
3486 		ret = 0;
3487 	}
3488 
3489 	memset(iter.node_map, 0, sizeof(iter.node_map));
3490 	set_bit(old_master, iter.node_map);
3491 	mlog(0, "doing assert master of %.*s back to %u\n",
3492 	     res->lockname.len, res->lockname.name, old_master);
3493 	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3494 				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3495 	if (ret < 0) {
3496 		mlog(0, "assert master to original master failed "
3497 		     "with %d.\n", ret);
3498 		/* the only nonzero status here would be because of
3499 		 * a dead original node.  we're done. */
3500 		ret = 0;
3501 	}
3502 
3503 	/* all done, set the owner, clear the flag */
3504 	spin_lock(&res->spinlock);
3505 	dlm_set_lockres_owner(dlm, res, dlm->node_num);
3506 	res->state &= ~DLM_LOCK_RES_MIGRATING;
3507 	spin_unlock(&res->spinlock);
3508 	/* re-dirty it on the new master */
3509 	dlm_kick_thread(dlm, res);
3510 	wake_up(&res->wq);
3511 leave:
3512 	return ret;
3513 }
3514 
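/*
 * Sketch (illustrative only, never called from this file): the
 * dlm_node_iter pattern used in dlm_finish_migration above walks every
 * bit set in a node bitmap.  A minimal consumer, assuming only the
 * iterator helpers declared in dlmcommon.h, might look like this;
 * dlm_example_walk_domain is an invented name.
 */
static void __maybe_unused dlm_example_walk_domain(struct dlm_ctxt *dlm)
{
	struct dlm_node_iter iter;
	int nodenum;

	/* snapshot the domain map under the dlm spinlock */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	/* skip ourselves, mirroring the setup in dlm_finish_migration */
	clear_bit(dlm->node_num, iter.node_map);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0)
		mlog(0, "%s: domain member %d\n", dlm->name, nodenum);
}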
3515 /*
3516  * LOCKRES AST REFCOUNT
3517  * this is integral to migration
3518  */
3519 
3520 /* if there is future intent to call an ast, reserve one ahead
3521  * of time.  this should be called only after waiting on the
3522  * lockres with dlm_wait_on_lockres, and while still holding the
3523  * spinlock after the call. */
3524 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3525 {
3526 	assert_spin_locked(&res->spinlock);
3527 	if (res->state & DLM_LOCK_RES_MIGRATING) {
3528 		__dlm_print_one_lock_resource(res);
3529 	}
3530 	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3531 
3532 	atomic_inc(&res->asts_reserved);
3533 }
3534 
3535 /*
3536  * used to drop the reserved ast, either because it went unused,
3537  * or because the ast/bast was actually called.
3538  *
3539  * also, if there is a pending migration on this lockres,
3540  * and this was the last pending ast on the lockres,
3541  * atomically set the MIGRATING flag before we drop the lock.
3542  * this is how we ensure that migration can proceed with no
3543  * asts in progress.  note that it is ok if the state of the
3544  * queues is such that a lock should be granted in the future
3545  * or that a bast should be fired, because the new master will
3546  * shuffle the lists on this lockres as soon as it is migrated.
3547  */
3548 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3549 			     struct dlm_lock_resource *res)
3550 {
3551 	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3552 		return;
3553 
3554 	if (!res->migration_pending) {
3555 		spin_unlock(&res->spinlock);
3556 		return;
3557 	}
3558 
3559 	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3560 	res->migration_pending = 0;
3561 	res->state |= DLM_LOCK_RES_MIGRATING;
3562 	spin_unlock(&res->spinlock);
3563 	wake_up(&res->wq);
3564 	wake_up(&dlm->migration_wq);
3565 }
3566 
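/*
 * Sketch (illustrative): the reserve/release pair above is used
 * bracket-style around a queued ast.  Assuming a lockres that has
 * already been waited on as described at __dlm_lockres_reserve_ast;
 * dlm_example_ast_bracket is an invented name.
 */
static void __maybe_unused dlm_example_ast_bracket(struct dlm_ctxt *dlm,
						   struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_reserve_ast(res);		/* count one future ast */
	spin_unlock(&res->spinlock);

	/* ... queue, fire, or cancel the ast here ... */

	/* drop the reservation; if this was the last one and a
	 * migration is pending, the lockres atomically becomes
	 * DLM_LOCK_RES_MIGRATING */
	dlm_lockres_release_ast(dlm, res);
}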
3567 void dlm_force_free_mles(struct dlm_ctxt *dlm)
3568 {
3569 	int i;
3570 	struct hlist_head *bucket;
3571 	struct dlm_master_list_entry *mle;
3572 	struct hlist_node *tmp;
3573 
3574 	/*
3575 	 * We notified all other nodes that we are exiting the domain and
3576 	 * set the dlm state to DLM_CTXT_LEAVING.  If any mles are still
3577 	 * around, we force-free them and wake any processes that are
3578 	 * waiting on them.
3579 	 */
3580 	spin_lock(&dlm->spinlock);
3581 	spin_lock(&dlm->master_lock);
3582 
3583 	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3584 	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3585 
3586 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3587 		bucket = dlm_master_hash(dlm, i);
3588 		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3589 			if (mle->type != DLM_MLE_BLOCK) {
3590 				mlog(ML_ERROR, "bad mle: %p\n", mle);
3591 				dlm_print_one_mle(mle);
3592 			}
3593 			atomic_set(&mle->woken, 1);
3594 			wake_up(&mle->wq);
3595 
3596 			__dlm_unlink_mle(dlm, mle);
3597 			__dlm_mle_detach_hb_events(dlm, mle);
3598 			__dlm_put_mle(mle);
3599 		}
3600 	}
3601 	spin_unlock(&dlm->master_lock);
3602 	spin_unlock(&dlm->spinlock);
3603 }
3604
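/*
 * Sketch (illustrative, hypothetical call site): based on the comment
 * in dlm_force_free_mles, the domain-leave path must mark the domain
 * as leaving before calling it, so the BUG_ON checks hold.
 * dlm_example_leave_teardown is an invented name; the real caller
 * lives in dlmdomain.c.
 */
static void __maybe_unused dlm_example_leave_teardown(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	dlm->dlm_state = DLM_CTXT_LEAVING;	/* satisfies the state BUG_ON */
	spin_unlock(&dlm->spinlock);

	/* all other nodes must already have left the domain map,
	 * or the second BUG_ON in dlm_force_free_mles will fire */
	dlm_force_free_mles(dlm);
}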