// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmunlock.c
 *
 * underlying calls for unlocking locks
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

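/* Action bits computed by dlm_get_cancel_actions() and
 * dlm_get_unlock_actions() below, then applied by dlmunlock_common():
 * list removal, regrant, AST dispatch and the final ref drop. */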
#define DLM_UNLOCK_FREE_LOCK           0x00000001
#define DLM_UNLOCK_CALL_AST            0x00000002
#define DLM_UNLOCK_REMOVE_LOCK         0x00000004
#define DLM_UNLOCK_REGRANT_LOCK        0x00000008
#define DLM_UNLOCK_CLEAR_CONVERT_TYPE  0x00000010


static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions);
static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions);

static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
						 struct dlm_lock_resource *res,
						 struct dlm_lock *lock,
						 struct dlm_lockstatus *lksb,
						 int flags,
						 u8 owner);


/*
 * according to the spec:
 * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
 *
 *  flags & LKM_CANCEL != 0: must be converting or blocked
 *  flags & LKM_CANCEL == 0: must be granted
 *
 * So to unlock a converting lock, you must first cancel the
 * convert (passing LKM_CANCEL in flags), then call the unlock
 * again (with no LKM_CANCEL in flags).
 */
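/*
 * For illustration only -- a minimal sketch of that two-step teardown,
 * assuming a hypothetical caller that already has a dlm_ctxt, a lksb and
 * an unlock callback my_unlockast/astdata (none of which appear in this
 * file):
 *
 *	status = dlmunlock(dlm, lksb, LKM_CANCEL, my_unlockast, astdata);
 *	if (status == DLM_NORMAL)
 *		status = dlmunlock(dlm, lksb, 0, my_unlockast, astdata);
 */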


/*
 * locking:
 *   caller needs:  none
 *   taken:         res->spinlock and lock->spinlock taken and dropped
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_BADPARAM, DLM_RECOVERING, DLM_MIGRATING,
 *          DLM_FORWARD, DLM_CANCELGRANT, DLM_NOLOCKMGR, status from network
 * all callers should have taken an extra ref on lock coming in
 */
static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res,
					struct dlm_lock *lock,
					struct dlm_lockstatus *lksb,
					int flags, int *call_ast,
					int master_node)
{
	enum dlm_status status;
	int actions = 0;
	int in_use;
	u8 owner;

	mlog(0, "master_node = %d, valblk = %d\n", master_node,
	     flags & LKM_VALBLK);

	if (master_node)
		BUG_ON(res->owner != dlm->node_num);
	else
		BUG_ON(res->owner == dlm->node_num);

	spin_lock(&dlm->ast_lock);
	/* We want to be sure that we're not freeing a lock
	 * that still has ASTs pending... */
	in_use = !list_empty(&lock->ast_list);
	spin_unlock(&dlm->ast_lock);
	if (in_use && !(flags & LKM_CANCEL)) {
		mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
		     "while waiting for an ast!", res->lockname.len,
		     res->lockname.name);
		return DLM_BADPARAM;
	}

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
		if (master_node && !(flags & LKM_CANCEL)) {
			mlog(ML_ERROR, "lockres in progress!\n");
			spin_unlock(&res->spinlock);
			return DLM_FORWARD;
		}
		/* ok for this to sleep if not in a network handler */
		__dlm_wait_on_lockres(res);
		res->state |= DLM_LOCK_RES_IN_PROGRESS;
	}
	spin_lock(&lock->spinlock);

	if (res->state & DLM_LOCK_RES_RECOVERING) {
		status = DLM_RECOVERING;
		goto leave;
	}

	if (res->state & DLM_LOCK_RES_MIGRATING) {
		status = DLM_MIGRATING;
		goto leave;
	}

	/* see above for what the spec says about
	 * LKM_CANCEL and the lock queue state */
	if (flags & LKM_CANCEL)
		status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
	else
		status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);

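	/* DLM_NORMAL falls through, as does DLM_CANCELGRANT on the
	 * master node.  Anything else jumps straight to leave, skipping
	 * the list manipulation below; the FREE_LOCK and CALL_AST
	 * actions are still honored after the leave label. */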
	if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node))
		goto leave;

	/* By now this has been masked out of cancel requests. */
	if (flags & LKM_VALBLK) {
		/* make the final update to the lvb */
		if (master_node)
			memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
		else
			flags |= LKM_PUT_LVB; /* let the send function
					       * handle it. */
	}

	if (!master_node) {
		owner = res->owner;
		/* drop locks and send message */
		if (flags & LKM_CANCEL)
			lock->cancel_pending = 1;
		else
			lock->unlock_pending = 1;
		spin_unlock(&lock->spinlock);
		spin_unlock(&res->spinlock);
		status = dlm_send_remote_unlock_request(dlm, res, lock, lksb,
							flags, owner);
		spin_lock(&res->spinlock);
		spin_lock(&lock->spinlock);
		/* if the master told us the lock was already granted,
		 * let the ast handle all of these actions */
		if (status == DLM_CANCELGRANT) {
			actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
				     DLM_UNLOCK_REGRANT_LOCK|
				     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
		} else if (status == DLM_RECOVERING ||
			   status == DLM_MIGRATING ||
			   status == DLM_FORWARD ||
			   status == DLM_NOLOCKMGR) {
			/* must clear the actions because this unlock
			 * is about to be retried.  cannot free or do
			 * any list manipulation. */
			mlog(0, "%s:%.*s: clearing actions, %s\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name,
			     status == DLM_RECOVERING ? "recovering" :
			     (status == DLM_MIGRATING ? "migrating" :
				(status == DLM_FORWARD ? "forward" :
						"nolockmanager")));
			actions = 0;
		}
		if (flags & LKM_CANCEL)
			lock->cancel_pending = 0;
		else
			lock->unlock_pending = 0;

	}

	/* get an extra ref on lock.  if we are just switching
	 * lists here, we don't want the lock to go away. */
	dlm_lock_get(lock);

	if (actions & DLM_UNLOCK_REMOVE_LOCK) {
		list_del_init(&lock->list);
		dlm_lock_put(lock);
	}
	if (actions & DLM_UNLOCK_REGRANT_LOCK) {
		dlm_lock_get(lock);
		list_add_tail(&lock->list, &res->granted);
	}
	if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) {
		mlog(0, "clearing convert_type at %smaster node\n",
		     master_node ? "" : "non-");
		lock->ml.convert_type = LKM_IVMODE;
	}

	/* remove the extra ref on lock */
	dlm_lock_put(lock);

leave:
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	if (!dlm_lock_on_list(&res->converting, lock))
		BUG_ON(lock->ml.convert_type != LKM_IVMODE);
	else
		BUG_ON(lock->ml.convert_type == LKM_IVMODE);
	spin_unlock(&lock->spinlock);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* let the caller's final dlm_lock_put handle the actual kfree */
	if (actions & DLM_UNLOCK_FREE_LOCK) {
		/* this should always be coupled with list removal */
		BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
		mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     kref_read(&lock->lock_refs)-1);
		dlm_lock_put(lock);
	}
	if (actions & DLM_UNLOCK_CALL_AST)
		*call_ast = 1;

	/* if cancel or unlock succeeded, lvb work is done */
	if (status == DLM_NORMAL)
		lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);

	return status;
}

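/* The two helpers below finish an unlock or cancel that was still
 * pending when the master died; the expected caller is the recovery
 * path, which completes the operation on behalf of the dead master. */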
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock)
{
	/* leave DLM_LKSB_PUT_LVB on the lksb so any final
	 * update of the lvb will be sent to the new master */
	list_del_init(&lock->list);
}

void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock)
{
	list_move_tail(&lock->list, &res->granted);
	lock->ml.convert_type = LKM_IVMODE;
}


static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm,
					  struct dlm_lock_resource *res,
					  struct dlm_lock *lock,
					  struct dlm_lockstatus *lksb,
					  int flags,
					  int *call_ast)
{
	return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
}

static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm,
					  struct dlm_lock_resource *res,
					  struct dlm_lock *lock,
					  struct dlm_lockstatus *lksb,
					  int flags, int *call_ast)
{
	return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
}

/*
 * locking:
 *   caller needs:  none
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_NOLOCKMGR, DLM_FORWARD, status from network
 */
static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
						 struct dlm_lock_resource *res,
						 struct dlm_lock *lock,
						 struct dlm_lockstatus *lksb,
						 int flags,
						 u8 owner)
{
	struct dlm_unlock_lock unlock;
	int tmpret;
	enum dlm_status ret;
	int status = 0;
	struct kvec vec[2];
	size_t veclen = 1;

	mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);

	if (owner == dlm->node_num) {
		/* ended up trying to contact ourselves.  this means
		 * that the lockres had been remote but became local
		 * via a migration.  just retry it, now as local */
		mlog(0, "%s:%.*s: this node became the master due to a "
		     "migration, re-evaluate now\n", dlm->name,
		     res->lockname.len, res->lockname.name);
		return DLM_FORWARD;
	}

	memset(&unlock, 0, sizeof(unlock));
	unlock.node_idx = dlm->node_num;
	unlock.flags = cpu_to_be32(flags);
	unlock.cookie = lock->ml.cookie;
	unlock.namelen = res->lockname.len;
	memcpy(unlock.name, res->lockname.name, unlock.namelen);

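	/* Wire format: the fixed-size request header, optionally
	 * followed by the raw LVB when LKM_PUT_LVB is set.  The handler
	 * on the owner node reads the same layout back out of msg->buf. */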
	vec[0].iov_len = sizeof(struct dlm_unlock_lock);
	vec[0].iov_base = &unlock;

	if (flags & LKM_PUT_LVB) {
		/* extra data to send if we are updating lvb */
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key,
					vec, veclen, owner, &status);
	if (tmpret >= 0) {
		/* successfully sent and received */
		if (status == DLM_FORWARD)
			mlog(0, "master was in-progress.  retry\n");
		ret = status;
	} else {
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", tmpret, DLM_UNLOCK_LOCK_MSG, dlm->key, owner);
		if (dlm_is_host_down(tmpret)) {
			/* NOTE: this seems strange, but it is what we want.
			 * when the master goes down during a cancel or
			 * unlock, the recovery code completes the operation
			 * as if the master had not died, then passes the
			 * updated state to the recovery master.  this thread
			 * just needs to finish out the operation and call
			 * the unlockast. */
			if (dlm_is_node_dead(dlm, owner))
				ret = DLM_NORMAL;
			else
				ret = DLM_NOLOCKMGR;
		} else {
			/* something bad.  this will BUG in ocfs2 */
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}

/*
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVBUFLEN, DLM_IVLOCKID,
 *          DLM_RECOVERING, DLM_MIGRATING, DLM_FORWARD,
 *          return value from dlmunlock_master
 */
int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	enum dlm_status status = DLM_NORMAL;
	int found = 0, i;
	struct dlm_lockstatus *lksb = NULL;
	int ignore;
	u32 flags;
	struct list_head *queue;

	flags = be32_to_cpu(unlock->flags);

	if (flags & LKM_GET_LVB) {
		mlog(ML_ERROR, "bad args!  GET_LVB specified on unlock!\n");
		return DLM_BADARGS;
	}

	if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
		mlog(ML_ERROR, "bad args!  cannot modify lvb on a CANCEL "
		     "request!\n");
		return DLM_BADARGS;
	}

	if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
		return DLM_IVBUFLEN;
	}

	if (!dlm_grab(dlm))
		return DLM_FORWARD;

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");

	res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
	if (!res) {
		/* We assume here that a missing lock resource simply
		 * means it was migrated away and destroyed before the
		 * sending node could detect that; DLM_FORWARD makes
		 * the sender retry against the new owner. */
		mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
		status = DLM_FORWARD;
		goto not_found;
	}

	queue = &res->granted;
	found = 0;
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_RECOVERING\n");
		status = DLM_RECOVERING;
		goto leave;
	}

	if (res->state & DLM_LOCK_RES_MIGRATING) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_MIGRATING\n");
		status = DLM_MIGRATING;
		goto leave;
	}

	if (res->owner != dlm->node_num) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_FORWARD -- not master\n");
		status = DLM_FORWARD;
		goto leave;
	}

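	/* Walking queue++ below relies on granted, converting and
	 * blocked being declared back-to-back as list_heads in struct
	 * dlm_lock_resource (see dlmcommon.h). */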
	for (i = 0; i < 3; i++) {
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == unlock->cookie &&
			    lock->ml.node == unlock->node_idx) {
				dlm_lock_get(lock);
				found = 1;
				break;
			}
		}
		if (found)
			break;
		/* scan granted -> converting -> blocked queues */
		queue++;
	}
	spin_unlock(&res->spinlock);
	if (!found) {
		status = DLM_IVLOCKID;
		goto not_found;
	}

	/* lock was found on queue */
	lksb = lock->lksb;
	if (flags & (LKM_VALBLK|LKM_PUT_LVB) &&
	    lock->ml.type != LKM_EXMODE)
		flags &= ~(LKM_VALBLK|LKM_PUT_LVB);

	/* unlockast only called on originating node */
	if (flags & LKM_PUT_LVB) {
		lksb->flags |= DLM_LKSB_PUT_LVB;
		memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN);
	}

	/* if this is in-progress, propagate the DLM_FORWARD
	 * all the way back out */
	status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
	if (status == DLM_FORWARD)
		mlog(0, "lockres is in progress\n");

	if (flags & LKM_PUT_LVB)
		lksb->flags &= ~DLM_LKSB_PUT_LVB;

	dlm_lockres_calc_usage(dlm, res);
	dlm_kick_thread(dlm, res);

not_found:
	if (!found)
		mlog(ML_ERROR, "failed to find lock to unlock! "
			       "cookie=%u:%llu\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie)));
	else
		dlm_lock_put(lock);

leave:
	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}


static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions)
{
	enum dlm_status status;

	if (dlm_lock_on_list(&res->blocked, lock)) {
		/* cancel this outright */
		status = DLM_NORMAL;
		*actions = (DLM_UNLOCK_CALL_AST |
			    DLM_UNLOCK_REMOVE_LOCK);
	} else if (dlm_lock_on_list(&res->converting, lock)) {
		/* cancel the request, put back on granted */
		status = DLM_NORMAL;
		*actions = (DLM_UNLOCK_CALL_AST |
			    DLM_UNLOCK_REMOVE_LOCK |
			    DLM_UNLOCK_REGRANT_LOCK |
			    DLM_UNLOCK_CLEAR_CONVERT_TYPE);
	} else if (dlm_lock_on_list(&res->granted, lock)) {
		/* too late, already granted. */
		status = DLM_CANCELGRANT;
		*actions = DLM_UNLOCK_CALL_AST;
	} else {
		mlog(ML_ERROR, "lock to cancel is not on any list!\n");
		status = DLM_IVLOCKID;
		*actions = 0;
	}
	return status;
}

static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions)
{
	enum dlm_status status;

	/* unlock request */
	if (!dlm_lock_on_list(&res->granted, lock)) {
		status = DLM_DENIED;
		dlm_error(status);
		*actions = 0;
	} else {
		/* unlock granted lock */
		status = DLM_NORMAL;
		*actions = (DLM_UNLOCK_FREE_LOCK |
			    DLM_UNLOCK_CALL_AST |
			    DLM_UNLOCK_REMOVE_LOCK);
	}
	return status;
}

/* there seems to be no point in doing this async
 * since (even for the remote case) there is really
 * no work to queue up... so just do it and fire the
 * unlockast by hand when done... */
enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
			  int flags, dlm_astunlockfunc_t *unlockast, void *data)
{
	enum dlm_status status;
	struct dlm_lock_resource *res;
	struct dlm_lock *lock = NULL;
	int call_ast, is_master;

	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}

	if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
		mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
		flags &= ~LKM_VALBLK;
	}

	if (!lksb->lockid || !lksb->lockid->lockres) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	lock = lksb->lockid;
	BUG_ON(!lock);
	dlm_lock_get(lock);

	res = lock->lockres;
	BUG_ON(!res);
	dlm_lockres_get(res);
retry:
	call_ast = 0;
	/* need to retry up here because owner may have changed */
	mlog(0, "lock=%p res=%p\n", lock, res);

	spin_lock(&res->spinlock);
	is_master = (res->owner == dlm->node_num);
	if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
		flags &= ~LKM_VALBLK;
	spin_unlock(&res->spinlock);

	if (is_master) {
		status = dlmunlock_master(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_master: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	} else {
		status = dlmunlock_remote(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_remote: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	}

	if (status == DLM_RECOVERING ||
	    status == DLM_MIGRATING ||
	    status == DLM_FORWARD ||
	    status == DLM_NOLOCKMGR) {

		/* We want to go away for a tiny bit to allow recovery
		 * / migration to complete on this resource. I don't
		 * know of any wait queue we could sleep on as this
		 * may be happening on another node. Perhaps the
		 * proper solution is to queue up requests on the
		 * other end? */

		/* do we want to yield(); ?? */
		msleep(50);

		mlog(0, "retrying unlock due to pending recovery/"
		     "migration/in-progress/reconnect\n");
		goto retry;
	}

	if (call_ast) {
		mlog(0, "calling unlockast(%p, %d)\n", data, status);
		if (is_master) {
			/* it is possible that there is one last bast
			 * pending.  make sure it is flushed, then
			 * call the unlockast.
			 * not an issue if this lock is mastered remotely,
			 * since it has been removed from the lockres
			 * queues and cannot be found. */
			dlm_kick_thread(dlm, NULL);
			wait_event(dlm->ast_wq,
				   dlm_lock_basts_flushed(dlm, lock));
		}
		(*unlockast)(data, status);
	}

	if (status == DLM_CANCELGRANT)
		status = DLM_NORMAL;

	if (status == DLM_NORMAL) {
		mlog(0, "kicking the thread\n");
		dlm_kick_thread(dlm, res);
	} else
		dlm_error(status);

	dlm_lockres_calc_usage(dlm, res);
	dlm_lockres_put(res);
	dlm_lock_put(lock);

	mlog(0, "returning status=%d!\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(dlmunlock);