// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmunlock.c
 *
 * underlying calls for unlocking locks
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"

#define DLM_UNLOCK_FREE_LOCK           0x00000001
#define DLM_UNLOCK_CALL_AST            0x00000002
#define DLM_UNLOCK_REMOVE_LOCK         0x00000004
#define DLM_UNLOCK_REGRANT_LOCK        0x00000008
#define DLM_UNLOCK_CLEAR_CONVERT_TYPE  0x00000010
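
/*
 * Example (illustrative only, derived from dlm_get_cancel_actions()
 * below): cancelling a lock that sits on the converting queue yields
 * the combined mask
 *
 *	DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK |
 *	DLM_UNLOCK_REGRANT_LOCK | DLM_UNLOCK_CLEAR_CONVERT_TYPE
 *
 * i.e. take the lock off the converting list, put it back on the
 * granted list, reset ml.convert_type and fire the ast.
 */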


static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions);
static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions);

static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
						 struct dlm_lock_resource *res,
						 struct dlm_lock *lock,
						 struct dlm_lockstatus *lksb,
						 int flags,
						 u8 owner);


/*
 * according to the spec:
 * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
 *
 *  flags & LKM_CANCEL != 0: must be converting or blocked
 *  flags & LKM_CANCEL == 0: must be granted
 *
 * So to unlock a converting lock, you must first cancel the
 * convert (passing LKM_CANCEL in flags), then call the unlock
 * again (with no LKM_CANCEL in flags).
 */
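
/*
 * Illustrative sketch of that two-step teardown (hypothetical caller;
 * my_unlock_ast and priv are placeholder names, not part of this file):
 *
 *	status = dlmunlock(dlm, lksb, LKM_CANCEL, my_unlock_ast, priv);
 *	if (status == DLM_NORMAL)
 *		status = dlmunlock(dlm, lksb, 0, my_unlock_ast, priv);
 *
 * The first call cancels the pending convert (or finds it already
 * granted, which dlmunlock() also reports as DLM_NORMAL); either way
 * the lock ends up on the granted queue, and the second call then
 * removes it outright.
 */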


/*
 * locking:
 *   caller needs:  none
 *   taken:         res->spinlock and lock->spinlock taken and dropped
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_BADPARAM, DLM_NOLOCKMGR, status from network
 * all callers should have taken an extra ref on lock coming in
 */
static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res,
					struct dlm_lock *lock,
					struct dlm_lockstatus *lksb,
					int flags, int *call_ast,
					int master_node)
{
	enum dlm_status status;
	int actions = 0;
	int in_use;
	u8 owner;
	int recovery_wait = 0;

	mlog(0, "master_node = %d, valblk = %d\n", master_node,
	     flags & LKM_VALBLK);

	if (master_node)
		BUG_ON(res->owner != dlm->node_num);
	else
		BUG_ON(res->owner == dlm->node_num);

	spin_lock(&dlm->ast_lock);
	/* We want to be sure that we're not freeing a lock
	 * that still has ASTs pending... */
	in_use = !list_empty(&lock->ast_list);
	spin_unlock(&dlm->ast_lock);
	if (in_use && !(flags & LKM_CANCEL)) {
		mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
		     "while waiting for an ast!\n", res->lockname.len,
		     res->lockname.name);
		return DLM_BADPARAM;
	}

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
		if (master_node && !(flags & LKM_CANCEL)) {
			mlog(ML_ERROR, "lockres in progress!\n");
			spin_unlock(&res->spinlock);
			return DLM_FORWARD;
		}
		/* ok for this to sleep if not in a network handler */
		__dlm_wait_on_lockres(res);
		res->state |= DLM_LOCK_RES_IN_PROGRESS;
	}
	spin_lock(&lock->spinlock);

	if (res->state & DLM_LOCK_RES_RECOVERING) {
		status = DLM_RECOVERING;
		goto leave;
	}

	if (res->state & DLM_LOCK_RES_MIGRATING) {
		status = DLM_MIGRATING;
		goto leave;
	}

	/* see above for what the spec says about
	 * LKM_CANCEL and the lock queue state */
	if (flags & LKM_CANCEL)
		status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
	else
		status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);

	if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node))
		goto leave;

	/* By now this has been masked out of cancel requests. */
	if (flags & LKM_VALBLK) {
		/* make the final update to the lvb */
		if (master_node)
			memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
		else
			flags |= LKM_PUT_LVB; /* let the send function
					       * handle it. */
	}

	if (!master_node) {
		owner = res->owner;
		/* drop locks and send message */
		if (flags & LKM_CANCEL)
			lock->cancel_pending = 1;
		else
			lock->unlock_pending = 1;
		spin_unlock(&lock->spinlock);
		spin_unlock(&res->spinlock);
		status = dlm_send_remote_unlock_request(dlm, res, lock, lksb,
							flags, owner);
		spin_lock(&res->spinlock);
		spin_lock(&lock->spinlock);
		/* if the master told us the lock was already granted,
		 * let the ast handle all of these actions */
		if (status == DLM_CANCELGRANT) {
			actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
				     DLM_UNLOCK_REGRANT_LOCK|
				     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
		} else if (status == DLM_RECOVERING ||
			   status == DLM_MIGRATING ||
			   status == DLM_FORWARD ||
			   status == DLM_NOLOCKMGR) {
			/* must clear the actions because this unlock
			 * is about to be retried.  cannot free or do
			 * any list manipulation. */
			mlog(0, "%s:%.*s: clearing actions, %s\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name,
			     status == DLM_RECOVERING ? "recovering" :
			     (status == DLM_MIGRATING ? "migrating" :
				(status == DLM_FORWARD ? "forward" :
						"nolockmanager")));
			actions = 0;
		}
		if (flags & LKM_CANCEL)
			lock->cancel_pending = 0;
		else {
			if (!lock->unlock_pending)
				recovery_wait = 1;
			else
				lock->unlock_pending = 0;
		}
	}

	/* get an extra ref on lock.  if we are just switching
	 * lists here, we don't want the lock to go away. */
	dlm_lock_get(lock);

	if (actions & DLM_UNLOCK_REMOVE_LOCK) {
		list_del_init(&lock->list);
		dlm_lock_put(lock);
	}
	if (actions & DLM_UNLOCK_REGRANT_LOCK) {
		dlm_lock_get(lock);
		list_add_tail(&lock->list, &res->granted);
	}
	if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) {
		mlog(0, "clearing convert_type at %smaster node\n",
		     master_node ? "" : "non-");
		lock->ml.convert_type = LKM_IVMODE;
	}

	/* remove the extra ref on lock */
	dlm_lock_put(lock);

leave:
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	if (!dlm_lock_on_list(&res->converting, lock))
		BUG_ON(lock->ml.convert_type != LKM_IVMODE);
	else
		BUG_ON(lock->ml.convert_type == LKM_IVMODE);
	spin_unlock(&lock->spinlock);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	if (recovery_wait) {
		spin_lock(&res->spinlock);
		/* An unlock request succeeds immediately once the owner
		 * dies, and the lock has already been removed from the
		 * grant list.  We have to wait for RECOVERING to finish,
		 * or we miss the chance to purge the lockres, since the
		 * removal is much faster than the recovery process.
		 */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
		spin_unlock(&res->spinlock);
	}

	/* let the caller's final dlm_lock_put handle the actual kfree */
	if (actions & DLM_UNLOCK_FREE_LOCK) {
		/* this should always be coupled with list removal */
		BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
		mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     kref_read(&lock->lock_refs) - 1);
		dlm_lock_put(lock);
	}
	if (actions & DLM_UNLOCK_CALL_AST)
		*call_ast = 1;

	/* if cancel or unlock succeeded, lvb work is done */
	if (status == DLM_NORMAL)
		lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);

	return status;
}

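/*
 * Recovery-path helpers: when the master dies while an unlock or
 * cancel is in flight (see the unlock_pending/cancel_pending flags
 * above), the recovery code commits the half-finished operation
 * against the surviving lockres state instead of replaying it.
 */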
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock)
{
	/* leave DLM_LKSB_PUT_LVB on the lksb so any final
	 * update of the lvb will be sent to the new master */
	list_del_init(&lock->list);
}

void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock)
{
	list_move_tail(&lock->list, &res->granted);
	lock->ml.convert_type = LKM_IVMODE;
}


static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm,
					  struct dlm_lock_resource *res,
					  struct dlm_lock *lock,
					  struct dlm_lockstatus *lksb,
					  int flags,
					  int *call_ast)
{
	return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
}

static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm,
					  struct dlm_lock_resource *res,
					  struct dlm_lock *lock,
					  struct dlm_lockstatus *lksb,
					  int flags, int *call_ast)
{
	return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
}

/*
 * locking:
 *   caller needs:  none
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_FORWARD, DLM_NOLOCKMGR, status from network
 */
static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
						 struct dlm_lock_resource *res,
						 struct dlm_lock *lock,
						 struct dlm_lockstatus *lksb,
						 int flags,
						 u8 owner)
{
	struct dlm_unlock_lock unlock;
	int tmpret;
	enum dlm_status ret;
	int status = 0;
	struct kvec vec[2];
	size_t veclen = 1;

	mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);

	if (owner == dlm->node_num) {
		/* ended up trying to contact ourselves.  this means
		 * that the lockres had been remote but became local
		 * via a migration.  just retry it, now as local */
		mlog(0, "%s:%.*s: this node became the master due to a "
		     "migration, re-evaluate now\n", dlm->name,
		     res->lockname.len, res->lockname.name);
		return DLM_FORWARD;
	}

	memset(&unlock, 0, sizeof(unlock));
	unlock.node_idx = dlm->node_num;
	unlock.flags = cpu_to_be32(flags);
	unlock.cookie = lock->ml.cookie;
	unlock.namelen = res->lockname.len;
	memcpy(unlock.name, res->lockname.name, unlock.namelen);

	vec[0].iov_len = sizeof(struct dlm_unlock_lock);
	vec[0].iov_base = &unlock;

	if (flags & LKM_PUT_LVB) {
		/* extra data to send if we are updating lvb */
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key,
					vec, veclen, owner, &status);
	if (tmpret >= 0) {
		/* successfully sent and received */
		if (status == DLM_FORWARD)
			mlog(0, "master was in-progress.  retry\n");
		ret = status;
	} else {
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", tmpret, DLM_UNLOCK_LOCK_MSG, dlm->key, owner);
		if (dlm_is_host_down(tmpret)) {
			/* NOTE: this seems strange, but it is what we want.
			 * when the master goes down during a cancel or
			 * unlock, the recovery code completes the operation
			 * as if the master had not died, then passes the
			 * updated state to the recovery master.  this thread
			 * just needs to finish out the operation and call
			 * the unlockast. */
			if (dlm_is_node_dead(dlm, owner))
				ret = DLM_NORMAL;
			else
				ret = DLM_NOLOCKMGR;
		} else {
			/* something bad.  this will BUG in ocfs2 */
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}

/*
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID, DLM_IVBUFLEN,
 *          DLM_FORWARD, DLM_RECOVERING, DLM_MIGRATING, or the
 *          return value from dlmunlock_master
 */
int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	enum dlm_status status = DLM_NORMAL;
	int found = 0, i;
	struct dlm_lockstatus *lksb = NULL;
	int ignore;
	u32 flags;
	struct list_head *queue;

	flags = be32_to_cpu(unlock->flags);

	if (flags & LKM_GET_LVB) {
		mlog(ML_ERROR, "bad args!  GET_LVB specified on unlock!\n");
		return DLM_BADARGS;
	}

	if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
		mlog(ML_ERROR, "bad args!  cannot modify lvb on a CANCEL "
		     "request!\n");
		return DLM_BADARGS;
	}

	if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
		return DLM_IVBUFLEN;
	}

	if (!dlm_grab(dlm))
		return DLM_FORWARD;

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");

	res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
	if (!res) {
		/* We assume here that a missing lock resource simply
		 * means it was migrated away and destroyed before the
		 * other node could detect it. */
		mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
		status = DLM_FORWARD;
		goto not_found;
	}

	queue = &res->granted;
	found = 0;
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_RECOVERING\n");
		status = DLM_RECOVERING;
		goto leave;
	}

	if (res->state & DLM_LOCK_RES_MIGRATING) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_MIGRATING\n");
		status = DLM_MIGRATING;
		goto leave;
	}

	if (res->owner != dlm->node_num) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_FORWARD -- not master\n");
		status = DLM_FORWARD;
		goto leave;
	}

	for (i = 0; i < 3; i++) {
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == unlock->cookie &&
			    lock->ml.node == unlock->node_idx) {
				dlm_lock_get(lock);
				found = 1;
				break;
			}
		}
		if (found)
			break;
		/* scan granted -> converting -> blocked queues */
		queue++;
	}
	spin_unlock(&res->spinlock);
	if (!found) {
		status = DLM_IVLOCKID;
		goto not_found;
	}

	/* lock was found on queue */
	lksb = lock->lksb;
	if (flags & (LKM_VALBLK|LKM_PUT_LVB) &&
	    lock->ml.type != LKM_EXMODE)
		flags &= ~(LKM_VALBLK|LKM_PUT_LVB);

	/* unlockast only called on originating node */
	if (flags & LKM_PUT_LVB) {
		lksb->flags |= DLM_LKSB_PUT_LVB;
		memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN);
	}

	/* if this is in-progress, propagate the DLM_FORWARD
	 * all the way back out */
	status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
	if (status == DLM_FORWARD)
		mlog(0, "lockres is in progress\n");

	if (flags & LKM_PUT_LVB)
		lksb->flags &= ~DLM_LKSB_PUT_LVB;

	dlm_lockres_calc_usage(dlm, res);
	dlm_kick_thread(dlm, res);

not_found:
	if (!found)
		mlog(ML_ERROR, "failed to find lock to unlock! "
			       "cookie=%u:%llu\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie)));
	else
		dlm_lock_put(lock);

leave:
	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}


static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions)
{
	enum dlm_status status;

	if (dlm_lock_on_list(&res->blocked, lock)) {
		/* cancel this outright */
		status = DLM_NORMAL;
		*actions = (DLM_UNLOCK_CALL_AST |
			    DLM_UNLOCK_REMOVE_LOCK);
	} else if (dlm_lock_on_list(&res->converting, lock)) {
		/* cancel the request, put back on granted */
		status = DLM_NORMAL;
		*actions = (DLM_UNLOCK_CALL_AST |
			    DLM_UNLOCK_REMOVE_LOCK |
			    DLM_UNLOCK_REGRANT_LOCK |
			    DLM_UNLOCK_CLEAR_CONVERT_TYPE);
	} else if (dlm_lock_on_list(&res->granted, lock)) {
		/* too late, already granted. */
		status = DLM_CANCELGRANT;
		*actions = DLM_UNLOCK_CALL_AST;
	} else {
		mlog(ML_ERROR, "lock to cancel is not on any list!\n");
		status = DLM_IVLOCKID;
		*actions = 0;
	}
	return status;
}

static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions)
{
	enum dlm_status status;

	/* unlock request */
	if (!dlm_lock_on_list(&res->granted, lock)) {
		status = DLM_DENIED;
		dlm_error(status);
		*actions = 0;
	} else {
		/* unlock granted lock */
		status = DLM_NORMAL;
		*actions = (DLM_UNLOCK_FREE_LOCK |
			    DLM_UNLOCK_CALL_AST |
			    DLM_UNLOCK_REMOVE_LOCK);
	}
	return status;
}

/* there seems to be no point in doing this async
 * since (even for the remote case) there is really
 * no work to queue up... so just do it and fire the
 * unlockast by hand when done... */
enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
			  int flags, dlm_astunlockfunc_t *unlockast, void *data)
{
	enum dlm_status status;
	struct dlm_lock_resource *res;
	struct dlm_lock *lock = NULL;
	int call_ast, is_master;

	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}

	if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
		mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
		flags &= ~LKM_VALBLK;
	}

	if (!lksb->lockid || !lksb->lockid->lockres) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	lock = lksb->lockid;
	BUG_ON(!lock);
	dlm_lock_get(lock);

	res = lock->lockres;
	BUG_ON(!res);
	dlm_lockres_get(res);
retry:
	call_ast = 0;
	/* need to retry up here because owner may have changed */
	mlog(0, "lock=%p res=%p\n", lock, res);

	spin_lock(&res->spinlock);
	is_master = (res->owner == dlm->node_num);
	if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
		flags &= ~LKM_VALBLK;
	spin_unlock(&res->spinlock);

	if (is_master) {
		status = dlmunlock_master(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_master: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	} else {
		status = dlmunlock_remote(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_remote: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	}

	if (status == DLM_RECOVERING ||
	    status == DLM_MIGRATING ||
	    status == DLM_FORWARD ||
	    status == DLM_NOLOCKMGR) {

		/* We want to go away for a tiny bit to allow recovery
		 * / migration to complete on this resource. I don't
		 * know of any wait queue we could sleep on as this
		 * may be happening on another node. Perhaps the
		 * proper solution is to queue up requests on the
		 * other end? */

		/* do we want to yield(); ?? */
		msleep(50);

		mlog(0, "retrying unlock due to pending recovery/"
		     "migration/in-progress/reconnect\n");
		goto retry;
	}

	if (call_ast) {
		mlog(0, "calling unlockast(%p, %d)\n", data, status);
		if (is_master) {
			/* it is possible that there is one last bast
			 * pending.  make sure it is flushed, then
			 * call the unlockast.
			 * not an issue if this lock is mastered remotely,
			 * since it has been removed from the lockres
			 * queues and cannot be found. */
			dlm_kick_thread(dlm, NULL);
			wait_event(dlm->ast_wq,
				   dlm_lock_basts_flushed(dlm, lock));
		}
		(*unlockast)(data, status);
	}

	if (status == DLM_CANCELGRANT)
		status = DLM_NORMAL;

	if (status == DLM_NORMAL) {
		mlog(0, "kicking the thread\n");
		dlm_kick_thread(dlm, res);
	} else
		dlm_error(status);

	dlm_lockres_calc_usage(dlm, res);
	dlm_lockres_put(res);
	dlm_lock_put(lock);

	mlog(0, "returning status=%d!\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(dlmunlock);
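
/*
 * Minimal usage sketch (hypothetical caller; my_unlock_ast and my_ctx
 * are illustrative names, not part of this API):
 *
 *	static void my_unlock_ast(void *data, enum dlm_status st)
 *	{
 *		// data is the last argument passed to dlmunlock()
 *	}
 *
 *	status = dlmunlock(dlm, &my_ctx->lksb, LKM_VALBLK,
 *			   my_unlock_ast, my_ctx);
 *
 * LKM_VALBLK pushes the caller's lvb to the master on the way out
 * (only honored for LKM_EXMODE locks); as noted above, the unlockast
 * is fired by hand before dlmunlock() returns.
 */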