/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmlock.c
 *
 * underlying calls for lock creation
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#include "dlmconvert.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

static struct kmem_cache *dlm_lock_cache = NULL;

static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;

static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res,
					       struct dlm_lock *lock, int flags);
static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);

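/* Create the slab cache from which every struct dlm_lock is
 * allocated (see dlm_new_lock() below). */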
int dlm_init_lock_cache(void)
{
	dlm_lock_cache = kmem_cache_create("o2dlm_lock",
					   sizeof(struct dlm_lock),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (dlm_lock_cache == NULL)
		return -ENOMEM;
	return 0;
}

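/* Destroy the dlm_lock slab cache; the counterpart to
 * dlm_init_lock_cache() above. */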
void dlm_destroy_lock_cache(void)
{
	if (dlm_lock_cache)
		kmem_cache_destroy(dlm_lock_cache);
}

/* Tell us whether we can grant a new lock request.
 * locking:
 *   caller needs:  res->spinlock
 *   taken:         none
 *   held on exit:  none
 * returns: 1 if the lock can be granted, 0 otherwise.
 */
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
				  struct dlm_lock *lock)
{
	struct list_head *iter;
	struct dlm_lock *tmplock;

	list_for_each(iter, &res->granted) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
	}

	list_for_each(iter, &res->converting) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
		if (!dlm_lock_compatible(tmplock->ml.convert_type,
					 lock->ml.type))
			return 0;
	}

	return 1;
}

/* performs lock creation at the lockres master site
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_NOTQUEUED
 */
static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	int call_ast = 0, kick_thread = 0;
	enum dlm_status status = DLM_NORMAL;

	mlog(0, "type=%d\n", lock->ml.type);

	spin_lock(&res->spinlock);
	/* if called from dlm_create_lock_handler, need to
	 * ensure it will not sleep in dlm_wait_on_lockres */
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL &&
	    lock->ml.node != dlm->node_num) {
		/* erf.  state changed after lock was dropped. */
		spin_unlock(&res->spinlock);
		dlm_error(status);
		return status;
	}
	__dlm_wait_on_lockres(res);
	__dlm_lockres_reserve_ast(res);

	if (dlm_can_grant_new_lock(res, lock)) {
		mlog(0, "I can grant this lock right away\n");
		/* got it right away */
		lock->lksb->status = DLM_NORMAL;
		status = DLM_NORMAL;
		dlm_lock_get(lock);
		list_add_tail(&lock->list, &res->granted);

		/* for the recovery lock, we can't allow the ast
		 * to be queued since the dlmthread is already
		 * frozen.  but the recovery lock is always locked
		 * with LKM_NOQUEUE so we do not need the ast in
		 * this special case */
		if (!dlm_is_recovery_lock(res->lockname.name,
					  res->lockname.len)) {
			kick_thread = 1;
			call_ast = 1;
		} else {
			mlog(0, "%s: returning DLM_NORMAL to "
			     "node %u for reco lock\n", dlm->name,
			     lock->ml.node);
		}
	} else {
		/* for NOQUEUE request, unless we get the
		 * lock right away, return DLM_NOTQUEUED */
		if (flags & LKM_NOQUEUE) {
			status = DLM_NOTQUEUED;
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				mlog(0, "%s: returning NOTQUEUED to "
				     "node %u for reco lock\n", dlm->name,
				     lock->ml.node);
			}
		} else {
			status = DLM_NORMAL;
			dlm_lock_get(lock);
			list_add_tail(&lock->list, &res->blocked);
			kick_thread = 1;
		}
	}

	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* either queue the ast or release it */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else
		dlm_lockres_release_ast(dlm, res);

	dlm_lockres_calc_usage(dlm, res);
	if (kick_thread)
		dlm_kick_thread(dlm, res);

	return status;
}

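/* Back out a lock attempt that did not complete: take the lock off
 * whichever local queue it was provisionally placed on and clear the
 * LVB request flag so no stale value block is copied back. */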
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock)
{
	/* remove from local queue if it failed */
	list_del_init(&lock->list);
	lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
}


/*
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_DENIED, DLM_RECOVERING, or net status
 */
static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	enum dlm_status status = DLM_DENIED;
	int lockres_changed = 1;

	mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
	     lock->ml.type, res->lockname.len,
	     res->lockname.name, flags);

	/*
	 * Wait if resource is getting recovered, remastered, etc.
	 * If the resource was remastered and new owner is self, then exit.
	 */
	spin_lock(&res->spinlock);
	__dlm_wait_on_lockres(res);
	if (res->owner == dlm->node_num) {
		spin_unlock(&res->spinlock);
		return DLM_RECOVERING;
	}
	res->state |= DLM_LOCK_RES_IN_PROGRESS;

	/* add lock to local (secondary) queue */
	dlm_lock_get(lock);
	list_add_tail(&lock->list, &res->blocked);
	lock->lock_pending = 1;
	spin_unlock(&res->spinlock);

	/* spec seems to say that you will get DLM_NORMAL when the lock
	 * has been queued, meaning we need to wait for a reply here. */
	status = dlm_send_remote_lock_request(dlm, res, lock, flags);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	lock->lock_pending = 0;
	if (status != DLM_NORMAL) {
		if (status == DLM_RECOVERING &&
		    dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			/* recovery lock was mastered by dead node.
			 * we need to have calc_usage shoot down this
			 * lockres and completely remaster it. */
			mlog(0, "%s: recovery lock was owned by "
			     "dead node %u, remaster it now.\n",
			     dlm->name, res->owner);
		} else if (status != DLM_NOTQUEUED) {
			/*
			 * DO NOT call calc_usage, as this would unhash
			 * the remote lockres before we ever get to use
			 * it.  treat as if we never made any change to
			 * the lockres.
			 */
			lockres_changed = 0;
			dlm_error(status);
		}
		dlm_revert_pending_lock(res, lock);
		dlm_lock_put(lock);
	} else if (dlm_is_recovery_lock(res->lockname.name,
					res->lockname.len)) {
		/* special case for the $RECOVERY lock.
		 * there will never be an AST delivered to put
		 * this lock on the proper secondary queue
		 * (granted), so do it manually. */
		mlog(0, "%s: $RECOVERY lock for this node (%u) is "
		     "mastered by %u; got lock, manually granting (no ast)\n",
		     dlm->name, dlm->node_num, res->owner);
		list_move_tail(&lock->list, &res->granted);
	}
	spin_unlock(&res->spinlock);

	if (lockres_changed)
		dlm_lockres_calc_usage(dlm, res);

	wake_up(&res->wq);
	return status;
}


/* for remote lock creation.
 * locking:
 *   caller needs:  none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NOLOCKMGR, or net status
 */
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res,
					       struct dlm_lock *lock, int flags)
{
	struct dlm_create_lock create;
	int tmpret, status = 0;
	enum dlm_status ret;

	memset(&create, 0, sizeof(create));
	create.node_idx = dlm->node_num;
	create.requested_type = lock->ml.type;
	create.cookie = lock->ml.cookie;
	create.namelen = res->lockname.len;
	create.flags = cpu_to_be32(flags);
	memcpy(create.name, res->lockname.name, create.namelen);

	tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
				    sizeof(create), res->owner, &status);
	if (tmpret >= 0) {
		ret = status;
		if (ret == DLM_REJECTED) {
			mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
			     "owned by node %u. That node is coming back up "
			     "currently.\n", dlm->name, create.namelen,
			     create.name, res->owner);
			dlm_print_one_lock_resource(res);
			BUG();
		}
	} else {
		mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
		     "node %u\n", dlm->name, create.namelen, create.name,
		     tmpret, res->owner);
		if (dlm_is_host_down(tmpret))
			ret = DLM_RECOVERING;
		else
			ret = dlm_err_to_dlm_status(tmpret);
	}

	return ret;
}

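/* Reference counting for struct dlm_lock.  The last dlm_lock_put()
 * triggers dlm_lock_release() below, which frees the lock (and its
 * lksb if that was kernel-allocated). */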
void dlm_lock_get(struct dlm_lock *lock)
{
	kref_get(&lock->lock_refs);
}

void dlm_lock_put(struct dlm_lock *lock)
{
	kref_put(&lock->lock_refs, dlm_lock_release);
}

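/* kref release callback, invoked once the final reference is dropped.
 * By this point the lock must be off all lists and have no pending
 * ast/bast work, which the BUG_ON()s below assert. */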
static void dlm_lock_release(struct kref *kref)
{
	struct dlm_lock *lock;

	lock = container_of(kref, struct dlm_lock, lock_refs);

	BUG_ON(!list_empty(&lock->list));
	BUG_ON(!list_empty(&lock->ast_list));
	BUG_ON(!list_empty(&lock->bast_list));
	BUG_ON(lock->ast_pending);
	BUG_ON(lock->bast_pending);

	dlm_lock_detach_lockres(lock);

	if (lock->lksb_kernel_allocated) {
		mlog(0, "freeing kernel-allocated lksb\n");
		kfree(lock->lksb);
	}
	kmem_cache_free(dlm_lock_cache, lock);
}

/* associate a lock with its lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res)
{
	dlm_lockres_get(res);
	lock->lockres = res;
}

/* drop ref on lockres, if there is still one associated with lock */
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
{
	struct dlm_lock_resource *res;

	res = lock->lockres;
	if (res) {
		lock->lockres = NULL;
		mlog(0, "removing lock's lockres reference\n");
		dlm_lockres_put(res);
	}
}

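/* Common initialization for a newly allocated dlm_lock: empty list
 * heads, no pending work, invalid convert/blocked modes, and the
 * cookie stored in big-endian (wire) byte order. */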
static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie)
{
	INIT_LIST_HEAD(&newlock->list);
	INIT_LIST_HEAD(&newlock->ast_list);
	INIT_LIST_HEAD(&newlock->bast_list);
	spin_lock_init(&newlock->spinlock);
	newlock->ml.type = type;
	newlock->ml.convert_type = LKM_IVMODE;
	newlock->ml.highest_blocked = LKM_IVMODE;
	newlock->ml.node = node;
	newlock->ml.pad1 = 0;
	newlock->ml.list = 0;
	newlock->ml.flags = 0;
	newlock->ast = NULL;
	newlock->bast = NULL;
	newlock->astdata = NULL;
	newlock->ml.cookie = cpu_to_be64(cookie);
	newlock->ast_pending = 0;
	newlock->bast_pending = 0;
	newlock->convert_pending = 0;
	newlock->lock_pending = 0;
	newlock->unlock_pending = 0;
	newlock->cancel_pending = 0;
	newlock->lksb_kernel_allocated = 0;

	kref_init(&newlock->lock_refs);
}

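/* Allocate and initialize a dlm_lock.  If the caller passes a NULL
 * lksb, one is allocated here and marked kernel-allocated so that
 * dlm_lock_release() knows to kfree() it. */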
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb)
{
	struct dlm_lock *lock;
	int kernel_allocated = 0;

	lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
	if (!lock)
		return NULL;

	if (!lksb) {
		/* zero memory only if kernel-allocated */
		lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
		if (!lksb) {
			kmem_cache_free(dlm_lock_cache, lock);
			return NULL;
		}
		kernel_allocated = 1;
	}

	dlm_init_lock(lock, type, node, cookie);
	if (kernel_allocated)
		lock->lksb_kernel_allocated = 1;
	lock->lksb = lksb;
	lksb->lockid = lock;
	return lock;
}

/* handler for lock creation net message
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
 */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	enum dlm_status status = DLM_NORMAL;
	char *name;
	unsigned int namelen;

	BUG_ON(!dlm);

	if (!dlm_grab(dlm))
		return DLM_REJECTED;

	name = create->name;
	namelen = create->namelen;
	status = DLM_REJECTED;
	if (!dlm_domain_fully_joined(dlm)) {
		mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
		     "sending a create_lock message for lock %.*s!\n",
		     dlm->name, create->node_idx, namelen, name);
		dlm_error(status);
		goto leave;
	}

	status = DLM_IVBUFLEN;
	if (namelen > DLM_LOCKID_NAME_MAX) {
		dlm_error(status);
		goto leave;
	}

	status = DLM_SYSERR;
	newlock = dlm_new_lock(create->requested_type,
			       create->node_idx,
			       be64_to_cpu(create->cookie), NULL);
	if (!newlock) {
		dlm_error(status);
		goto leave;
	}

	lksb = newlock->lksb;

	if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
		lksb->flags |= DLM_LKSB_GET_LVB;
		mlog(0, "set DLM_LKSB_GET_LVB flag\n");
	}

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, name, namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	spin_unlock(&res->spinlock);

	if (status != DLM_NORMAL) {
		mlog(0, "lockres recovering/migrating/in-progress\n");
		goto leave;
	}

	dlm_lock_attach_lockres(newlock, res);

	status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
leave:
	if (status != DLM_NORMAL)
		if (newlock)
			dlm_lock_put(newlock);

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}


/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
	u64 tmpnode = node_num;

	/* shift single byte of node num into top 8 bits */
	tmpnode <<= 56;

	spin_lock(&dlm_cookie_lock);
	*cookie = (dlm_next_cookie | tmpnode);
	if (++dlm_next_cookie & 0xff00000000000000ull) {
		mlog(0, "This node's cookie will now wrap!\n");
		dlm_next_cookie = 1;
	}
	spin_unlock(&dlm_cookie_lock);
}

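/* dlmlock() is the entry point exported to o2dlm users.  It handles
 * both new lock requests and conversions (LKM_CONVERT), retrying
 * internally while the lockres is being recovered or migrated.
 *
 * A minimal calling sketch (my_ast, my_bast and my_data are
 * hypothetical caller-supplied callbacks, not defined in this file):
 *
 *	status = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE,
 *			 name, namelen, my_ast, my_data, my_bast);
 */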
enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
			struct dlm_lockstatus *lksb, int flags,
			const char *name, int namelen, dlm_astlockfunc_t *ast,
			void *data, dlm_bastlockfunc_t *bast)
{
	enum dlm_status status;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	int convert = 0, recovery = 0;

	/* yes this function is a mess.
	 * TODO: clean this up.  lots of common code in the
	 *       lock and convert paths, especially in the retry blocks */
	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}

	status = DLM_BADPARAM;
	if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
		dlm_error(status);
		goto error;
	}

	if (flags & ~LKM_VALID_FLAGS) {
		dlm_error(status);
		goto error;
	}

	convert = (flags & LKM_CONVERT);
	recovery = (flags & LKM_RECOVERY);

	if (recovery &&
	    (!dlm_is_recovery_lock(name, namelen) || convert) ) {
		dlm_error(status);
		goto error;
	}
	if (convert && (flags & LKM_LOCAL)) {
		mlog(ML_ERROR, "strange LOCAL convert request!\n");
		goto error;
	}

	if (convert) {
		/* CONVERT request */

		/* if converting, must pass in a valid dlm_lock */
		lock = lksb->lockid;
		if (!lock) {
			mlog(ML_ERROR, "NULL lock pointer in convert "
			     "request\n");
			goto error;
		}

		res = lock->lockres;
		if (!res) {
			mlog(ML_ERROR, "NULL lockres pointer in convert "
			     "request\n");
			goto error;
		}
		dlm_lockres_get(res);

		/* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
		 * static after the original lock call.  convert requests will
		 * ensure that everything is the same, or return DLM_BADARGS.
		 * this means that DLM_DENIED_NOASTS will never be returned.
		 */
		if (lock->lksb != lksb || lock->ast != ast ||
		    lock->bast != bast || lock->astdata != data) {
			status = DLM_BADARGS;
			mlog(ML_ERROR, "new args:  lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lksb, ast, bast, data);
			mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lock->lksb, lock->ast,
			     lock->bast, lock->astdata);
			goto error;
		}
retry_convert:
		dlm_wait_for_recovery(dlm);

		if (res->owner == dlm->node_num)
			status = dlmconvert_master(dlm, res, lock, flags, mode);
		else
			status = dlmconvert_remote(dlm, res, lock, flags, mode);
		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			/* for now, see how this works without sleeping
			 * and just retry right away.  I suspect the reco
			 * or migration will complete fast enough that
			 * no waiting will be necessary */
			mlog(0, "retrying convert with migration/recovery/"
			     "in-progress\n");
			msleep(100);
			goto retry_convert;
		}
	} else {
		u64 tmpcookie;

		/* LOCK request */
		status = DLM_BADARGS;
		if (!name) {
			dlm_error(status);
			goto error;
		}

		status = DLM_IVBUFLEN;
		if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
			dlm_error(status);
			goto error;
		}

		dlm_get_next_cookie(dlm->node_num, &tmpcookie);
		lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
		if (!lock) {
			dlm_error(status);
			goto error;
		}

		if (!recovery)
			dlm_wait_for_recovery(dlm);

		/* find or create the lock resource */
		res = dlm_get_lock_resource(dlm, name, namelen, flags);
		if (!res) {
			status = DLM_IVLOCKID;
			dlm_error(status);
			goto error;
		}

		mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
		mlog(0, "creating lock: lock=%p res=%p\n", lock, res);

		dlm_lock_attach_lockres(lock, res);
		lock->ast = ast;
		lock->bast = bast;
		lock->astdata = data;

retry_lock:
		if (flags & LKM_VALBLK) {
			mlog(0, "LKM_VALBLK passed by caller\n");

			/* LVB requests for non PR, PW or EX locks are
			 * ignored. */
			if (mode < LKM_PRMODE)
				flags &= ~LKM_VALBLK;
			else {
				flags |= LKM_GET_LVB;
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			}
		}

		if (res->owner == dlm->node_num)
			status = dlmlock_master(dlm, res, lock, flags);
		else
			status = dlmlock_remote(dlm, res, lock, flags);

		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			msleep(100);
			if (recovery) {
				if (status != DLM_RECOVERING)
					goto retry_lock;
				/* wait to see the node go down, then
				 * drop down and allow the lockres to
				 * get cleaned up.  need to remaster. */
				dlm_wait_for_node_death(dlm, res->owner,
						DLM_NODE_DEATH_WAIT_MAX);
			} else {
				dlm_wait_for_recovery(dlm);
				goto retry_lock;
			}
		}

		/* Inflight taken in dlm_get_lock_resource() is dropped here */
		spin_lock(&res->spinlock);
		dlm_lockres_drop_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);

		dlm_lockres_calc_usage(dlm, res);
		dlm_kick_thread(dlm, res);

		if (status != DLM_NORMAL) {
			lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
			if (status != DLM_NOTQUEUED)
				dlm_error(status);
			goto error;
		}
	}

error:
	if (status != DLM_NORMAL) {
		if (lock && !convert)
			dlm_lock_put(lock);
		// this is kind of unnecessary
		lksb->status = status;
	}

	/* put lockres ref from the convert path
	 * or from dlm_get_lock_resource */
	if (res)
		dlm_lockres_put(res);

	return status;
}
EXPORT_SYMBOL_GPL(dlmlock);
767