/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmlock.c
 *
 * underlying calls for lock creation
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#include "dlmconvert.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

static struct kmem_cache *dlm_lock_cache = NULL;

static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;

static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res,
					       struct dlm_lock *lock, int flags);
static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);

int dlm_init_lock_cache(void)
{
	dlm_lock_cache = kmem_cache_create("o2dlm_lock",
					   sizeof(struct dlm_lock),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (dlm_lock_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_lock_cache(void)
{
	if (dlm_lock_cache)
		kmem_cache_destroy(dlm_lock_cache);
}
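
/*
 * A brief usage sketch (hypothetical caller; the real init/exit wiring
 * lives outside this file): the cache above must exist before any
 * struct dlm_lock is allocated and should only be destroyed after all
 * locks have been freed, e.g.
 *
 *	if (dlm_init_lock_cache())
 *		return -ENOMEM;
 *	...
 *	dlm_destroy_lock_cache();
 */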

/* Tell us whether we can grant a new lock request.
 * locking:
 *   caller needs:  res->spinlock
 *   taken:         none
 *   held on exit:  none
 * returns: 1 if the lock can be granted, 0 otherwise.
 */
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
				  struct dlm_lock *lock)
{
	struct list_head *iter;
	struct dlm_lock *tmplock;

	list_for_each(iter, &res->granted) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
	}

	list_for_each(iter, &res->converting) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
		if (!dlm_lock_compatible(tmplock->ml.convert_type,
					 lock->ml.type))
			return 0;
	}

	return 1;
}
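
/*
 * For reference, a rough sketch of the compatibility rules applied by
 * dlm_lock_compatible() for the three modes accepted by dlmlock() below
 * (an illustration of the usual DLM mode semantics, not a copy of the
 * helper in dlmcommon.h):
 *
 *	held \ requested    NL    PR    EX
 *	NL                  yes   yes   yes
 *	PR                  yes   yes   no
 *	EX                  yes   no    no
 *
 * A new request is granted only if it is compatible with every lock on
 * both the granted and the converting queues; for converting locks it
 * must be compatible with both the currently held mode and the mode
 * being converted to.
 */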

/* performs lock creation at the lockres master site
 * locking:
 *   caller needs: none
 *   taken:        takes and drops res->spinlock
 *   held on exit: none
 * returns: DLM_NORMAL, DLM_NOTQUEUED
 */
static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	int call_ast = 0, kick_thread = 0;
	enum dlm_status status = DLM_NORMAL;

	mlog(0, "type=%d\n", lock->ml.type);

	spin_lock(&res->spinlock);
	/* if called from dlm_create_lock_handler, need to
	 * ensure it will not sleep in dlm_wait_on_lockres */
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL &&
	    lock->ml.node != dlm->node_num) {
		/* erf.  state changed after lock was dropped. */
		spin_unlock(&res->spinlock);
		dlm_error(status);
		return status;
	}
	__dlm_wait_on_lockres(res);
	__dlm_lockres_reserve_ast(res);

	if (dlm_can_grant_new_lock(res, lock)) {
		mlog(0, "I can grant this lock right away\n");
		/* got it right away */
		lock->lksb->status = DLM_NORMAL;
		status = DLM_NORMAL;
		dlm_lock_get(lock);
		list_add_tail(&lock->list, &res->granted);

		/* for the recovery lock, we can't allow the ast
		 * to be queued since the dlmthread is already
		 * frozen.  but the recovery lock is always locked
		 * with LKM_NOQUEUE so we do not need the ast in
		 * this special case */
		if (!dlm_is_recovery_lock(res->lockname.name,
					  res->lockname.len)) {
			kick_thread = 1;
			call_ast = 1;
		} else {
			mlog(0, "%s: returning DLM_NORMAL to "
			     "node %u for reco lock\n", dlm->name,
			     lock->ml.node);
		}
	} else {
		/* for NOQUEUE request, unless we get the
		 * lock right away, return DLM_NOTQUEUED */
		if (flags & LKM_NOQUEUE) {
			status = DLM_NOTQUEUED;
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				mlog(0, "%s: returning NOTQUEUED to "
				     "node %u for reco lock\n", dlm->name,
				     lock->ml.node);
			}
		} else {
			dlm_lock_get(lock);
			list_add_tail(&lock->list, &res->blocked);
			kick_thread = 1;
		}
	}

	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* either queue the ast or release it */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else
		dlm_lockres_release_ast(dlm, res);

	dlm_lockres_calc_usage(dlm, res);
	if (kick_thread)
		dlm_kick_thread(dlm, res);

	return status;
}

void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock)
{
	/* remove from local queue if it failed */
	list_del_init(&lock->list);
	lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
}


/*
 * locking:
 *   caller needs: none
 *   taken:        takes and drops res->spinlock
 *   held on exit: none
 * returns: DLM_DENIED, DLM_RECOVERING, or net status
 */
static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	enum dlm_status status = DLM_DENIED;
	int lockres_changed = 1;

	mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
	     lock->ml.type, res->lockname.len,
	     res->lockname.name, flags);

	/*
	 * Wait if resource is getting recovered, remastered, etc.
	 * If the resource was remastered and new owner is self, then exit.
	 */
	spin_lock(&res->spinlock);
	__dlm_wait_on_lockres(res);
	if (res->owner == dlm->node_num) {
		spin_unlock(&res->spinlock);
		return DLM_RECOVERING;
	}
	res->state |= DLM_LOCK_RES_IN_PROGRESS;

	/* add lock to local (secondary) queue */
	dlm_lock_get(lock);
	list_add_tail(&lock->list, &res->blocked);
	lock->lock_pending = 1;
	spin_unlock(&res->spinlock);

	/* spec seems to say that you will get DLM_NORMAL when the lock
	 * has been queued, meaning we need to wait for a reply here. */
	status = dlm_send_remote_lock_request(dlm, res, lock, flags);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	lock->lock_pending = 0;
	if (status != DLM_NORMAL) {
		if (status == DLM_RECOVERING &&
		    dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			/* recovery lock was mastered by dead node.
			 * we need to have calc_usage shoot down this
			 * lockres and completely remaster it. */
			mlog(0, "%s: recovery lock was owned by "
			     "dead node %u, remaster it now.\n",
			     dlm->name, res->owner);
		} else if (status != DLM_NOTQUEUED) {
			/*
			 * DO NOT call calc_usage, as this would unhash
			 * the remote lockres before we ever get to use
			 * it.  treat as if we never made any change to
			 * the lockres.
			 */
			lockres_changed = 0;
			dlm_error(status);
		}
		dlm_revert_pending_lock(res, lock);
		dlm_lock_put(lock);
	} else if (dlm_is_recovery_lock(res->lockname.name,
					res->lockname.len)) {
		/* special case for the $RECOVERY lock.
		 * there will never be an AST delivered to put
		 * this lock on the proper secondary queue
		 * (granted), so do it manually. */
		mlog(0, "%s: $RECOVERY lock for this node (%u) is "
		     "mastered by %u; got lock, manually granting (no ast)\n",
		     dlm->name, dlm->node_num, res->owner);
		list_move_tail(&lock->list, &res->granted);
	}
	spin_unlock(&res->spinlock);

	if (lockres_changed)
		dlm_lockres_calc_usage(dlm, res);

	wake_up(&res->wq);
	return status;
}


/* for remote lock creation.
 * locking:
 *   caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
 *   taken:        none
 *   held on exit: none
 * returns: DLM_NOLOCKMGR, or net status
 */
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res,
					       struct dlm_lock *lock, int flags)
{
	struct dlm_create_lock create;
	int tmpret, status = 0;
	enum dlm_status ret;

	memset(&create, 0, sizeof(create));
	create.node_idx = dlm->node_num;
	create.requested_type = lock->ml.type;
	create.cookie = lock->ml.cookie;
	create.namelen = res->lockname.len;
	create.flags = cpu_to_be32(flags);
	memcpy(create.name, res->lockname.name, create.namelen);

	tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
				    sizeof(create), res->owner, &status);
	if (tmpret >= 0) {
		ret = status;
		if (ret == DLM_REJECTED) {
			mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
			     "owned by node %u. That node is coming back up "
			     "currently.\n", dlm->name, create.namelen,
			     create.name, res->owner);
			dlm_print_one_lock_resource(res);
			BUG();
		}
	} else {
		mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
		     "node %u\n", dlm->name, create.namelen, create.name,
		     tmpret, res->owner);
		if (dlm_is_host_down(tmpret))
			ret = DLM_RECOVERING;
		else
			ret = dlm_err_to_dlm_status(tmpret);
	}

	return ret;
}

void dlm_lock_get(struct dlm_lock *lock)
{
	kref_get(&lock->lock_refs);
}

void dlm_lock_put(struct dlm_lock *lock)
{
	kref_put(&lock->lock_refs, dlm_lock_release);
}

static void dlm_lock_release(struct kref *kref)
{
	struct dlm_lock *lock;

	lock = container_of(kref, struct dlm_lock, lock_refs);

	BUG_ON(!list_empty(&lock->list));
	BUG_ON(!list_empty(&lock->ast_list));
	BUG_ON(!list_empty(&lock->bast_list));
	BUG_ON(lock->ast_pending);
	BUG_ON(lock->bast_pending);

	dlm_lock_detach_lockres(lock);

	if (lock->lksb_kernel_allocated) {
		mlog(0, "freeing kernel-allocated lksb\n");
		kfree(lock->lksb);
	}
	kmem_cache_free(dlm_lock_cache, lock);
}

/* associate a lock with its lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res)
{
	dlm_lockres_get(res);
	lock->lockres = res;
}

/* drop ref on lockres, if there is still one associated with lock */
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
{
	struct dlm_lock_resource *res;

	res = lock->lockres;
	if (res) {
		lock->lockres = NULL;
		mlog(0, "removing lock's lockres reference\n");
		dlm_lockres_put(res);
	}
}

static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie)
{
	INIT_LIST_HEAD(&newlock->list);
	INIT_LIST_HEAD(&newlock->ast_list);
	INIT_LIST_HEAD(&newlock->bast_list);
	spin_lock_init(&newlock->spinlock);
	newlock->ml.type = type;
	newlock->ml.convert_type = LKM_IVMODE;
	newlock->ml.highest_blocked = LKM_IVMODE;
	newlock->ml.node = node;
	newlock->ml.pad1 = 0;
	newlock->ml.list = 0;
	newlock->ml.flags = 0;
	newlock->ast = NULL;
	newlock->bast = NULL;
	newlock->astdata = NULL;
	newlock->ml.cookie = cpu_to_be64(cookie);
	newlock->ast_pending = 0;
	newlock->bast_pending = 0;
	newlock->convert_pending = 0;
	newlock->lock_pending = 0;
	newlock->unlock_pending = 0;
	newlock->cancel_pending = 0;
	newlock->lksb_kernel_allocated = 0;

	kref_init(&newlock->lock_refs);
}

struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb)
{
	struct dlm_lock *lock;
	int kernel_allocated = 0;

	lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
	if (!lock)
		return NULL;

	if (!lksb) {
		/* zero memory only if kernel-allocated */
		lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
		if (!lksb) {
			kmem_cache_free(dlm_lock_cache, lock);
			return NULL;
		}
		kernel_allocated = 1;
	}

	dlm_init_lock(lock, type, node, cookie);
	if (kernel_allocated)
		lock->lksb_kernel_allocated = 1;
	lock->lksb = lksb;
	lksb->lockid = lock;
	return lock;
}
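
/*
 * A rough sketch of how dlm_new_lock() is used in this file (local
 * variable names are illustrative only): dlmlock() below allocates a
 * fresh cookie and hands in the caller's lksb,
 *
 *	u64 cookie;
 *
 *	dlm_get_next_cookie(dlm->node_num, &cookie);
 *	lock = dlm_new_lock(mode, dlm->node_num, cookie, lksb);
 *
 * while dlm_create_lock_handler() passes a NULL lksb, so a
 * kernel-allocated one is created here and later freed by
 * dlm_lock_release() based on the lksb_kernel_allocated flag.
 */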

/* handler for lock creation net message
 * locking:
 *   caller needs: none
 *   taken:        takes and drops res->spinlock
 *   held on exit: none
 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
 */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	enum dlm_status status = DLM_NORMAL;
	char *name;
	unsigned int namelen;

	BUG_ON(!dlm);

	if (!dlm_grab(dlm))
		return DLM_REJECTED;

	name = create->name;
	namelen = create->namelen;
	status = DLM_REJECTED;
	if (!dlm_domain_fully_joined(dlm)) {
		mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
		     "sending a create_lock message for lock %.*s!\n",
		     dlm->name, create->node_idx, namelen, name);
		dlm_error(status);
		goto leave;
	}

	status = DLM_IVBUFLEN;
	if (namelen > DLM_LOCKID_NAME_MAX) {
		dlm_error(status);
		goto leave;
	}

	status = DLM_SYSERR;
	newlock = dlm_new_lock(create->requested_type,
			       create->node_idx,
			       be64_to_cpu(create->cookie), NULL);
	if (!newlock) {
		dlm_error(status);
		goto leave;
	}

	lksb = newlock->lksb;

	if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
		lksb->flags |= DLM_LKSB_GET_LVB;
		mlog(0, "set DLM_LKSB_GET_LVB flag\n");
	}

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, name, namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	spin_unlock(&res->spinlock);

	if (status != DLM_NORMAL) {
		mlog(0, "lockres recovering/migrating/in-progress\n");
		goto leave;
	}

	dlm_lock_attach_lockres(newlock, res);

	status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
leave:
	if (status != DLM_NORMAL)
		if (newlock)
			dlm_lock_put(newlock);

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}


/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
	u64 tmpnode = node_num;

	/* shift single byte of node num into top 8 bits */
	tmpnode <<= 56;

	spin_lock(&dlm_cookie_lock);
	*cookie = (dlm_next_cookie | tmpnode);
	if (++dlm_next_cookie & 0xff00000000000000ull) {
		mlog(0, "This node's cookie will now wrap!\n");
		dlm_next_cookie = 1;
	}
	spin_unlock(&dlm_cookie_lock);
}
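
/*
 * Worked example (illustration only): for node_num 3 and a current
 * dlm_next_cookie of 0x1, the cookie handed back is
 *
 *	(3ULL << 56) | 0x1 = 0x0300000000000001
 *
 * i.e. the owning node number lives in the top 8 bits and the per-node
 * sequence counter in the low 56 bits; the counter resets to 1 once an
 * increment would spill into the node-number byte.
 */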

enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
			struct dlm_lockstatus *lksb, int flags,
			const char *name, int namelen, dlm_astlockfunc_t *ast,
			void *data, dlm_bastlockfunc_t *bast)
{
	enum dlm_status status;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	int convert = 0, recovery = 0;

	/* yes this function is a mess.
	 * TODO: clean this up.  lots of common code in the
	 *       lock and convert paths, especially in the retry blocks */
	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}

	status = DLM_BADPARAM;
	if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
		dlm_error(status);
		goto error;
	}

	if (flags & ~LKM_VALID_FLAGS) {
		dlm_error(status);
		goto error;
	}

	convert = (flags & LKM_CONVERT);
	recovery = (flags & LKM_RECOVERY);

	if (recovery &&
	    (!dlm_is_recovery_lock(name, namelen) || convert) ) {
		dlm_error(status);
		goto error;
	}
	if (convert && (flags & LKM_LOCAL)) {
		mlog(ML_ERROR, "strange LOCAL convert request!\n");
		goto error;
	}

	if (convert) {
		/* CONVERT request */

		/* if converting, must pass in a valid dlm_lock */
		lock = lksb->lockid;
		if (!lock) {
			mlog(ML_ERROR, "NULL lock pointer in convert "
			     "request\n");
			goto error;
		}

		res = lock->lockres;
		if (!res) {
			mlog(ML_ERROR, "NULL lockres pointer in convert "
			     "request\n");
			goto error;
		}
		dlm_lockres_get(res);

		/* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
		 * static after the original lock call.  convert requests will
		 * ensure that everything is the same, or return DLM_BADARGS.
		 * this means that DLM_DENIED_NOASTS will never be returned.
		 */
		if (lock->lksb != lksb || lock->ast != ast ||
		    lock->bast != bast || lock->astdata != data) {
			status = DLM_BADARGS;
			mlog(ML_ERROR, "new args:  lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lksb, ast, bast, data);
			mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lock->lksb, lock->ast,
			     lock->bast, lock->astdata);
			goto error;
		}
retry_convert:
		dlm_wait_for_recovery(dlm);

		if (res->owner == dlm->node_num)
			status = dlmconvert_master(dlm, res, lock, flags, mode);
		else
			status = dlmconvert_remote(dlm, res, lock, flags, mode);
		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			/* for now, see how this works without sleeping
			 * and just retry right away. I suspect the reco
			 * or migration will complete fast enough that
			 * no waiting will be necessary */
			mlog(0, "retrying convert with migration/recovery/"
			     "in-progress\n");
			msleep(100);
			goto retry_convert;
		}
	} else {
		u64 tmpcookie;

		/* LOCK request */
		status = DLM_BADARGS;
		if (!name) {
			dlm_error(status);
			goto error;
		}

		status = DLM_IVBUFLEN;
		if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
			dlm_error(status);
			goto error;
		}

		dlm_get_next_cookie(dlm->node_num, &tmpcookie);
		lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
		if (!lock) {
			dlm_error(status);
			goto error;
		}

		if (!recovery)
			dlm_wait_for_recovery(dlm);

		/* find or create the lock resource */
		res = dlm_get_lock_resource(dlm, name, namelen, flags);
		if (!res) {
			status = DLM_IVLOCKID;
			dlm_error(status);
			goto error;
		}

		mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
		mlog(0, "creating lock: lock=%p res=%p\n", lock, res);

		dlm_lock_attach_lockres(lock, res);
		lock->ast = ast;
		lock->bast = bast;
		lock->astdata = data;

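		/*
		 * From here the request may be retried: if the lockres is
		 * being recovered, migrated, or forwarded, the attempt below
		 * returns DLM_RECOVERING/DLM_MIGRATING/DLM_FORWARD and we
		 * come back to retry_lock after a short sleep (or, for the
		 * $RECOVERY lock, after waiting for the owner to die).
		 */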
retry_lock:
		if (flags & LKM_VALBLK) {
			mlog(0, "LKM_VALBLK passed by caller\n");

			/* LVB requests for non PR, PW or EX locks are
			 * ignored. */
			if (mode < LKM_PRMODE)
				flags &= ~LKM_VALBLK;
			else {
				flags |= LKM_GET_LVB;
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			}
		}

		if (res->owner == dlm->node_num)
			status = dlmlock_master(dlm, res, lock, flags);
		else
			status = dlmlock_remote(dlm, res, lock, flags);

		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			msleep(100);
			if (recovery) {
				if (status != DLM_RECOVERING)
					goto retry_lock;
				/* wait to see the node go down, then
				 * drop down and allow the lockres to
				 * get cleaned up.  need to remaster. */
				dlm_wait_for_node_death(dlm, res->owner,
						DLM_NODE_DEATH_WAIT_MAX);
			} else {
				dlm_wait_for_recovery(dlm);
				goto retry_lock;
			}
		}

		/* Inflight taken in dlm_get_lock_resource() is dropped here */
		spin_lock(&res->spinlock);
		dlm_lockres_drop_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);

		dlm_lockres_calc_usage(dlm, res);
		dlm_kick_thread(dlm, res);

		if (status != DLM_NORMAL) {
			lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
			if (status != DLM_NOTQUEUED)
				dlm_error(status);
			goto error;
		}
	}

error:
	if (status != DLM_NORMAL) {
		if (lock && !convert)
			dlm_lock_put(lock);
		// this is kind of unnecessary
		lksb->status = status;
	}

	/* put lockres ref from the convert path
	 * or from dlm_get_lock_resource */
	if (res)
		dlm_lockres_put(res);

	return status;
}
EXPORT_SYMBOL_GPL(dlmlock);
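
/*
 * A minimal caller-side sketch of dlmlock(), assuming an already
 * registered domain in "dlm"; the callback names, astdata and lockid
 * below are hypothetical and only illustrate the argument order used
 * above:
 *
 *	static void my_ast(void *astdata) { ... }
 *	static void my_bast(void *astdata, int blocked_type) { ... }
 *
 *	struct dlm_lockstatus lksb;
 *	enum dlm_status st;
 *
 *	memset(&lksb, 0, sizeof(lksb));
 *	st = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE,
 *		     "my_lockid", strlen("my_lockid"),
 *		     my_ast, my_astdata, my_bast);
 *	if (st == DLM_NOTQUEUED)
 *		... the lock could not be granted immediately ...
 *
 * A later conversion would pass LKM_CONVERT with the same lksb, ast,
 * bast and astdata, since the convert path above checks that these
 * match the original request.
 */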