/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmlock.c
 *
 * underlying calls for lock creation
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#include "dlmconvert.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;

static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
						    struct dlm_lock_resource *res,
						    struct dlm_lock *lock, int flags);
static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);
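
/*
 * For reference, dlm_lock_compatible() (not defined in this file) is
 * assumed here to implement the classic DLM mode compatibility matrix,
 * restricted to the three modes this DLM uses.  A minimal sketch:
 *
 *	held \ requested	NL	PR	EX
 *	NL			yes	yes	yes
 *	PR			yes	yes	no
 *	EX			yes	no	no
 *
 * dlm_can_grant_new_lock() below grants a new request only if it is
 * compatible with every lock on both the granted and converting queues.
 */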

/* Tell us whether we can grant a new lock request.
 * locking:
 *   caller needs:  res->spinlock
 *   taken:         none
 *   held on exit:  none
 * returns: 1 if the lock can be granted, 0 otherwise.
 */
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
				  struct dlm_lock *lock)
{
	struct list_head *iter;
	struct dlm_lock *tmplock;

	list_for_each(iter, &res->granted) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
	}

	list_for_each(iter, &res->converting) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
	}

	return 1;
}

/* performs lock creation at the lockres master site
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_NOTQUEUED
 */
static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	int call_ast = 0, kick_thread = 0;
	enum dlm_status status = DLM_NORMAL;

	mlog_entry("type=%d\n", lock->ml.type);

	spin_lock(&res->spinlock);
	/* if called from dlm_create_lock_handler, need to
	 * ensure it will not sleep in dlm_wait_on_lockres */
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL &&
	    lock->ml.node != dlm->node_num) {
		/* erf.  state changed after lock was dropped. */
		spin_unlock(&res->spinlock);
		dlm_error(status);
		return status;
	}
	__dlm_wait_on_lockres(res);
	__dlm_lockres_reserve_ast(res);

	if (dlm_can_grant_new_lock(res, lock)) {
		mlog(0, "I can grant this lock right away\n");
		/* got it right away */
		lock->lksb->status = DLM_NORMAL;
		status = DLM_NORMAL;
		dlm_lock_get(lock);
		list_add_tail(&lock->list, &res->granted);

		/* for the recovery lock, we can't allow the ast
		 * to be queued since the dlmthread is already
		 * frozen.  but the recovery lock is always locked
		 * with LKM_NOQUEUE so we do not need the ast in
		 * this special case */
		if (!dlm_is_recovery_lock(res->lockname.name,
					  res->lockname.len)) {
			kick_thread = 1;
			call_ast = 1;
		} else {
			mlog(0, "%s: returning DLM_NORMAL to "
			     "node %u for reco lock\n", dlm->name,
			     lock->ml.node);
		}
	} else {
		/* for NOQUEUE request, unless we get the
		 * lock right away, return DLM_NOTQUEUED */
		if (flags & LKM_NOQUEUE) {
			status = DLM_NOTQUEUED;
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				mlog(0, "%s: returning NOTQUEUED to "
				     "node %u for reco lock\n", dlm->name,
				     lock->ml.node);
			}
		} else {
			dlm_lock_get(lock);
			list_add_tail(&lock->list, &res->blocked);
			kick_thread = 1;
		}
	}
	/* reduce the inflight count, this may result in the lockres
	 * being purged below during calc_usage */
	if (lock->ml.node == dlm->node_num)
		dlm_lockres_drop_inflight_ref(dlm, res);

	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* either queue the ast or release it */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else
		dlm_lockres_release_ast(dlm, res);

	dlm_lockres_calc_usage(dlm, res);
	if (kick_thread)
		dlm_kick_thread(dlm, res);

	return status;
}

void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock)
{
	/* remove from local queue if it failed */
	list_del_init(&lock->list);
	lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
}
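
/*
 * Note on terminology, derived from the code in this file: each lockres
 * keeps three secondary queues that locks move between: res->granted
 * (lock is held), res->converting (granted at one mode, waiting for a
 * mode change) and res->blocked (waiting to be granted at all).
 * dlmlock_remote() below parks the new lock on the blocked queue while
 * the network request is in flight.
 */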

/*
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_DENIED, DLM_RECOVERING, or net status
 */
static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	enum dlm_status status = DLM_DENIED;
	int lockres_changed = 1;

	mlog_entry("type=%d\n", lock->ml.type);
	mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
	     res->lockname.name, flags);

	spin_lock(&res->spinlock);

	/* will exit this call with spinlock held */
	__dlm_wait_on_lockres(res);
	res->state |= DLM_LOCK_RES_IN_PROGRESS;

	/* add lock to local (secondary) queue */
	dlm_lock_get(lock);
	list_add_tail(&lock->list, &res->blocked);
	lock->lock_pending = 1;
	spin_unlock(&res->spinlock);

	/* spec seems to say that you will get DLM_NORMAL when the lock
	 * has been queued, meaning we need to wait for a reply here. */
	status = dlm_send_remote_lock_request(dlm, res, lock, flags);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	lock->lock_pending = 0;
	if (status != DLM_NORMAL) {
		if (status == DLM_RECOVERING &&
		    dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			/* recovery lock was mastered by dead node.
			 * we need to have calc_usage shoot down this
			 * lockres and completely remaster it. */
			mlog(0, "%s: recovery lock was owned by "
			     "dead node %u, remaster it now.\n",
			     dlm->name, res->owner);
		} else if (status != DLM_NOTQUEUED) {
			/*
			 * DO NOT call calc_usage, as this would unhash
			 * the remote lockres before we ever get to use
			 * it.  treat as if we never made any change to
			 * the lockres.
			 */
			lockres_changed = 0;
			dlm_error(status);
		}
		dlm_revert_pending_lock(res, lock);
		dlm_lock_put(lock);
	} else if (dlm_is_recovery_lock(res->lockname.name,
					res->lockname.len)) {
		/* special case for the $RECOVERY lock.
		 * there will never be an AST delivered to put
		 * this lock on the proper secondary queue
		 * (granted), so do it manually. */
		mlog(0, "%s: $RECOVERY lock for this node (%u) is "
		     "mastered by %u; got lock, manually granting (no ast)\n",
		     dlm->name, dlm->node_num, res->owner);
		list_move_tail(&lock->list, &res->granted);
	}
	spin_unlock(&res->spinlock);

	if (lockres_changed)
		dlm_lockres_calc_usage(dlm, res);

	wake_up(&res->wq);
	return status;
}
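
/*
 * A note on byte order in the create-lock message built below: the
 * cookie is stored big-endian in struct dlm_lock from the moment
 * dlm_init_lock() runs (see the cpu_to_be64() there), so it is copied
 * into the message as-is, while the flags are converted with
 * cpu_to_be32() at send time.
 */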

/* for remote lock creation.
 * locking:
 *   caller needs:  none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NOLOCKMGR, or net status
 */
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
						    struct dlm_lock_resource *res,
						    struct dlm_lock *lock, int flags)
{
	struct dlm_create_lock create;
	int tmpret, status = 0;
	enum dlm_status ret;

	mlog_entry_void();

	memset(&create, 0, sizeof(create));
	create.node_idx = dlm->node_num;
	create.requested_type = lock->ml.type;
	create.cookie = lock->ml.cookie;
	create.namelen = res->lockname.len;
	create.flags = cpu_to_be32(flags);
	memcpy(create.name, res->lockname.name, create.namelen);

	tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
				    sizeof(create), res->owner, &status);
	if (tmpret >= 0) {
		/* successfully sent and received */
		ret = status;  /* this is already a dlm_status */
		if (ret == DLM_REJECTED) {
			mlog(ML_ERROR, "%s:%.*s: BUG.  this is a stale lockres "
			     "no longer owned by %u.  that node is coming back "
			     "up currently.\n", dlm->name, create.namelen,
			     create.name, res->owner);
			dlm_print_one_lock_resource(res);
			BUG();
		}
	} else {
		mlog_errno(tmpret);
		if (dlm_is_host_down(tmpret)) {
			ret = DLM_RECOVERING;
			mlog(0, "node %u died so returning DLM_RECOVERING "
			     "from lock message!\n", res->owner);
		} else {
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}

void dlm_lock_get(struct dlm_lock *lock)
{
	kref_get(&lock->lock_refs);
}

void dlm_lock_put(struct dlm_lock *lock)
{
	kref_put(&lock->lock_refs, dlm_lock_release);
}

static void dlm_lock_release(struct kref *kref)
{
	struct dlm_lock *lock;

	lock = container_of(kref, struct dlm_lock, lock_refs);

	BUG_ON(!list_empty(&lock->list));
	BUG_ON(!list_empty(&lock->ast_list));
	BUG_ON(!list_empty(&lock->bast_list));
	BUG_ON(lock->ast_pending);
	BUG_ON(lock->bast_pending);

	dlm_lock_detach_lockres(lock);

	if (lock->lksb_kernel_allocated) {
		mlog(0, "freeing kernel-allocated lksb\n");
		kfree(lock->lksb);
	}
	kfree(lock);
}

/* associate a lock with its lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res)
{
	dlm_lockres_get(res);
	lock->lockres = res;
}

/* drop ref on lockres, if there is still one associated with lock */
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
{
	struct dlm_lock_resource *res;

	res = lock->lockres;
	if (res) {
		lock->lockres = NULL;
		mlog(0, "removing lock's lockres reference\n");
		dlm_lockres_put(res);
	}
}

static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie)
{
	INIT_LIST_HEAD(&newlock->list);
	INIT_LIST_HEAD(&newlock->ast_list);
	INIT_LIST_HEAD(&newlock->bast_list);
	spin_lock_init(&newlock->spinlock);
	newlock->ml.type = type;
	newlock->ml.convert_type = LKM_IVMODE;
	newlock->ml.highest_blocked = LKM_IVMODE;
	newlock->ml.node = node;
	newlock->ml.pad1 = 0;
	newlock->ml.list = 0;
	newlock->ml.flags = 0;
	newlock->ast = NULL;
	newlock->bast = NULL;
	newlock->astdata = NULL;
	newlock->ml.cookie = cpu_to_be64(cookie);
	newlock->ast_pending = 0;
	newlock->bast_pending = 0;
	newlock->convert_pending = 0;
	newlock->lock_pending = 0;
	newlock->unlock_pending = 0;
	newlock->cancel_pending = 0;
	newlock->lksb_kernel_allocated = 0;

	kref_init(&newlock->lock_refs);
}

struct dlm_lock *dlm_new_lock(int type, u8 node, u64 cookie,
			      struct dlm_lockstatus *lksb)
{
	struct dlm_lock *lock;
	int kernel_allocated = 0;

	lock = kzalloc(sizeof(*lock), GFP_NOFS);
	if (!lock)
		return NULL;

	if (!lksb) {
		/* zero memory only if kernel-allocated */
		lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
		if (!lksb) {
			kfree(lock);
			return NULL;
		}
		kernel_allocated = 1;
	}

	dlm_init_lock(lock, type, node, cookie);
	if (kernel_allocated)
		lock->lksb_kernel_allocated = 1;
	lock->lksb = lksb;
	lksb->lockid = lock;
	return lock;
}
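
/*
 * Reference counting sketch (derived from the code above): a lock
 * returned by dlm_new_lock() starts with a single reference from
 * kref_init().  Each time the lock is put on a lockres queue another
 * reference is taken via dlm_lock_get(), and dlm_lock_release() only
 * runs once every holder has called dlm_lock_put().  The lksb is freed
 * along with the lock, but only if it was kernel-allocated here rather
 * than supplied by the caller.
 */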

/* handler for lock creation net message
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
 */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	enum dlm_status status = DLM_NORMAL;
	char *name;
	unsigned int namelen;

	BUG_ON(!dlm);

	mlog_entry_void();

	if (!dlm_grab(dlm))
		return DLM_REJECTED;

	name = create->name;
	namelen = create->namelen;
	status = DLM_REJECTED;
	if (!dlm_domain_fully_joined(dlm)) {
		mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
		     "sending a create_lock message for lock %.*s!\n",
		     dlm->name, create->node_idx, namelen, name);
		dlm_error(status);
		goto leave;
	}

	status = DLM_IVBUFLEN;
	if (namelen > DLM_LOCKID_NAME_MAX) {
		dlm_error(status);
		goto leave;
	}

	status = DLM_SYSERR;
	newlock = dlm_new_lock(create->requested_type,
			       create->node_idx,
			       be64_to_cpu(create->cookie), NULL);
	if (!newlock) {
		dlm_error(status);
		goto leave;
	}

	lksb = newlock->lksb;

	if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
		lksb->flags |= DLM_LKSB_GET_LVB;
		mlog(0, "set DLM_LKSB_GET_LVB flag\n");
	}

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, name, namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	spin_unlock(&res->spinlock);

	if (status != DLM_NORMAL) {
		mlog(0, "lockres recovering/migrating/in-progress\n");
		goto leave;
	}

	dlm_lock_attach_lockres(newlock, res);

	status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
leave:
	if (status != DLM_NORMAL)
		if (newlock)
			dlm_lock_put(newlock);

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}


/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
	u64 tmpnode = node_num;

	/* shift single byte of node num into top 8 bits */
	tmpnode <<= 56;

	spin_lock(&dlm_cookie_lock);
	*cookie = (dlm_next_cookie | tmpnode);
	if (++dlm_next_cookie & 0xff00000000000000ull) {
		mlog(0, "This node's cookie will now wrap!\n");
		dlm_next_cookie = 1;
	}
	spin_unlock(&dlm_cookie_lock);
}
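
/*
 * Worked example of the cookie layout above: for node_num 3 and
 * dlm_next_cookie 5, the packed u64 is
 *
 *	(3ULL << 56) | 5  ==  0x0300000000000005
 *
 * i.e. the node number lives in the top byte and the 56-bit counter in
 * the rest.  When the counter would spill into the top byte it is reset
 * to 1, so cookies are unique per node until the counter wraps.
 */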

enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
			struct dlm_lockstatus *lksb, int flags,
			const char *name, int namelen, dlm_astlockfunc_t *ast,
			void *data, dlm_bastlockfunc_t *bast)
{
	enum dlm_status status;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	int convert = 0, recovery = 0;

	/* yes this function is a mess.
	 * TODO: clean this up.  lots of common code in the
	 * lock and convert paths, especially in the retry blocks */
	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}

	status = DLM_BADPARAM;
	if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
		dlm_error(status);
		goto error;
	}

	if (flags & ~LKM_VALID_FLAGS) {
		dlm_error(status);
		goto error;
	}

	convert = (flags & LKM_CONVERT);
	recovery = (flags & LKM_RECOVERY);

	if (recovery &&
	    (!dlm_is_recovery_lock(name, namelen) || convert)) {
		dlm_error(status);
		goto error;
	}
	if (convert && (flags & LKM_LOCAL)) {
		mlog(ML_ERROR, "strange LOCAL convert request!\n");
		goto error;
	}

	if (convert) {
		/* CONVERT request */

		/* if converting, must pass in a valid dlm_lock */
		lock = lksb->lockid;
		if (!lock) {
			mlog(ML_ERROR, "NULL lock pointer in convert "
			     "request\n");
			goto error;
		}

		res = lock->lockres;
		if (!res) {
			mlog(ML_ERROR, "NULL lockres pointer in convert "
			     "request\n");
			goto error;
		}
		dlm_lockres_get(res);

		/* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
		 * static after the original lock call.  convert requests will
		 * ensure that everything is the same, or return DLM_BADARGS.
		 * this means that DLM_DENIED_NOASTS will never be returned.
		 */
		if (lock->lksb != lksb || lock->ast != ast ||
		    lock->bast != bast || lock->astdata != data) {
			status = DLM_BADARGS;
			mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lksb, ast, bast, data);
			mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lock->lksb, lock->ast,
			     lock->bast, lock->astdata);
			goto error;
		}
retry_convert:
		dlm_wait_for_recovery(dlm);

		if (res->owner == dlm->node_num)
			status = dlmconvert_master(dlm, res, lock, flags, mode);
		else
			status = dlmconvert_remote(dlm, res, lock, flags, mode);
		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			/* for now, see how this works without sleeping
			 * and just retry right away.  I suspect the reco
			 * or migration will complete fast enough that
			 * no waiting will be necessary */
			mlog(0, "retrying convert with migration/recovery/"
			     "in-progress\n");
			msleep(100);
			goto retry_convert;
		}
	} else {
		u64 tmpcookie;

		/* LOCK request */
		status = DLM_BADARGS;
		if (!name) {
			dlm_error(status);
			goto error;
		}

		status = DLM_IVBUFLEN;
		if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
			dlm_error(status);
			goto error;
		}

		dlm_get_next_cookie(dlm->node_num, &tmpcookie);
		lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
		if (!lock) {
			dlm_error(status);
			goto error;
		}

		if (!recovery)
			dlm_wait_for_recovery(dlm);

		/* find or create the lock resource */
		res = dlm_get_lock_resource(dlm, name, namelen, flags);
		if (!res) {
			status = DLM_IVLOCKID;
			dlm_error(status);
			goto error;
		}

		mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
		mlog(0, "creating lock: lock=%p res=%p\n", lock, res);

		dlm_lock_attach_lockres(lock, res);
		lock->ast = ast;
		lock->bast = bast;
		lock->astdata = data;

retry_lock:
		if (flags & LKM_VALBLK) {
			mlog(0, "LKM_VALBLK passed by caller\n");

			/* LVB requests for non PR, PW or EX locks are
			 * ignored. */
			if (mode < LKM_PRMODE)
				flags &= ~LKM_VALBLK;
			else {
				flags |= LKM_GET_LVB;
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			}
		}

		if (res->owner == dlm->node_num)
			status = dlmlock_master(dlm, res, lock, flags);
		else
			status = dlmlock_remote(dlm, res, lock, flags);

		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			mlog(0, "retrying lock with migration/"
			     "recovery/in progress\n");
			msleep(100);
			/* no waiting for dlm_reco_thread */
			if (recovery) {
				if (status != DLM_RECOVERING)
					goto retry_lock;

				mlog(0, "%s: got RECOVERING "
				     "for $RECOVERY lock, master "
				     "was %u\n", dlm->name,
				     res->owner);
				/* wait to see the node go down, then
				 * drop down and allow the lockres to
				 * get cleaned up.  need to remaster. */
				dlm_wait_for_node_death(dlm, res->owner,
							DLM_NODE_DEATH_WAIT_MAX);
			} else {
				dlm_wait_for_recovery(dlm);
				goto retry_lock;
			}
		}

		if (status != DLM_NORMAL) {
			lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
			if (status != DLM_NOTQUEUED)
				dlm_error(status);
			goto error;
		}
	}

error:
	if (status != DLM_NORMAL) {
		if (lock && !convert)
			dlm_lock_put(lock);
		/* this is kind of unnecessary */
		lksb->status = status;
	}

	/* put lockres ref from the convert path
	 * or from dlm_get_lock_resource */
	if (res)
		dlm_lockres_put(res);

	return status;
}
EXPORT_SYMBOL_GPL(dlmlock);
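
/*
 * Caller sketch, for illustration only (not part of the original file):
 * a new lock request from a hypothetical caller could look like the
 * code below.  The callback signatures follow the dlm_astlockfunc_t /
 * dlm_bastlockfunc_t typedefs from dlmapi.h as understood here;
 * my_ast, my_bast and ctx are made-up names.
 *
 *	static void my_ast(void *astdata)
 *	{
 *		// granted (or convert completed); check lksb->status
 *	}
 *
 *	static void my_bast(void *astdata, int blocked_type)
 *	{
 *		// another node wants a mode incompatible with ours
 *	}
 *
 *	// ctx->lksb must stay valid for the lifetime of the lock
 *	ret = dlmlock(dlm, LKM_EXMODE, &ctx->lksb, LKM_NOQUEUE,
 *		      "my_lock_name", strlen("my_lock_name"),
 *		      my_ast, ctx, my_bast);
 *	if (ret == DLM_NOTQUEUED)
 *		; // lock was busy and LKM_NOQUEUE was set
 */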